Dataset columns (name, type, range):

  query             stringlengths      9 to 3.4k
  document          stringlengths      9 to 87.4k
  metadata          dict
  negatives         sequencelengths    4 to 101
  negative_scores   sequencelengths    4 to 101
  document_score    stringlengths      3 to 10
  document_rank     stringclasses      102 values
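The column statistics above appear to follow the Hugging Face datasets viewer conventions, so each row below (query, document, metadata, negatives, negative_scores, document_score, document_rank) can be inspected programmatically once the data is loaded. A minimal sketch, assuming the rows are published as a Hugging Face dataset under a hypothetical repo id "user/edge-retrieval-triplets" (substitute the real repository or a local path):

from datasets import load_dataset  # pip install datasets

# Hypothetical repo id -- replace with the actual dataset location.
ds = load_dataset("user/edge-retrieval-triplets", split="train")

row = ds[0]
print(row["query"])                # short docstring-style query
print(row["document"][:200])       # the positive code snippet paired with the query
print(len(row["negatives"]))       # 4 to 101 mined negative snippets
print(row["negative_scores"][:5])  # scores aligned index-for-index with the negatives
print(row["document_score"], row["document_rank"])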
List of outgoing edges from a vertex.
def get_successors(self, pos: tuple):
    if self.is_obstacle(pos):
        return {}
    else:
        x, y = pos[0], pos[1]
        neighbours = [(x + 1, y), (x + 1, y + 1), (x, y + 1), (x - 1, y + 1),
                      (x - 1, y), (x - 1, y - 1), (x, y - 1), (x + 1, y - 1)]
        return {k: self.move_cost(pos, k) for k in neighbours if self.is_free(k)}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def out_edges(self, vertex):\n return self[vertex].values()", "def outgoing_edges(self, vertices, labels=True):\n return list(self.outgoing_edge_iterator(vertices, labels=labels))", "def edges(self):\n es = []\n for vertex1 in self.vertices():\n for vertex2 in self.out_vertices(vertex1):\n es.append(self[vertex1][vertex2])\n return es", "def neighbors_out(self, vertex):\n return list(self.neighbor_out_iterator(vertex))", "def edges(self):\n return [edge(self.vertices[i - 1], self.vertices[(i)]) for i in range(-1, len(self.vertices))]", "def getOutEdges(self):\n edges = []\n for edict in mm.G[self].values():\n for k in edict.keys():\n edges.append(edict.get(k).get(\"edge\"))\n \n return edges", "def incident_edges(self,v,outgoing=True):\n adj = self._outgoing if outgoing else self._incoming\n for edge in adj[v].values():\n yield edge", "def getEdges(self):\n edgeList = []\n for v in self.adjList:\n for i in range(len(self.adjList[v])):\n edgeList.append((v, self.adjList[v][i]))\n return edgeList", "def getEdges(self):\n # for node in graph,\n # return node -> node for j in graph[node]\n\n return [\"->\".join([str(n1), str(n2)]) for n1 in self.graph.keys() for n2 in self.graph[n1]]", "def outgoing_edge_iterator(self, vertices, labels=True):\n if vertices is None:\n vertices = self\n elif vertices in self:\n vertices = [vertices]\n else:\n vertices = [v for v in vertices if v in self]\n return self._backend.iterator_out_edges(vertices, labels)", "def getEdges(self):\n\n return [(cell, vertice) for cell in self.adjacent.keys() for vertice in self.adjacent[cell]]", "def iterate_outbound_edges(self):\n vertex = int(input('enter vertex: '))\n try:\n vertices = self._graph.get_outbound_edges(vertex)\n except ValueError as ve:\n print(ve)\n return\n print('Outbound edges from ' + str(vertex) + ':')\n for v in vertices:\n cost = self._graph.get_cost(vertex, v)\n print('Edge from ' + str(vertex) + ' to ' + str(v) + ' with cost ' + str(cost))", "def generate_edges(self):\n edges = []\n for vertex in self.graph_dict:\n for neighbour in self.graph_dict[vertex]:\n if {neighbour, vertex} not in edges:\n edges.append({neighbour, vertex})\n return edges", "def get_edges(self) -> []:\n graph_edges = []\n\n for vertex in self.adj_list:\n for connection in self.adj_list[vertex]:\n if (vertex, connection) not in graph_edges and (connection, vertex) not in graph_edges:\n graph_edges.append((vertex, connection))\n\n return graph_edges", "def edgesFromVertex(u):\r\n edgeRepresentation = lambda v: f\"({u}, {v}, {self.getCapacity((u, v))}, {self.getFlow((u,v))})\"\r\n return \", \".join(map(edgeRepresentation, sorted(self.adjacent[u])))", "def __generate_edges(self):\n\n edges = []\n for vertex in self.__graph_dict:\n for neighbour in self.__graph_dict[vertex]:\n if {neighbour, vertex} not in edges:\n edges.append( {vertex,neighbour} )\n return edges", "def incoming_edges(self, vertices, labels=True):\n return list(self.incoming_edge_iterator(vertices, labels=labels))", "def __generate_edges(self):\n edges = []\n for vertex in self.__graph_dict:\n for neighbour in self.__graph_dict[vertex]:\n if {neighbour, vertex} not in edges:\n edges.append({vertex, neighbour})\n return edges", "def __generate_edges(self):\r\n edges = []\r\n for vertex in self.__graph_dict:\r\n for neighbor in self.__graph_dict[vertex]:\r\n if {neighbor, vertex} not in edges:\r\n edges.append({vertex, neighbor})\r\n return edges", "def __generate_edges(self):\n edges = []\n for vertex in self.__graph_dict:\n for neighbour in 
self.__graph_dict[vertex]:\n edges.append({vertex, neighbour})\n return edges", "def out_edges(self) -> List[str]:\n return list(self.proto.out_edges)", "def get_edge_list(self):\n return [(edge.value, edge.node_from.value, edge.node_to.value) for edge in self.edges]", "def get_neighbours(self, vertex):\n output = []\n \n if vertex in self.adjacency_list:\n for neighbour in self.adjacency_list[vertex]:\n output.append([neighbour.vertex.value, neighbour.weight])\n \n return output", "def edges(self):\n edges = []\n for key in self._g:\n if self._g[key]:\n for value in self._g[key]:\n edges.append((key, value))\n return edges", "def get_edges(self):\n edges = []\n for (key, target) in self.edges.keys():\n edges.append((key, target))\n return edges", "def get_edges(graph):\n edges = []\n for vertex in graph.keys():\n connected_nodes = graph[vertex]\n for node in connected_nodes:\n edges.append(str(vertex + node))\n\n return edges", "def get_all_edges(self, where_to=OUTGOING):\n\n if where_to == Vertex.OUTGOING:\n return self._outgoing\n elif where_to == Vertex.INCOMING:\n return self._incoming", "def edges(self):\n edge_list = []\n for node1 in self.node_dict:\n for node2 in self.node_dict[node1]:\n edge_list.append((node1,\n node2,\n self.node_dict[node1][node2]))\n return edge_list", "def get_edges(self, vertex_id):\n\n return self._graph_state.get_edges(vertex_id)", "def edges(self):\n return self.graph.edges", "def edges(self):\n vertices = self.vertices(closed=True)\n\n for i in range(len(self)):\n yield(vertices[:, i], vertices[:, i+1])", "def getAdjacentVertices(self, vertex):\n return self.adjList[vertex]", "def out_vertices(self, vertex):\n return self[vertex].keys()", "def vertices(self):\n return self._outgoing.keys()", "def getEdges(self):\n return self.edgeIndex", "def edges(self):\n return self.dovetails + self.containments + self.internals", "def get_edges(\n self,\n node_key: NodeKey,\n verb: str = None,\n direction: Optional[Direction] = None,\n limit: int = 100,\n ) -> List[Edge]:", "def edges(self):\n return self.__generate_edges()", "def edges(self):\n return self.__generate_edges()", "def edges(self):\n return self.__generate_edges()", "def extract_edges(graph):\n return graph.get_edges()", "def edges(self):\n return self.generate_edges()", "def edges_list(self):\n return self._edges_list", "def edges(self, node):\n nID = self.n2ID[node]\n return [(self.ID2n[n1ID], self.ID2n[n2ID]) for (n1ID, n2ID) in self.G.edges(nID)]", "def edges(self):\r\n return self.__generate_edges()", "def edges(self):\n return [(k, val) for k, v in self.dict.iteritems() for val in v]", "def generate_edges(graph):\n edges = []\n\n # for each node in graph\n for node in graph:\n\n # for each neighbour node of a single node\n for neighbour in graph[node]:\n # if edge exists then append\n edges.append((node, neighbour))\n return edges", "def neighbors_of(\n self, vertex, color: T.Optional[TriColor] = None\n ) -> T.Collection[T.Tuple['Vertex', int]]:\n\n neighbors = [\n (edge.sink, edge.weight or edge.sink.weight)\n for edge in vertex.out_edges\n if color is None or edge.sink.color == color\n ]\n return neighbors", "def generate_edges(self):\n edges = []\n for vertex in self.graph_dict:\n for neighbour in self.graph_dict[vertex]:\n if (neighbour, vertex) not in edges:\n edges.append((vertex, neighbour))\n \n for pair in edges:\n for otherpair in edges:\n if pair[1] == otherpair[0]:\n edges.append((pair[0],otherpair[1]))\n return edges", "def get_edges_weighted(self):\n edges = []\n for v in 
self.vertices.values():\n for w in v.neighbors:\n edges.append((v.name, w.name, v.neighbors[w]))\n return edges", "def get_exiting_edges(self,node):\n exit_edge_pattern=re.compile('edge_{0}_(?P<end_node>\\w+)_(?P<iterator>\\w+)'.format(node))\n exit_edges=[]\n for index,edge in enumerate(self.edges):\n if re.match(exit_edge_pattern,edge):\n exit_edges.append(edge)\n return exit_edges", "def get_inv_neighbors(self, vertex: Vertex) -> Set[Vertex]:\n if isinstance(vertex, str):\n vertex = Vertex(vertex)\n return self._inv_transition_matrix[vertex]", "def edgesFromVertex(u):\r\n edgeRepresentation = lambda v: f\"({u}, {v}, {self.capacity[(u, v)]})\"\r\n return \", \".join(map(edgeRepresentation, self.residualNeighbors(u)))", "def edges(self) -> EdgeList:\r\n return self._edges", "def edge_vertices(edge):\n return [edge.vertex1, edge.vertex2]", "def neighbors_in(self, vertex):\n return list(self.neighbor_in_iterator(vertex))", "def bounded_edges(self):\n obj = self.Vrepresentation()\n edges = []\n for i in range(len(obj)):\n if not obj[i].is_vertex(): continue\n for j in range(i+1,len(obj)):\n if not obj[j].is_vertex(): continue\n if self.vertex_adjacency_matrix()[i,j] == 0: continue\n yield (obj[i], obj[j])", "def out(self, from_, *edge_classes):\n records = self.client.command('SELECT out({0}) FROM {1}'\n .format(','.join(self.coerce_class_names(edge_classes))\n , self.coerce_class_names(from_)))\n return [self.get_vertex(v) for v in records[0].oRecordData['out']] \\\n if records else []", "def return_adjacencies(self, vertex: np.int_):\n return self.__adj[vertex]", "def edges(self):\n edges = []\n for key in self:\n if key:\n for edge in self[key]:\n edges.append((key, edge))\n return edges", "def get_all_edges(self):\n \n ans = []\n for node_id in self.neighbors:\n for edge_to_neighbor in self.neighbors[node_id]:\n ans.append(edge_to_neighbor)\n\n return ans", "def edges(self) -> Iterable[Tuple[Node]]:\n edges = []\n for node in self.__graph_dict.keys():\n for neighbour in self.__graph_dict[node]:\n # Since all edges go both ways, we need only return one of them.\n if {neighbour, node} not in edges:\n edges.append({node, neighbour})\n yield (node, neighbour)", "def neighbor_out_iterator(self, vertex):\n return iter(set(self._backend.iterator_out_nbrs(vertex)))", "def to_edges(graph):\n return list(zip(graph[:-1], graph[1:]))", "def outE(self, from_, *edge_classes):\n records = self.client.command('SELECT outE({0}) FROM {1}'\n .format(','.join(self.coerce_class_names(edge_classes))\n , self.coerce_class_names(from_)))\n return [self.get_edge(e) for e in records[0].oRecordData['outE']] \\\n if records else []", "def edges(self):\n for e in self._edges:\n yield e", "def edges(self):\n return self._edges", "def edges(self):\n return self._edges", "def edges(self):\n return self._edges", "def edges(self):\n return self._edges", "def edges(self):\n return self._edges", "def edges(self):\n return self._edges", "def get_edges(self):\n tuples = list(self.graph.edges)\n dict = nx.get_edge_attributes(self.graph, 'name')\n edges = []\n for tuple in tuples:\n edges.append(dict[tuple])\n return edges", "def get_edges_by_vertex(self, id, type=0):\n edges = []\n for (source, target) in self.edges.keys():\n if type == 1:\n if source == id:\n edges.append((source, target))\n elif type == 2:\n if target == id:\n edges.append((source, target))\n else:\n if source == id or target == id:\n edges.append((source, target))\n return edges", "def get_edges(self):\n return self._edges", "def 
edgify(vertices:list)->list:\n edges = []\n for k in range(0, len(vertices) - 1):\n edges.append([vertices[k], vertices[k + 1]])\n return edges", "def edges(self):\n return [(a, b) for a in self._consequences_of\n for b in self._consequences_of[a]]", "def edges(self):\n edges = []\n for key in self:\n if key:\n for edge in self[key]:\n edges.append((key, edge, self[key][edge]))\n return edges", "def adj_to(self, v):\n\n neighbours = set()\n\n for (x, y) in self._edges:\n if x == v: neighbours.add(y)\n if y == v: neighbours.add(x)\n\n return neighbours", "def edges (self):\n return self.__edges", "def edges(self):\r\n return [\r\n (parent, child)\r\n for parent in self._children_of\r\n for child in self._children_of[parent]\r\n ]", "def get_edges(self):\n try:\n temp = self.edges\n except:\n temp = []\n return temp", "def vertices(self):\r\n return self.adjacent.keys()", "def out_edges(self, node: str, keys: bool = False, data: bool = False) -> List:\n return self.graph.out_edges(node, keys=keys, data=data)", "def edges(adj_mat, vertices):\n return [(i,j) for i,j in\n vertices if (i < j and adj_mat[i][j] == 1)]", "def get_all_vertices(self):\r\n for vertex in self.__neighbours.keys():\r\n yield vertex", "def dump_graph(self):\n\n edges = []\n for vertex in self.__graph_dict:\n mylist = list(vertex)\n logging.debug(\"mylist : \", mylist)", "def get_edge(self, vertex, where_to=OUTGOING):\n\n edge_list = None\n\n if where_to == Vertex.OUTGOING:\n edge_list = self._outgoing\n\n elif where_to == Vertex.INCOMING:\n edge_list = self._incoming\n\n for edge in edge_list:\n if edge.return_other_side(self) is vertex:\n return edge\n return None", "def get_neighbours(self, vertex):\r\n if not self.is_vertex_in_graph(vertex):\r\n raise GraphException(f\"The vertex {vertex} does not exist in the graph.\")\r\n for neighbour in self.__neighbours[vertex]:\r\n yield neighbour", "def vertices(self):\n return list(self.__graph.values())", "def edgesWithVertices(self):\n\n return {e for e in self.edges if not (e.vert1 is None or e.vert2 is None)}", "def edge_list(self) -> List[Edge]:\r\n return [edge for edge in sorted(self._edges.values(), key=attrgetter(\"key\"))]", "def all_out_edges_of_node(self, id1: int) -> dict:\r\n return self.Edges[id1]", "def edges(self) -> list[Segment]:\n result = self._edges\n return list(distinct(Segment(result[idx], copy=False) for idx in np.ndindex(*self.shape[:2])))", "def eligible_edges_with_indexes(self):\n return list(map(lambda e: (self.edges.index(e), e), self.eligible_edges))", "def eligible_edges(self):\n if len(self.edges) == 4:\n return [self.edges[0], self.edges[2]]\n return []", "def edges(self):\n result = set() # avoid double-reporting edges of undirected graph\n for secondary_map in self._outgoing.values():\n result.update(secondary_map.values()) # add edges to resulting set\n return result", "def all_out_edges_of_node(self, id1: int) -> dict:\n return self.edges_out[id1]", "def get_edges(self):\n return [tuple(edge) for edge in self._tree.tree_grid[1:3, :].T]", "def eligible_edges(self):\n return self.edges", "def edges(self):\n return self.show_edges()" ]
[ "0.79884607", "0.7752863", "0.7353233", "0.72382283", "0.7054124", "0.6991357", "0.6975038", "0.6930764", "0.6925828", "0.6879445", "0.6827964", "0.67977715", "0.67761004", "0.67647284", "0.67479455", "0.6713683", "0.66829216", "0.6643639", "0.6629576", "0.6628409", "0.6628271", "0.66224927", "0.6590273", "0.65822434", "0.6578518", "0.6562988", "0.65395886", "0.652886", "0.6516018", "0.651021", "0.6503044", "0.6501655", "0.647285", "0.64687866", "0.6441202", "0.64409024", "0.64375687", "0.64069617", "0.64069617", "0.64069617", "0.63970315", "0.6379688", "0.63785815", "0.6371694", "0.6369107", "0.63643515", "0.6358171", "0.6353664", "0.63462365", "0.6339253", "0.6315435", "0.631356", "0.6296369", "0.6289146", "0.628861", "0.6288322", "0.62849253", "0.6278536", "0.6268796", "0.6251202", "0.6222192", "0.62086964", "0.6203562", "0.6201569", "0.61976355", "0.6193851", "0.6192946", "0.6192946", "0.6192946", "0.6192946", "0.6192946", "0.6192946", "0.6189162", "0.6187574", "0.6183117", "0.6174865", "0.61543596", "0.6145893", "0.61380684", "0.61144143", "0.6111502", "0.610506", "0.60685796", "0.60618657", "0.6038609", "0.6001878", "0.59761834", "0.59644806", "0.59636074", "0.59512734", "0.5925784", "0.59013414", "0.5890387", "0.5887793", "0.588232", "0.58799845", "0.5874077", "0.5864551", "0.58599067", "0.58579314", "0.58244646" ]
0.0
-1
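The metadata objective above lists a single triplet signature over (query, document, negatives), so the most direct use of a row like the one just shown is to expand it into (anchor, positive, negative) training triplets. A minimal sketch, assuming rows behave like plain dicts with the columns listed at the top and that higher negative_scores indicate harder negatives:

def row_to_triplets(row, max_negatives=5):
    # Keep only the highest-scoring (hardest) negatives; the scores are
    # assumed to be aligned index-for-index with the negatives list.
    ranked = sorted(
        zip(row["negatives"], row["negative_scores"]),
        key=lambda pair: float(pair[1]),
        reverse=True,
    )
    return [
        (row["query"], row["document"], negative)
        for negative, _score in ranked[:max_negatives]
    ]

# For the first row, every triplet is anchored on
# "List of outgoing edges from a vertex." with get_successors as the positive.
# triplets = row_to_triplets(ds[0])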
List of incoming edges to a vertex.
def get_predecessors(self, pos: tuple): return self.get_successors(pos)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def incoming_edges(self, vertices, labels=True):\n return list(self.incoming_edge_iterator(vertices, labels=labels))", "def edges(self):\n es = []\n for vertex1 in self.vertices():\n for vertex2 in self.out_vertices(vertex1):\n es.append(self[vertex1][vertex2])\n return es", "def edges(self):\n return [edge(self.vertices[i - 1], self.vertices[(i)]) for i in range(-1, len(self.vertices))]", "def out_edges(self, vertex):\n return self[vertex].values()", "def edge_vertices(edge):\n return [edge.vertex1, edge.vertex2]", "def iterate_inbound_edges(self):\n vertex = int(input('enter vertex: '))\n try:\n vertices = self._graph.get_inbound_edges(vertex)\n except ValueError as ve:\n print(ve)\n return\n print('Inbound edges from ' + str(vertex) + ':')\n for v in vertices:\n cost = self._graph.get_cost(v, vertex)\n print('Edge from ' + str(v) + ' to ' + str(vertex) + ' with cost ' + str(cost))", "def edges_list(self):\n return self._edges_list", "def getEdges(self):\n edgeList = []\n for v in self.adjList:\n for i in range(len(self.adjList[v])):\n edgeList.append((v, self.adjList[v][i]))\n return edgeList", "def get_edge_list(self):\n return [(edge.value, edge.node_from.value, edge.node_to.value) for edge in self.edges]", "def vertices(self):\n return list(self._graph)", "def vertices(self):\n return list(self.__graph.values())", "def vertices(self):\n return self._outgoing.keys()", "def neighbors_in(self, vertex):\n return list(self.neighbor_in_iterator(vertex))", "def get_neighbours(self, vertex):\n output = []\n \n if vertex in self.adjacency_list:\n for neighbour in self.adjacency_list[vertex]:\n output.append([neighbour.vertex.value, neighbour.weight])\n \n return output", "def incoming_edge_iterator(self, vertices, labels=True):\n if vertices is None:\n vertices = self\n elif vertices in self:\n vertices = [vertices]\n else:\n vertices = [v for v in vertices if v in self]\n return self._backend.iterator_in_edges(vertices, labels)", "def edges(self):\n return self.graph.edges", "def getAdjacentVertices(self, vertex):\n return self.adjList[vertex]", "def edges(self):\n edge_list = []\n for node1 in self.node_dict:\n for node2 in self.node_dict[node1]:\n edge_list.append((node1,\n node2,\n self.node_dict[node1][node2]))\n return edge_list", "def get_edges(self) -> []:\n graph_edges = []\n\n for vertex in self.adj_list:\n for connection in self.adj_list[vertex]:\n if (vertex, connection) not in graph_edges and (connection, vertex) not in graph_edges:\n graph_edges.append((vertex, connection))\n\n return graph_edges", "def vertices(self):\r\n return list(self.__graph_dict.keys())", "def vertices(self):\n return list(self.__graph_dict.keys())", "def vertices(self):\n return list(self.__graph_dict.keys())", "def vertices(self):\n return list(self.__graph_dict.keys())", "def edges(self):\n return [(k, val) for k, v in self.dict.iteritems() for val in v]", "def vertices(self):\n return list(self.graph_dict.keys())", "def edges(self):\n edges = []\n for key in self._g:\n if self._g[key]:\n for value in self._g[key]:\n edges.append((key, value))\n return edges", "def get_all_vertices(self):\r\n for vertex in self.__neighbours.keys():\r\n yield vertex", "def __generate_edges(self):\n edges = []\n for vertex in self.__graph_dict:\n for neighbour in self.__graph_dict[vertex]:\n edges.append({vertex, neighbour})\n return edges", "def list_vertices(self):\n return list(self.graph_dict.keys())", "def get_entering_edges(self,node):\n 
enter_edge_pattern=re.compile('edge_(?P<begin_node>\\w+)_{0}_(?P<iterator>\\w+)'.format(node))\n enter_edges=[]\n for index,edge in enumerate(self.edges):\n if re.match(enter_edge_pattern,edge):\n enter_edges.append(edge)\n return enter_edges", "def edges_as_vertices(self) -> Iterable[Tuple[Vec3, Vec3]]:\n v = self.vertices\n for edge in self.edges:\n yield v[edge[0]], v[edge[1]]", "def edges(self):\n return self.generate_edges()", "def incident_edges(self,v,outgoing=True):\n adj = self._outgoing if outgoing else self._incoming\n for edge in adj[v].values():\n yield edge", "def vertices(self):\r\n return self.adjacent.keys()", "def generate_edges(self):\n edges = []\n for vertex in self.graph_dict:\n for neighbour in self.graph_dict[vertex]:\n if {neighbour, vertex} not in edges:\n edges.append({neighbour, vertex})\n return edges", "def __generate_edges(self):\n\n edges = []\n for vertex in self.__graph_dict:\n for neighbour in self.__graph_dict[vertex]:\n if {neighbour, vertex} not in edges:\n edges.append( {vertex,neighbour} )\n return edges", "def edges(self):\n return self.__generate_edges()", "def edges(self):\n return self.__generate_edges()", "def edges(self):\n return self.__generate_edges()", "def edgesFromVertex(u):\r\n edgeRepresentation = lambda v: f\"({u}, {v}, {self.getCapacity((u, v))}, {self.getFlow((u,v))})\"\r\n return \", \".join(map(edgeRepresentation, sorted(self.adjacent[u])))", "def get_edges(graph):\n edges = []\n for vertex in graph.keys():\n connected_nodes = graph[vertex]\n for node in connected_nodes:\n edges.append(str(vertex + node))\n\n return edges", "def getEdges(self):\n\n return [(cell, vertice) for cell in self.adjacent.keys() for vertice in self.adjacent[cell]]", "def edges(self):\r\n return self.__generate_edges()", "def get_edges(self):\n tuples = list(self.graph.edges)\n dict = nx.get_edge_attributes(self.graph, 'name')\n edges = []\n for tuple in tuples:\n edges.append(dict[tuple])\n return edges", "def edges(self) -> EdgeList:\r\n return self._edges", "def iterate_outbound_edges(self):\n vertex = int(input('enter vertex: '))\n try:\n vertices = self._graph.get_outbound_edges(vertex)\n except ValueError as ve:\n print(ve)\n return\n print('Outbound edges from ' + str(vertex) + ':')\n for v in vertices:\n cost = self._graph.get_cost(vertex, v)\n print('Edge from ' + str(vertex) + ' to ' + str(v) + ' with cost ' + str(cost))", "def vertex_adjacencies(self):\n try:\n return self._vertex_adjacencies\n except AttributeError:\n self._vertex_adjacencies = \\\n [ [ v.index(), \n [n.index() for n in v.neighbors()] \n ] for v in self.Vrepresentation() ]\n return self._vertex_adjacencies", "def bounded_edges(self):\n obj = self.Vrepresentation()\n edges = []\n for i in range(len(obj)):\n if not obj[i].is_vertex(): continue\n for j in range(i+1,len(obj)):\n if not obj[j].is_vertex(): continue\n if self.vertex_adjacency_matrix()[i,j] == 0: continue\n yield (obj[i], obj[j])", "def get_edges(self):\n edges = []\n for (key, target) in self.edges.keys():\n edges.append((key, target))\n return edges", "def edges(self):\n return self._edges", "def edges(self):\n return self._edges", "def edges(self):\n return self._edges", "def edges(self):\n return self._edges", "def edges(self):\n return self._edges", "def edges(self):\n return self._edges", "def getEdges(self):\n return self.edgeIndex", "def get_vertices(self) -> []:\n return [i for i in self.adj_list]", "def getVertices(self):\n return list(self.adjList.keys())", "def __generate_edges(self):\n edges = []\n for 
vertex in self.__graph_dict:\n for neighbour in self.__graph_dict[vertex]:\n if {neighbour, vertex} not in edges:\n edges.append({vertex, neighbour})\n return edges", "def edges(self):\n edges = []\n for key in self:\n if key:\n for edge in self[key]:\n edges.append((key, edge))\n return edges", "def edges(self):\n vertices = self.vertices(closed=True)\n\n for i in range(len(self)):\n yield(vertices[:, i], vertices[:, i+1])", "def __generate_edges(self):\r\n edges = []\r\n for vertex in self.__graph_dict:\r\n for neighbor in self.__graph_dict[vertex]:\r\n if {neighbor, vertex} not in edges:\r\n edges.append({vertex, neighbor})\r\n return edges", "def get_vertices(self):\n return self.graph.keys()", "def neighbors_out(self, vertex):\n return list(self.neighbor_out_iterator(vertex))", "def get_edges_weighted(self):\n edges = []\n for v in self.vertices.values():\n for w in v.neighbors:\n edges.append((v.name, w.name, v.neighbors[w]))\n return edges", "def edges(self):\n return self.dovetails + self.containments + self.internals", "def edges (self):\n return self.__edges", "def edges(self):\n return [(a, b) for a in self._consequences_of\n for b in self._consequences_of[a]]", "def getEdges(self):\n # for node in graph,\n # return node -> node for j in graph[node]\n\n return [\"->\".join([str(n1), str(n2)]) for n1 in self.graph.keys() for n2 in self.graph[n1]]", "def edge_list(self) -> List[Edge]:\r\n return [edge for edge in sorted(self._edges.values(), key=attrgetter(\"key\"))]", "def get_edges(self, vertex_id):\n\n return self._graph_state.get_edges(vertex_id)", "def edgesWithVertices(self):\n\n return {e for e in self.edges if not (e.vert1 is None or e.vert2 is None)}", "def edges(self):\n for e in self._edges:\n yield e", "def vertices(self):\n s = set([x for x in self.edges.keys()])\n t = set([y for v in self.edges.values() for (y,d) in v.items()])\n v = s.union(t)\n return list(v)", "def edgify(vertices:list)->list:\n edges = []\n for k in range(0, len(vertices) - 1):\n edges.append([vertices[k], vertices[k + 1]])\n return edges", "def get_edges(\n self,\n node_key: NodeKey,\n verb: str = None,\n direction: Optional[Direction] = None,\n limit: int = 100,\n ) -> List[Edge]:", "def get_edges(self):\n return self._edges", "def edges(self, node):\n nID = self.n2ID[node]\n return [(self.ID2n[n1ID], self.ID2n[n2ID]) for (n1ID, n2ID) in self.G.edges(nID)]", "def out_vertices(self, vertex):\n return self[vertex].keys()", "def edges(self):\n return self.show_edges()", "def get_edges(self):\n try:\n temp = self.edges\n except:\n temp = []\n return temp", "def edges(self):\n edges = []\n for key in self:\n if key:\n for edge in self[key]:\n edges.append((key, edge, self[key][edge]))\n return edges", "def outgoing_edges(self, vertices, labels=True):\n return list(self.outgoing_edge_iterator(vertices, labels=labels))", "def edgesFromVertex(u):\r\n edgeRepresentation = lambda v: f\"({u}, {v}, {self.capacity[(u, v)]})\"\r\n return \", \".join(map(edgeRepresentation, self.residualNeighbors(u)))", "def extract_edges(graph):\n return graph.get_edges()", "def get_inv_neighbors(self, vertex: Vertex) -> Set[Vertex]:\n if isinstance(vertex, str):\n vertex = Vertex(vertex)\n return self._inv_transition_matrix[vertex]", "def get_edge_ids(self):\n edge_ids = []\n \n for node_id in self.nodes:\n if (isinstance(self.nodes[node_id], EdgeNode)):\n edge_ids.append(node_id)\n \n return edge_ids", "def get_edge_ids(self):\n node_ids = self.node_ids\n return [(node_ids[0], node_ids[1])]", "def return_adjacencies(self, 
vertex: np.int_):\n return self.__adj[vertex]", "def get_edges_by_vertex(self, id, type=0):\n edges = []\n for (source, target) in self.edges.keys():\n if type == 1:\n if source == id:\n edges.append((source, target))\n elif type == 2:\n if target == id:\n edges.append((source, target))\n else:\n if source == id or target == id:\n edges.append((source, target))\n return edges", "def train_edges(self):\n return self._train_edges", "def generate_edges(graph):\n edges = []\n\n # for each node in graph\n for node in graph:\n\n # for each neighbour node of a single node\n for neighbour in graph[node]:\n # if edge exists then append\n edges.append((node, neighbour))\n return edges", "def obtener_vertices(self):\n return list(self.vertices.keys())", "def get_all_edges(self):\n \n ans = []\n for node_id in self.neighbors:\n for edge_to_neighbor in self.neighbors[node_id]:\n ans.append(edge_to_neighbor)\n\n return ans", "def get_neighbour_edges(self, cur: Union[str, int]) -> list:\n\t\treturn [edge for edge in self.edges if cur in edge]", "def get_vertices(self):\n output = []\n \n for vertex in self.adjacency_list:\n output.append(vertex.value)\n\n return output", "def edges(self, irc, msg, args, channel):\n pie = self.instances[irc.network]\n irc.reply([str(x) for x in pie.graphs[channel].edges.values()])", "def Adj(self, vertex_name: n) -> list:\n return self._graph[vertex_name].get_connections()", "def generate_edges(self):\n edges = []\n for vertex in self.graph_dict:\n for neighbour in self.graph_dict[vertex]:\n if (neighbour, vertex) not in edges:\n edges.append((vertex, neighbour))\n \n for pair in edges:\n for otherpair in edges:\n if pair[1] == otherpair[0]:\n edges.append((pair[0],otherpair[1]))\n return edges", "def get_neighbours(self, vertex):\r\n if not self.is_vertex_in_graph(vertex):\r\n raise GraphException(f\"The vertex {vertex} does not exist in the graph.\")\r\n for neighbour in self.__neighbours[vertex]:\r\n yield neighbour", "def edges(self):\r\n return [\r\n (parent, child)\r\n for parent in self._children_of\r\n for child in self._children_of[parent]\r\n ]" ]
[ "0.7818218", "0.72931284", "0.7168712", "0.702791", "0.69290024", "0.6920739", "0.6894563", "0.68887043", "0.687522", "0.6836193", "0.6818338", "0.67833453", "0.6775497", "0.67643553", "0.67224413", "0.67204434", "0.66900426", "0.6682848", "0.66743505", "0.66723615", "0.66631573", "0.66631573", "0.66631573", "0.6638683", "0.66243374", "0.6616256", "0.6608568", "0.65938103", "0.65866566", "0.65808195", "0.6579519", "0.6577587", "0.65721154", "0.6565623", "0.6557108", "0.6539739", "0.6537985", "0.6537985", "0.6537985", "0.6536165", "0.6534382", "0.6517841", "0.6515311", "0.6499132", "0.64915127", "0.6477725", "0.6471376", "0.64672667", "0.6460644", "0.64568365", "0.64568365", "0.64568365", "0.64568365", "0.64568365", "0.64568365", "0.6450724", "0.6444319", "0.64340013", "0.64226305", "0.6418581", "0.6411186", "0.6406764", "0.6404094", "0.6403889", "0.6401139", "0.63899916", "0.6383538", "0.6382906", "0.6366891", "0.63663214", "0.6364812", "0.63598144", "0.6311986", "0.63105315", "0.63000685", "0.6299287", "0.6298824", "0.62952393", "0.62690663", "0.62613267", "0.62608194", "0.62521356", "0.6243117", "0.62309164", "0.62266254", "0.6223681", "0.6210586", "0.6198598", "0.6198207", "0.61893564", "0.6187054", "0.6163765", "0.6154083", "0.61292756", "0.6128053", "0.61114997", "0.61042815", "0.61033833", "0.6098966", "0.6072902", "0.6071631" ]
0.0
-1
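Both rows above pair an edge-listing docstring with a grid-based positive (get_successors / get_predecessors), while many of the mined negatives implement the same idea on adjacency-list graphs. A minimal illustrative sketch, not taken from the dataset, of the behaviour the two queries describe:

class DiGraph:
    # Tiny directed graph used only to illustrate the two queries above.
    def __init__(self):
        self._succ = {}  # vertex -> {neighbour: weight}

    def add_edge(self, u, v, weight=1):
        self._succ.setdefault(u, {})[v] = weight
        self._succ.setdefault(v, {})  # register v even if it has no out-edges

    def out_edges(self, vertex):
        # "List of outgoing edges from a vertex."
        return [(vertex, v, w) for v, w in self._succ.get(vertex, {}).items()]

    def in_edges(self, vertex):
        # "List of incoming edges to a vertex."
        return [(u, vertex, w)
                for u, targets in self._succ.items()
                for v, w in targets.items() if v == vertex]

g = DiGraph()
g.add_edge("a", "b", 2)
g.add_edge("c", "b", 5)
print(g.out_edges("a"))  # [('a', 'b', 2)]
print(g.in_edges("b"))   # [('a', 'b', 2), ('c', 'b', 5)]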
This is the main script for the bigmacc process. It iterates through various CEA and bigmacc operations for each key (e.g. 01011101). It ends by saving a sample of the hourly results across the key for each building in a NetCDF file and then wiping the project files to reset them for the next iteration.
def run(config): locator = cea.inputlocator.InputLocator(config.scenario) print('Key in run') print(config.bigmacc.key) i = config.bigmacc.key print(i) # SCENARIO SETUP --- config.general.project = os.path.join(config.bigmacc.data, config.general.parent, i) print(config.general.project) cea.datamanagement.data_initializer.main(config) # use the scenario code to set the year for the lca and other operations that need the current year pathway_code = config.general.parent pathway_items = pathway_code.split('_') scenario_year = int(pathway_items[1]) config.emissions.year_to_calculate = scenario_year bigmacc_outputs_path = os.path.join(config.bigmacc.data, config.general.parent, 'bigmacc_out', config.bigmacc.round) scen_check = pd.read_csv(os.path.join(bigmacc_outputs_path, 'logger.csv'), index_col='Unnamed: 0') experiment_key = 'exp_{}'.format(i) print(experiment_key) keys = [int(x) for x in str(i)] if experiment_key in scen_check['Experiments'].values.tolist(): print('Experiment was finished previously, moving to next.') pass else: print('START: experiment {}.'.format(i)) # INITIALIZE TIMER --- t0 = time.perf_counter() if os.path.exists(os.path.join(config.bigmacc.data, config.general.parent, i)): print(' - Folder exists for experiment {}.'.format(i)) else: os.mkdir(os.path.join(config.bigmacc.data, config.general.parent, i)) print(' - Folder does not exist for experiment {}, creating now.'.format(i)) # run the archetype mapper to leverage the newly loaded typology file and set parameters print(' - Running archetype mapper for experiment {} to remove changes made in the last experiment.'.format(i)) cea.datamanagement.archetypes_mapper.main(config) # run the rule checker to set the scenario parameters print(' - Running rule checker for experiment {}.'.format(i)) cea.bigmacc.bigmacc_rules.main(config) # SIMULATIONS --- print(' - Run radiation is {}.'.format(config.bigmacc.runrad)) print(' - Write sensor data is {}.'.format(config.radiation.write_sensor_data)) # checking on need for radiation simulation if config.bigmacc.runrad == True: # this nested statement is for when we rerun the simulations and no longer need to run the unique radiation if config.bigmacc.rerun != True: print(' - Running radiation simulation for experiment {}.'.format(i)) if os.path.exists(locator.get_radiation_building('B000')): print(' - Radiation folder exists for experiment {}, copying.'.format(i)) else: print(' - Radiation running for experiment {}.'.format(i)) cea.resources.radiation_daysim.radiation_main.main(config) else: # print(' - Copying radiation simulation data from previous run for experiment {}.'.format(i)) old_rad_files = os.path.join(config.bigmacc.data, config.general.parent, i, config.general.scenario_name, 'outputs', 'data', 'solar-radiation') # distutils.dir_util.copy_tree(old_rad_files, locator.get_solar_radiation_folder()) else: radfiles = config.bigmacc.copyrad # print(' - Copying radiation results from {}.'.format(radfiles)) # distutils.dir_util.copy_tree(radfiles, locator.get_solar_radiation_folder()) print(' - Experiment {} does not require new radiation simulation.'.format(i)) # running demand forecasting if os.path.exists(locator.get_schedule_model_file('B000')): print(' - Schedules exist for experiment {}.'.format(i)) else: print(' - Schedule maker running for experiment {}.'.format(i)) schedule_maker.main(config) # check to see if we need to rerun demand or if we can copy if config.bigmacc.rerun != True: print(' - Running demand simulation for experiment {}.'.format(i)) 
cea.demand.demand_main.main(config) else: if keys[0] == 1: print(' - Running demand simulation for experiment {}.'.format(i)) cea.demand.demand_main.main(config) elif keys[6] == 1: print(' - Running demand simulation for experiment {}.'.format(i)) cea.demand.demand_main.main(config) else: cea.demand.demand_main.main(config) # print(' - Looking for demand results data from previous run for experiment {}.'.format(i)) # old_demand_files = os.path.join(config.bigmacc.data, config.general.parent, i, # config.general.scenario_name, 'outputs', 'data', 'demand') # if os.path.exists(old_demand_files): # # print(' - Copy demand results files from previous run of experiment {}.'.format(i)) # # distutils.dir_util.copy_tree(old_demand_files, locator.get_demand_results_folder()) # pass # else: # print(' - No results found.') # print(' - Running demand simulation for experiment {}.'.format(i)) # cea.demand.demand_main.main(config) if config.bigmacc.pv == True: print(' - Run PV is {}.'.format(config.bigmacc.pv)) if config.bigmacc.rerun == True: print(' - Looking for radiation simulation data from previous run for experiment {}.'.format(i)) old_pv_files = os.path.join(config.bigmacc.data, config.general.parent, i, config.general.scenario_name, 'outputs', 'data', 'potentials', 'solar') if os.path.exists(old_pv_files): # print(' - Copying PV files from previous run of experiment {}.'.format(i)) # distutils.dir_util.copy_tree(old_pv_files, locator.solar_potential_folder()) pass else: print(' - PV files do not exist for previous run of experiment {} at {}.'.format(i, old_pv_files)) print(' - Running PV simulation for experiment {}.'.format(i)) photovoltaic.main(config) else: # if PV simulation is needed, run it. print(' - Running PV simulation for experiment {}.'.format(i)) photovoltaic.main(config) print('Run water-body exchange is {}.'.format(config.bigmacc.water)) # if water-body simulation is needed, run it. if config.bigmacc.water == True: print(' - Running water body simulation for experiment {}.'.format(i)) water.main(config) # recalculating the supply split between grid and ng in the websrook DH if keys[4] == 1: print(' - Do not run district heat recalculation.') else: print(' - Run district heat recalculation.') cea.bigmacc.wesbrook_DH.main(config) if keys[7] == 1: print(' - PV use detected. 
Adding PV generation to demand files.') util.write_pv_to_demand(config) else: print(' - No PV use detected.') # running the emissions and costing calculations print(' - Run cost and emissions scripts.') cea.analysis.costs.system_costs.main(config) cea.analysis.lca.main.main(config) # clone out the simulation inputs and outputs directory print(' - Transferring results directory for experiment {}.'.format(i)) new_inputs_path = os.path.join(config.bigmacc.data, config.general.parent, i, config.general.scenario_name, 'inputs') new_outputs_path = os.path.join(config.bigmacc.data, config.general.parent, i, config.general.scenario_name, 'outputs', 'data') if config.bigmacc.rerun != True: distutils.dir_util.copy_tree(locator.get_data_results_folder(), new_outputs_path) distutils.dir_util.copy_tree(locator.get_input_folder(), new_inputs_path) time_elapsed = time.perf_counter() - t0 # save log information log_df = pd.read_csv(os.path.join(bigmacc_outputs_path, 'logger.csv'), index_col='Unnamed: 0') log_df = log_df.append(pd.DataFrame({'Experiments': 'exp_{}'.format(i), 'Completed': 'True', 'Experiment Time': '%d.2 seconds' % time_elapsed, 'Unique Radiation': config.bigmacc.runrad}, index=[0]), ignore_index=True) log_df.to_csv(os.path.join(bigmacc_outputs_path, 'logger.csv')) log_df.to_csv(r"C:\Users\justi\Desktop\126logger_backup.csv", ) # write netcdf of hourly_results netcdf_writer.main(config, time='hourly') if config.bigmacc.rerun != True: shutil.rmtree(locator.get_costs_folder()) shutil.rmtree(locator.get_demand_results_folder()) shutil.rmtree(locator.get_lca_emissions_results_folder()) shutil.rmtree(locator.get_solar_radiation_folder()) shutil.rmtree(locator.get_potentials_folder()) else: print(' - Rerun does not require purging of the files.') # when the setpoint is changed it is in a deeper database than the archetypes mapper can reach so reset it here if keys[0] == 1: cea.datamanagement.data_initializer.main(config) else: pass print('END: experiment {}. \n'.format(i))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def go():\n ##########\n #\n # MB19284\n #\n ##########\n\n ##########\n # Kp-band reduction\n ##########\n\n target = 'mb19284'\n sci_files = ['i200822_a011{0:03d}_flip'.format(ii) for ii in range(2, 5+1)]\n sci_files += ['i200822_a012{0:03d}_flip'.format(ii) for ii in range(2, 25+1)]\n sky_files = ['i200822_a018{0:03d}_flip'.format(ii) for ii in range(2, 6+1)]\n refSrc = [917.75, 1033.5] # This is the target\n # Alternative star to try (bright star to bottom of target): [1015, 581.9]\n \n sky.makesky(sky_files, target, 'kp_tdOpen', instrument=osiris)\n data.clean(sci_files, target, 'kp_tdOpen', refSrc, refSrc, field=target, instrument=osiris)\n data.calcStrehl(sci_files, 'kp_tdOpen', field=target, instrument=osiris)\n data.combine(sci_files, 'kp_tdOpen', epoch, field=target,\n trim=0, weight='strehl', submaps=3, instrument=osiris)\n\n ##########\n #\n # KB200101\n #\n ##########\n\n ##########\n # Kp-band reduction\n ##########\n\n # -- If you have more than one position angle, make sure to\n # clean them seperatly.\n # -- Strehl and Ref src should be the pixel coordinates of a bright\n # (but non saturated) source in the first exposure of sci_files.\n # -- If you use the OSIRIS image, you must include the full filename in the list. \n target = 'kb200101'\n sci_files = ['i200822_a014{0:03d}_flip'.format(ii) for ii in range(2, 28+1)]\n sci_files += ['i200822_a015{0:03d}_flip'.format(ii) for ii in range(2, 5+1)]\n sci_files += ['i200822_a016{0:03d}_flip'.format(ii) for ii in range(2, 5+1)]\n sky_files = ['i200822_a017{0:03d}_flip'.format(ii) for ii in range(2, 6+1)]\n refSrc = [975, 1006] # This is the target\n # Alternative star to try (bright star to right of target): [1158, 994]\n \n sky.makesky(sky_files, target, 'kp_tdOpen', instrument=osiris)\n data.clean(sci_files, target, 'kp_tdOpen', refSrc, refSrc, field=target, instrument=osiris)\n data.calcStrehl(sci_files, 'kp_tdOpen', field=target, instrument=osiris)\n data.combine(sci_files, 'kp_tdOpen', epoch, field=target,\n trim=1, weight='strehl', submaps=3, instrument=osiris)", "def main():\n start = 1554994269 # unix timestamp, fixed for reproducability\n stop = start + 850 * 61 # number of acqs * time between acqs\n sampling_rate = 512. 
# Hz\n\n # Nyquist freq needs to be larger than frequency of J-peaks\n nyquist = sampling_rate / 2 + 1\n assert nyquist > 250\n\n # Test single mass for now\n mass = 2e-15\n result = run_sim(mass, start, stop, sampling_rate)\n\n sim_name = 'sim_mass_{:g}_rate_{:g}.npz'.format(mass, sampling_rate)\n np.savez(sim_name, times=result[0], amplitudes=result[1])\n print('saved: {}'.format(sim_name))", "def main():\n # checking the directory\n cwd = os.getcwd()\n print(f'The working directory: {cwd}')\n # counting time \n start_time = time.process_time()\n # passing args\n arg = parse_arguments()\n sub_dir = arg.sub_dir\n dir_out = arg.dir_out\n file_amb = 'csv_to_clean'\n names_ambigous = defaultdict(str)\n with open(file_amb, 'r') as fh:\n for line in fh:\n name = line.strip().split('/')[2]\n names_ambigous[name] = names_ambigous.get(name, '')\n names_ambigous[name] += line.strip()\n print(f'number files: {len(names_ambigous)}')\n # checking if the output directory exist\n # if not make it\n f_pwd = os.path.join('Results', 'kmer_counts')\n # get the genus names\n cnt = 0\n for name, filename in names_ambigous.items():\n cleaned = get_csv_clean(filename)\n full_path = os.path.join(f_pwd, name)\n if os.path.exists(full_path):\n print(f'The path {full_path} exist')\n pass\n else:\n os.makedirs(full_path)\n csv_name = f'{full_path}/{name}_k2_8_chr.csv'\n print(f'Checking the full path {csv_name}')\n with open(csv_name, 'w') as fout:\n for km, cn in cleaned.items():\n fout.write(f'{km},{cn}\\n')\n cnt += 1\n # get final time of the script\n end = time.process_time()\n total_time = end - start_time\n print(f'The script takes {total_time} to finish!')\n print(f'Where read and manipulated {cnt} files')\n print('Done!')", "def main(starttime, hstart, hstop, cfg):\n\n if cfg.target is tools.Target.ICONOEM or cfg.target is tools.Target.ICONART:\n\n logging.info('ICON chemistry data for IC/BC')\n\n # Wait for meteo to finish first\n tools.check_job_completion(cfg.log_finished_dir,\"meteo\")\n\n tools.create_dir(cfg.icon_input_oae, \"online emissions input\")\n tools.create_dir(cfg.icon_input_icbc, \"icon_input_icbc\")\n tools.create_dir(cfg.icon_input_icbc_processed, \"icon_input_icbc_processed\")\n\n starttime_real = starttime + timedelta(hours = hstart)\n\n #-----------------------------------------------------\n # Remap chemistry initial conditions\n #-----------------------------------------------------\n logfile = os.path.join(cfg.log_working_dir, \"ic_chem\")\n logfile_finish = os.path.join(cfg.log_finished_dir,\"ic_chem\")\n\n # Write remap_chem namelist\n in_filename = os.path.join(cfg.input_root_chem,starttime.strftime(cfg.chem_nameformat)+'.grb')\n out_filename = os.path.join(cfg.icon_input,'oae',cfg.oae_chem_init_nc+'_dry.nc')\n in_grid_filename = in_filename\n out_grid_filename = os.path.join(cfg.input_root_grid,cfg.dynamics_grid_filename)\n with open(os.path.join(cfg.case_dir,cfg.icontools_parameter['icontools_namelist_remap_chem'])) as input_file:\n to_write = input_file.read()\n output_nml = os.path.join(cfg.icon_work, 'icontools_remap_chem_ic.namelist')\n with open(output_nml, \"w\") as outf:\n to_write = to_write.format(cfg=cfg,\n in_filename=in_filename,\n out_filename=out_filename,\n in_grid_filename=in_grid_filename,\n out_grid_filename=out_grid_filename)\n outf.write(to_write)\n\n # Write remapfields namelist\n with open(os.path.join(cfg.case_dir,cfg.icontools_parameter['icontools_namelist_remapfields_chem_ic'])) as input_file:\n to_write = input_file.read()\n output_fields = 
os.path.join(cfg.icon_work, 'icontools_remapfields_chem_ic.namelist')\n with open(output_fields, \"w\") as outf:\n to_write = to_write.format(cfg=cfg)\n outf.write(to_write)\n\n # Write run script (remap_ic.job)\n with open(os.path.join(cfg.case_dir,cfg.icontools_parameter['icontools_remap_chem_ic_runjob'])) as input_file:\n to_write = input_file.read()\n output_run = os.path.join(cfg.icon_work, \"remap_chem_ic.job\")\n with open(output_run, \"w\") as outf:\n outf.write(to_write.format(\n cfg=cfg,\n logfile=logfile, logfile_finish=logfile_finish)\n )\n exitcode = subprocess.call([\"sbatch\", \"--wait\",\n os.path.join(cfg.icon_work, 'remap_chem_ic.job')])\n if exitcode != 0:\n raise RuntimeError(\"sbatch returned exitcode {}\".format(exitcode))\n logging.info(\"Remapped initial conditions with icontools\")\n\n os.remove(output_nml)\n os.remove(output_fields)\n os.remove(output_run)\n\n # Transform initial data from dry to wet mixing ratios\n cdo.expr(\"'CH4w=CH4*(1-QV)'\",input=out_filename,output='temp_file_01.nc')\n cdo.selvar(\"LNSP\",input=out_filename,output='temp_file_03.nc')\n os.remove(out_filename)\n # Rename variable to match ICON internal name with CDO:\n out_filename = os.path.join(cfg.icon_input,'oae',cfg.oae_chem_init_nc)\n cdo.chname(\"CH4w\",\"CH4\",input='temp_file_01.nc',output='temp_file_02.nc')\n cdo.merge(input='temp_file_02.nc temp_file_03.nc',output=out_filename)\n\n os.remove('temp_file_01.nc')\n os.remove('temp_file_02.nc')\n os.remove('temp_file_03.nc')\n \n\n\n #-----------------------------------------------------\n # Remap chem LBC\n #-----------------------------------------------------\n logfile = os.path.join(cfg.log_working_dir, \"lbc_chem\")\n logfile_finish = os.path.join(cfg.log_finished_dir,\"lbc_chem\")\n\n with open(os.path.join(cfg.case_dir,cfg.icontools_parameter['icontools_namelist_remapfields_chem_lbc'])) as input_file:\n to_write = input_file.read()\n output_nml_fields = os.path.join(cfg.icon_work, 'icontools_remapfields_chem_lbc.namelist')\n with open(output_nml_fields, \"w\") as outf:\n to_write = to_write.format(cfg=cfg)\n outf.write(to_write)\n\n for time in tools.iter_hours(starttime, hstart, hstop, cfg.meteo_inc):\n\n # Write remap_lbc namelist\n in_grid_filename = os.path.join(cfg.input_root_chem,starttime.strftime(cfg.chem_nameformat)+'.grb')\n in_filename = os.path.join(cfg.input_root_chem,time.strftime(cfg.chem_nameformat)+'.grb')\n out_grid_filename = os.path.join(cfg.icon_input_grid,cfg.lateral_boundary_grid)\n out_filename = os.path.join(cfg.icon_input_icbc,time.strftime(cfg.chem_nameformat)+'_lbc')\n with open(os.path.join(cfg.case_dir,cfg.icontools_parameter['icontools_namelist_remap'])) as input_file:\n to_write = input_file.read()\n output_nml_lbc = os.path.join(cfg.icon_work, 'icontools_remap_chem_lbc.namelist')\n with open(output_nml_lbc, \"w\") as outf:\n to_write = to_write.format(cfg=cfg,\n in_grid_filename=in_grid_filename,\n in_filename=in_filename,\n out_grid_filename=out_grid_filename,\n out_filename=out_filename)\n outf.write(to_write)\n\n # Write run script (remap_chem_lbc.job)\n with open(os.path.join(cfg.case_dir,cfg.icontools_parameter['icontools_remap_chem_lbc_runjob'])) as input_file:\n to_write = input_file.read()\n output_run = os.path.join(cfg.icon_work, \"remap_chem_lbc.job\")\n with open(output_run, \"w\") as outf:\n outf.write(to_write.format(\n cfg=cfg,\n logfile=logfile, logfile_finish=logfile_finish)\n )\n exitcode = subprocess.call([\"sbatch\", \"--wait\",\n os.path.join(cfg.icon_work, 
'remap_chem_lbc.job')])\n if exitcode != 0:\n raise RuntimeError(\"sbatch returned exitcode {}\".format(exitcode))\n logging.info(\"Remapped boundary conditions at {} with icontools\".format(time))\n\n os.remove(output_nml_lbc)\n os.remove(output_run)\n\n os.remove(output_nml_fields)\n\n\n #-----------------------------------------------------\n # Merge chem files with meteo files using cdo\n #-----------------------------------------------------\n\n for time in tools.iter_hours(starttime, hstart, hstop, cfg.meteo_inc):\n\n chem_file = os.path.join(cfg.icon_input_icbc,time.strftime(cfg.chem_nameformat)+'_lbc')\n meteo_file = os.path.join(cfg.icon_input_icbc, time.strftime(cfg.source_nameformat)+'_lbc.nc')\n var_file = os.path.join(cfg.icon_input_icbc, time.strftime(cfg.source_nameformat)+'_lbc_var.nc')\n transform_file = os.path.join(cfg.icon_input_icbc, time.strftime(cfg.source_nameformat)+'_lbc_transform.nc')\n name_file = os.path.join(cfg.icon_input_icbc, time.strftime(cfg.source_nameformat)+'_lbc_name.nc')\n processed_file = os.path.join(cfg.icon_input_icbc_processed, time.strftime(cfg.source_nameformat)+'_lbc.nc')\n\n # Select variable with CDO\n cdo.selvar(\"CH4\",\"QV\",input=chem_file,output=var_file)\n # Transform to wet-mixing ratios with CDO\n cdo.expr(\"'CH4w=CH4*(1-QV)'\",input=var_file,output=transform_file)\n # Rename variable to match ICON internal name with CDO:\n cdo.chname(\"CH4w\",\"oem_tracer_1\",input=transform_file,output=name_file)\n # Merge with CDO\n cdo.merge(input=name_file+' '+meteo_file,output=processed_file)\n\n # Delete temporary files\n os.remove(chem_file)\n os.remove(var_file)\n os.remove(transform_file)\n os.remove(name_file)\n\n logging.info(\"Merged chem variables to file {}\".format(meteo_file))\n\n\n\n # If COSMO (and not ICON):\n else:\n inv_to_process = []\n if cfg.target is tools.Target.COSMOGHG:\n try:\n CAMS = dict(fullname = \"CAMS\",\n nickname = \"cams\",\n executable = \"cams4int2cosmo\",\n indir = cfg.cams_dir_orig,\n outdir = cfg.cams_dir_proc,\n param = cfg.cams_parameters)\n inv_to_process.append(CAMS)\n except AttributeError:\n pass\n try:\n CT = dict(fullname = \"CarbonTracker\",\n nickname = \"ct\",\n executable = \"ctnoaa4int2cosmo\",\n indir = cfg.ct_dir_orig,\n outdir = cfg.ct_dir_proc,\n param = cfg.ct_parameters)\n inv_to_process.append(CT)\n except AttributeError:\n pass\n elif cfg.target is tools.Target.COSMOART:\n try:\n MOZART = dict(fullname = 'MOZART',\n nickname = 'mozart',\n executable = 'mozart2int2lm',\n indir = cfg.mozart_file_orig,\n outdir = cfg.mozart_dir_proc,\n param = [{'inc' : cfg.mozart_inc,\n 'suffix' : cfg.mozart_prefix}])\n inv_to_process.append(MOZART)\n except AttributeError:\n pass\n else:\n # Unknown target\n raise RuntimeError(\"Unknown target: {}\".format(cfg.target))\n\n # TO DO \n #MOZART = dict(fullname=\"MOZART\", nickname=\"mozart\",executable=\"cams4int2cosmo\")\n \n logging.info(\"Processing \" + \", \".join([i[\"fullname\"] for i in inv_to_process])+\" data\")\n\n scratch_path = os.path.join(cfg.int2lm_input,'icbc')\n tools.create_dir(scratch_path, \"icbc input\")\n\n for inv in inv_to_process:\n logging.info(inv[\"fullname\"]+\" files\")\n tools.create_dir(inv[\"outdir\"], \"processed \" + inv[\"fullname\"])\n #process_inv(starttime,hstart,hstop,increment,inv,cfg)\n \n for p in inv[\"param\"]:\n inc = p[\"inc\"]\n for time in tools.iter_hours(starttime, hstart, hstop, inc):\n logging.info(time)\n\n filename = 
os.path.join(inv[\"outdir\"],p[\"suffix\"]+\"_\"+time.strftime(\"%Y%m%d%H\")+\".nc\")\n if not os.path.exists(filename):\n logging.info(filename)\n try:\n to_call = getattr(tools, inv[\"executable\"])\n to_call.main(time,inv[\"indir\"],inv[\"outdir\"],p)\n except:\n logging.error(\"Preprocessing \"+inv[\"fullname\"] + \" data failed\")\n raise\n\n # copy to (temporary) run input directory\n tools.copy_file(filename, scratch_path)\n\n logging.info(\"OK\")", "def main_loop(csd_profile, csd_seed, total_ele, num_init_srcs=1000):\n csd_name = csd_profile.func_name\n print 'Using sources %s - Seed: %d ' % (csd_name, csd_seed)\n\n #TrueCSD\n t_csd_x, t_csd_y, t_csd_z, true_csd = generate_csd_3D(csd_profile, csd_seed,\n start_x=0., end_x=1., \n start_y=0., end_y=1., \n start_z=0., end_z=1.,\n res_x=100, res_y=100,\n res_z=100)\n\n #Electrodes\n ele_lims = [0.15, 0.85] #square grid, xy min,max limits\n ele_res = int(np.ceil(total_ele**(3**-1))) #resolution of electrode grid\n ele_pos, pots = electrode_config(ele_lims, ele_res, true_csd, t_csd_x, t_csd_y, t_csd_z)\n ele_x = ele_pos[:, 0]\n ele_y = ele_pos[:, 1]\n ele_z = ele_pos[:, 2]\n \n #kCSD estimation\n gdX = 0.05\n gdY = 0.05\n gdZ = 0.05\n x_lims = [.0,1.] #CSD estimation place\n y_lims = [.0,1.]\n z_lims = [.0,1.]\n params = {'h':50., \n 'gdX': gdX, 'gdY': gdY, 'gdZ': gdZ,\n 'xmin': x_lims[0], 'xmax': x_lims[1], \n 'ymin': y_lims[0], 'ymax': y_lims[1],\n 'zmin': y_lims[0], 'zmax': y_lims[1],\n 'ext': 0.0, 'n_srcs_init': num_init_srcs}\n tic = time.time() #time it\n k, est_csd = do_kcsd(ele_pos, pots, h=50., \n gdx=gdX, gdy= gdY, gdz=gdZ,\n xmin=x_lims[0], xmax=x_lims[1], \n ymin=y_lims[0], ymax=y_lims[1],\n zmin=z_lims[0], zmax=z_lims[1],\n n_src_init=num_init_srcs, src_type='step')\n toc = time.time() - tic\n\n #RMS of estimation - gives estimate of how good the reconstruction was\n chr_x, chr_y, chr_z, test_csd = generate_csd_3D(csd_profile, csd_seed,\n start_x=x_lims[0], end_x=x_lims[1],\n start_y=y_lims[0], end_y=y_lims[1],\n start_z=z_lims[0], end_z=z_lims[1],\n res_x=int((x_lims[1]-x_lims[0])/gdX), \n res_y=int((y_lims[1]-y_lims[0])/gdY),\n res_z=int((z_lims[1]-z_lims[0])/gdZ))\n rms = np.linalg.norm(abs(test_csd - est_csd[:,:,:,0]))\n rms /= np.linalg.norm(test_csd)\n\n #Plots\n title = str(k.lambd)+','+str(k.R)+', '+str(k.cv_error)+', '+str(rms)+', '+str(toc)\n save_as = csd_name+'_'+str(csd_seed)+'of'+str(total_ele)\n #save_as = csd_name+'_'+str(num_init_srcs)+'_'+str(total_ele)\n make_plots(title, \n chr_x, chr_y, chr_z, test_csd,\n ele_x, ele_y, ele_z, pots,\n k.estm_x, k.estm_y, k.estm_z, est_csd) \n #save\n result_kcsd = [k.lambd, k.R, k.cv_error, rms, toc]\n return est_csd, result_kcsd", "def run_process(hrc):\n#\n#--- set conditions for either hrc-i or hrc s\n#\n if hrc == 'hrc_i':\n out_list = 'hrc_i_list'\n data_dir = '/data/hrc/i/'\n inst = 'i'\n else:\n out_list = 'hrc_s_list'\n data_dir = '/data/hrc/s/'\n inst = 's'\n#\n#--- make a list of obsids\n#\n cmd = 'ls -d ' + data_dir + '* > ' + zspace\n os.system(cmd)\n data = mcf.read_data_file(zspace, remove=1)\n hlist = []\n for ent in data:\n atemp = re.split('\\/', ent)\n obsid = atemp[-1]\n if mcf.is_neumeric(obsid):\n hlist.append(obsid)\n\n# if hrc == 'hrc_i':\n# print(\"HRC I : \" + str(hlist))\n# else:\n# print(\"HRC S : \" + str(hlist))\n# \n for obsid in hlist:\n obsid = str(int(float(obsid)))\n\n with open(out_list, 'w') as fo:\n fo.write(str(obsid) + '\\n')\n cmd = 'rm -rf ' + data_dir + obsid + \"analysis/*\"\n os.system(cmd)\n#\n#--- extract fits data needed for 
analysis\n#\n chk = extract_hrc_data(obsid, data_dir)\n if chk == False:\n print(\"Not all data are available\")\n continue\n\n if hrc == 'hrc_i':\n cmd = 'csh -f ' + bin_dir + 'repro_all_new.csh hrc_i_list'\n else:\n cmd = 'csh -f ' + bin_dir + 'repro_all_S_new.csh hrc_s_list'\n\n try:\n run_ciao(cmd)\n cdir = data_dir + '/' + str(obsid)\n if os.path.isdir(cdir):\n cmd = 'chgrp -R hat ' + cdir \n os.system(cmd)\n cmd = 'chmod -R 775 ' + cdir \n os.system(cmd)\n#\n#--- directory name should be 5 digit\n#\n test = int(float(obsid))\n if test < 10000:\n chk = mcf.add_leading_zero(obsid, 5)\n odir = data_dir + '/' + str(chk)\n if os.path.isdir(odir):\n cmd = 'rm -rf ' + odir\n os.system(cmd)\n cmd = 'mv ' + cdir + ' ' + odir\n os.system(cmd)\n else:\n cmd = 'mv ' + cdir + ' ' + odir\n os.system(cmd)\n except:\n pass\n\n mcf.rm_files(out_list)\n correct_naming(obsid, inst)\n\n #chk_proccess_status(inst, hlist)", "def generate_megafile():\n\n print(\"\\nFetching testing dataset…\")\n testing = get_testing()\n\n print(\"\\nFetching ECDC dataset…\")\n ecdc = get_ecdc()\n\n location_mismatch = set(testing.location).difference(set(ecdc.location))\n for loc in location_mismatch:\n print(f\"<!> Location '{loc}' has testing data but is absent from ECDC data\")\n\n print(\"\\nFetching OxCGRT dataset…\")\n cgrt = get_cgrt()\n\n all_covid = (\n ecdc\n .merge(testing, on=[\"date\", \"location\"], how=\"outer\")\n .merge(cgrt, on=[\"date\", \"location\"], how=\"left\")\n .sort_values([\"location\", \"date\"])\n )\n\n # Add ISO codes\n print(\"Adding ISO codes…\")\n iso_codes = pd.read_csv(os.path.join(INPUT_DIR, \"iso/iso3166_1_alpha_3_codes.csv\"))\n\n missing_iso = set(all_covid.location).difference(set(iso_codes.location))\n if len(missing_iso) > 0:\n print(missing_iso)\n raise Exception(\"Missing ISO code for some locations\")\n\n all_covid = iso_codes.merge(all_covid, on=\"location\")\n\n # Add continents\n print(\"Adding continents…\")\n continents = pd.read_csv(\n os.path.join(INPUT_DIR, \"owid/continents.csv\"),\n names=[\"_1\", \"iso_code\", \"_2\", \"continent\"],\n usecols=[\"iso_code\", \"continent\"],\n header=0\n )\n\n all_covid = continents.merge(all_covid, on=\"iso_code\", how=\"right\")\n\n # Add macro variables\n # - the key is the name of the variable of interest\n # - the value is the path to the corresponding file\n macro_variables = {\n \"population\": \"un/population_2020.csv\",\n \"population_density\": \"wb/population_density.csv\",\n \"median_age\": \"un/median_age.csv\",\n \"aged_65_older\": \"wb/aged_65_older.csv\",\n \"aged_70_older\": \"un/aged_70_older.csv\",\n \"gdp_per_capita\": \"wb/gdp_per_capita.csv\",\n \"extreme_poverty\": \"wb/extreme_poverty.csv\",\n \"cardiovasc_death_rate\": \"gbd/cardiovasc_death_rate.csv\",\n \"diabetes_prevalence\": \"wb/diabetes_prevalence.csv\",\n \"female_smokers\": \"wb/female_smokers.csv\",\n \"male_smokers\": \"wb/male_smokers.csv\",\n \"handwashing_facilities\": \"un/handwashing_facilities.csv\",\n \"hospital_beds_per_thousand\": \"owid/hospital_beds.csv\",\n \"life_expectancy\": \"owid/life_expectancy.csv\",\n \"human_development_index\": \"un/human_development_index.csv\",\n }\n all_covid = add_macro_variables(all_covid, macro_variables)\n\n print(\"Writing to CSV…\")\n all_covid.to_csv(os.path.join(DATA_DIR, \"owid-covid-data.csv\"), index=False)\n\n print(\"Writing to XLSX…\")\n all_covid.to_excel(os.path.join(DATA_DIR, \"owid-covid-data.xlsx\"), index=False)\n\n print(\"Writing to JSON…\")\n df_to_json(all_covid, 
os.path.join(DATA_DIR, \"owid-covid-data.json\"), macro_variables.keys())\n\n # Store the last updated time\n timestamp_filename = os.path.join(DATA_DIR, \"owid-covid-data-last-updated-timestamp.txt\")\n with open(timestamp_filename, \"w\") as timestamp_file:\n timestamp_file.write(datetime.utcnow().replace(microsecond=0).isoformat())\n\n print(\"All done!\")", "def main(folder, quiet=0):\n\n if quiet:\n output_stream = StringIO()\n else:\n output_stream = sys.stdout\n\n\n\n color1 = \"I4\" #filter system for first color of CMD\n color2 = \"M1\" #filter system for second color of CMD\n zeromagc1 = zero.zero_mag[color1]\n zeromagc2 = zero.zero_mag[color2]\n min_mag = 8. #minimal observation limit\n max_mag = 0. #maximal observation limit\n\n#getting file list\n files = sorted(os.listdir('%s/%s' % (os.getcwdu(), folder))) \n out = []\n\n for fil in files:\n#only using files created by the automated simulation\n if fil.startswith('sim_') and not 'settings' in fil.encode(\"ascii\"):\n print(\"%s/%s\" % (folder,fil.encode(\"ascii\")), file=output_stream)\n \n\n # Read in\n hdulist = fits.open('%s/%s' %(folder,fil))\n data = hdulist[1].data\n\n #calculating magnitudes from fluxes and converting to CMD-data\n x = -2.5*(np.log10(data['c%s' % color1]/zeromagc1) - np.log10(data['c%s' % color2]/zeromagc2))\n y = -2.5*(np.log10(data['c%s' % color2]/zeromagc2))\n\n \n sel = np.logical_and( (y > -10./3. * (x-1.) + 10.), np.logical_and(max_mag < y, y < min_mag))\n sel = np.logical_and(sel, y < -x + 12.)\n n = sum(sel)\n t = Table(hdulist[1].data)\n if 'sel' in t.columns:\n t.remove_column('sel')\n t.add_column(Column(name='sel', data=sel.astype('int')))\n \n hdulist[1].data = np.array(t)\n tmp, av, apera, age = fil.split('_')\n fits.update('%s/%s' %(folder,fil), np.array(t), ext = 1, clobber=True)\n out.append([av, apera, age, n])\n\n #writing obtained data to \"folder/__expected_number\"\n head = ['#', 'AV', 'Aperature_size', 'Age', 'Expected_number']\n f = open('%s/__expected_number' % folder, 'w')\n f.write(','.join(head)+'\\n' )\n np.savetxt(f, np.asarray(out).astype(int))\n f.close()\n \n print (\"Analysed %s files and saved output to %s\" % (len(out),'%s/__expected_number' % folder), file=output_stream)", "def main(clean_dir, rsfc_dir, atlas_dir, subject, sessions, space, desc_list, n_jobs):\n os.system(f\"export OMP_NUM_THREADS={n_jobs}\")\n assert len(desc_list) == 2\n atlases = sorted(glob(op.join(atlas_dir, \"*\")))\n\n if sessions[0] is None:\n temp_ses = glob(op.join(clean_dir, subject, \"ses-*\"))\n if len(temp_ses) > 0:\n sessions = [op.basename(x) for x in temp_ses]\n\n for session in sessions:\n if session is not None:\n clean_subj_dir = op.join(clean_dir, subject, session, \"func\")\n rsfc_subj_dir = op.join(rsfc_dir, subject, session, \"func\")\n else:\n clean_subj_dir = op.join(clean_dir, subject, \"func\")\n rsfc_subj_dir = op.join(rsfc_dir, subject, \"func\")\n\n # Collect important files\n clean_subj_files = sorted(\n glob(\n op.join(\n clean_subj_dir, f\"*task-rest*_space-{space}*_desc-{desc_list[0]}_bold.nii.gz\"\n )\n )\n )\n\n if len(clean_subj_files) > 0:\n os.makedirs(rsfc_subj_dir, exist_ok=True)\n\n # ###################\n # RSFC\n # ###################\n for clean_subj_file in clean_subj_files:\n clean_subj_name = op.basename(clean_subj_file)\n prefix = clean_subj_name.split(\"desc-\")[0].rstrip(\"_\")\n\n mask_files = sorted(glob(op.join(clean_subj_dir, f\"{prefix}_desc-brain_mask.nii.gz\")))\n assert len(mask_files) == 1\n\n mask_name = os.path.basename(mask_files[0])\n 
mask_file = op.join(rsfc_subj_dir, mask_name)\n copyfile(mask_files[0], mask_file)\n\n print(f\"\\tProcessing {subject}, {session} files:\", flush=True)\n print(f\"\\t\\tClean: {clean_subj_file}\", flush=True)\n print(f\"\\t\\tMask: {mask_file}\", flush=True)\n\n for atlas in atlases:\n atlas_name = op.basename(atlas)\n atlas_imgs = sorted(glob(op.join(atlas, \"*.nii.gz\")))\n assert len(atlas_imgs) == 1\n atlas_img = atlas_imgs[0]\n\n lab_files = sorted(glob(op.join(atlas, \"*.txt\")))\n if len(lab_files) == 0:\n # Do not create label table file\n make_table = False\n else:\n assert len(lab_files) == 1\n lab_file = lab_files[0]\n make_table = True\n\n # Resample atlas\n atlas_img_res = op.join(rsfc_subj_dir, f\"{prefix}_desc-{atlas_name}_atlas.nii.gz\")\n if not op.exists(atlas_img_res):\n roi_resample(atlas_img, atlas_img_res, clean_subj_file)\n \n # Create label table\n lab_table = op.join(rsfc_subj_dir, f\"{prefix}_desc-{atlas_name}_labtable.niml.lt\")\n if (not op.exists(lab_table)) and (make_table):\n make_label_table(lab_file, lab_table, atlas_img_res)\n\n # Calculate RSFC\n rsfc_atlas_subj = op.join(rsfc_subj_dir, f\"{prefix}_desc-{atlas_name}\")\n if not op.exists(f\"{rsfc_atlas_subj}_000.netcc\"):\n roi2roi_conn(clean_subj_file, mask_file, atlas_img_res, rsfc_atlas_subj)", "def main():\n op = help()\n for t in [\"bowtie2\", \"samtools\", \"bamToBed\"]:\n if not isTool(t):\n logger.error(\"%s not exits! Please install through conda.\" % t)\n return\n if not os.path.exists(op.fqd):\n logger.error(\"Input %s not exists! Return.\" % op.fqd)\n return\n if len(glob(op.ref + \"*.bt2\")) == 0:\n logger.error(\"Bowtie2 reference not exists for prefix of %s! Return.\" %\n op.ref)\n return\n if not os.path.exists(op.output):\n os.makedirs(op.output, exist_ok=True)\n else:\n fs = glob(os.path.join(op.output, \"*\"))\n if len(fs) > 0:\n logger.info(\n \"Target output directory %s is not empty, may over-write some files.\"\n % op.output)\n\n #mapping\n data = preFqs(op.fqd)\n if len(data) == 0:\n logger.error(\n \"No matched _R1.fastq.gz and _R2.fastq.gz in %s. Return.\" %\n (op.fqd))\n return\n ref = op.ref\n sams = Parallel(n_jobs=op.number,backend=\"multiprocessing\")(\n delayed(tracMapping)(sample, fqs, ref, op.output, cpus=op.cpu)\n for sample, fqs in data.items())\n sams = [sam for sam in sams if sam is not None]\n\n #sam to bam and bedpe\n cpus = op.number * op.cpu\n ncpus = int(min(len(sams), cpus / 2))\n bedpes = Parallel(n_jobs=ncpus,backend=\"multiprocessing\")(delayed(sam2bamBedpe)(sam) for sam in sams)\n\n #cLoops2 qc\n cmd = \"cLoops2 qc -f %s -o bedpeQc -p %s\" % (\",\".join(bedpes),\n min(len(bedpes), cpus))\n callSys([cmd], logger)\n\n #combine report\n mata = parseBowtielog()\n matb = pd.read_csv(\"bedpeQc_bedpeQc.txt\", index_col=0, sep=\"\\t\")\n matb.index = [i.split(\"_all\")[0] for i in matb.index]\n for c in matb.columns:\n mata[c] = matb[c]\n mata.to_csv(\"tracPre_summary.txt\", sep=\"\\t\")\n cmd = \"rm bedpeQc_bedpeQc.txt\"\n os.system(cmd)", "def main(runID=00, store_export='datafile', evalperday=1):\n tnow = 0\n tstart = tm.time()\n\n # get lists for keeping count of cells.\n free_naives, free_memory, GC_waiting = new_lists()\n\n # get random number objects for uniform 0-1, ints for GCs\n RNs = Rands()\n RIs = RandInts()\n\n # get the premade pool of Ab sequences that bind a chosen Ag with the given\n # distribution of binding energies. 
An Ag of appropriate length is made\n # directly within the sequence repertoire function\n seq_list, E_list, AgEpitope = make_shaped_repertoire(RNs)\n\n # for the required number of naive cells in the system, make Abs and append\n # to free_naive list, same for unspecific memory cells\n for n in xrange(cf.naive_pool):\n newcell = make_naive(RNs, seq_list, AgEpitope, tnow)\n free_naives.append(newcell)\n\n for n in xrange(cf.memory_pool):\n newcell = make_memory(RNs, seq_list, AgEpitope, tnow)\n free_memory.append(newcell)\n\n # get Ag level over time\n Agcurve = Ag_density()\n\n # get available LFs over time\n LFcurve = LF_presence()\n\n # open event list, event structure: (execution time, type, GC, cell list)\n event_list = []\n\n # bookkeeping - general\n l_fm = [] # free memory\n mut_list = [] # for collecting all mutations and their effects\n\n if (store_export == 'datafile' or store_export == 'dictionary'):\n l_fn = [] # free naives\n l_GCs = [[] for i in range(cf.nGCs)] # cells in each GC\n ms_times = [[] for gc in range(cf.nGCs)] # times of memory prod./GC\n ms_vals = [[] for gc in range(cf.nGCs)] # quality of memory prod./GC\n ms_fams = [[] for gc in range(cf.nGCs)] # family of memory prod./GC\n ms_muts = [[] for gc in range(cf.nGCs)] # mutations of memory prod./GC\n # external or internal data storage\n if store_export == 'datafile':\n filepath = 'raw_data/store{}.h5'.format(runID)\n store = pd.HDFStore(filepath)\n elif store_export == 'dictionary':\n store = {}\n # bookkeeping - minimal\n l_aff = [] # mean affinities\n s_aff = [] # std of affinities\n l_mut = [] # mean mutation counts\n s_mut = [] # std of mutation counts\n l_ents = [] # family entropies\n\n # timepoints at which to store the state of the simulation\n evalfac = int(12/evalperday)\n evaltimes = np.array(range(int(cf.endtime / evalfac))) * evalfac\n\n # start looping over all events at every timestep\n while tnow <= cf.endtime:\n if (store_export == 'datafile' or store_export == 'dictionary'):\n l_fm.append(len(free_memory))\n l_fn.append(len(free_naives))\n for i in range(len(l_GCs)):\n GCcount = len(GC_waiting[i])\n for event in event_list:\n if (event[1] == 'Differentiate' or event[1] == 'Divide') \\\n and event[2] == i:\n GCcount += len(event[3])\n l_GCs[i].append(GCcount)\n\n # remove cells which have died from the naive_pool\n free_naives = old_cells_die(free_naives, tnow)\n # remove cells which have died from the waiting_room\n GC_waiting = long_waiters_die(GC_waiting, tnow)\n\n # refill the naive_pool if it has fallen below standard size\n # taking care that it is not refilled instantaneously but at a speed\n # of the order of natural turnover (naive_pool/tlifeN)\n maxrefill = np.ceil(cf.naive_pool/cf.tlifeN)\n navcount = 0\n while len(free_naives) < cf.naive_pool and navcount < maxrefill:\n newcell = make_naive(RNs, seq_list, AgEpitope, tnow)\n free_naives.append(newcell)\n navcount += 1\n\n # execute list specific events if present at this timepoint\n if len(event_list) > 0:\n # check which events happen at this timepoint\n now_list = [event for event in event_list if event[0] == tnow]\n event_list = [event for event in event_list if event[0] != tnow]\n\n # execute events happening now\n for event in now_list:\n if event[1] == 'Enter':\n GC_waiting = cells_enter_GCs(GC_waiting, event[3], tnow,\n RIs)\n elif event[1] == 'Divide':\n GC_waiting[event[2]], mut_list = cell_division(\n GC_waiting[event[2]], event[3], AgEpitope, tnow,\n mut_list, RNs)\n elif event[1] == 'Differentiate':\n free_memory = 
free_memory + event[3]\n if (store_export == 'datafile' or\n store_export == 'dictionary'):\n for cell in event[3]:\n ms_times[event[2]].append(tnow)\n ms_vals[event[2]].append(cell.affinity)\n ms_fams[event[2]].append(cell.family)\n ms_muts[event[2]].append(cell.mutations)\n\n # activate free naive and memory cells if Ag is present in the system\n if Agcurve[tnow] > 0:\n free_naives, free_memory, event, actsum = try_activation(\n Agcurve[tnow], free_naives, free_memory, tnow, RNs)\n if event is not None:\n event_list.append(event)\n\n # select waiting cells for help signals if LFs are present\n if LFcurve[tnow] > 0:\n # perform selection for every GC separately,\n for i in range(len(GC_waiting)):\n if len(GC_waiting[i]) >= 0:\n GC_waiting[i], new_events, mut_list = select_best_waiters(\n LFcurve[tnow], GC_waiting[i], i, tnow, AgEpitope,\n mut_list, RNs)\n event_list = event_list + new_events\n\n # evaluate everything and store results if tnow in evaltimes\n if tnow in evaltimes:\n if (store_export == 'datafile' or store_export == 'dictionary'):\n meminfo = []\n for cell in free_memory:\n meminfo.append((cell.ID, cell.family, cell.sequence,\n cell.affinity, cell.affinity0,\n cell.birthtime,\n cell.mutations, cell.origin))\n memDF = pd.DataFrame(meminfo, columns=['ID', 'family',\n 'sequence', 'affinity',\n 'affinity0',\n 'birthtime',\n 'mutations', 'origin'])\n store['free_{}'.format(tnow)] = memDF\n\n for i in range(cf.nGCs):\n GCinfo = []\n for cell in GC_waiting[i]:\n GCinfo.append((cell.ID, cell.family, cell.sequence,\n cell.affinity, cell.affinity0,\n cell.birthtime, cell.mutations))\n for event in event_list:\n if (event[1] == 'Differentiate' or\n event[1] == 'Divide') and event[2] == i:\n for cell in event[3]:\n GCinfo.append((cell.ID, cell.family,\n cell.sequence,\n cell.affinity, cell.affinity0,\n cell.birthtime, cell.mutations))\n GCDF = pd.DataFrame(GCinfo, columns=['ID', 'family',\n 'sequence',\n 'affinity',\n 'affinity0',\n 'birthtime',\n 'mutations'])\n store['GC{0}_{1}'.format(i, tnow)] = GCDF\n elif store_export == 'minimal':\n l_fm.append(len(free_memory))\n afflist = [cell.affinity for cell in free_memory]\n mutatlist = [cell.mutations for cell in free_memory]\n familist = [cell.family for cell in free_memory]\n l_aff.append(np.nanmean(afflist))\n s_aff.append(np.nanstd(afflist))\n l_mut.append(np.nanmean(mutatlist))\n s_mut.append(np.nanstd(mutatlist))\n\n CC = Counter(familist)\n l_ents.append(scipy.stats.entropy(CC.values(), base=2))\n\n # increment time\n tnow += 1\n\n tend = tm.time()\n print('pure simulation time = {} s'.format(tend - tstart))\n\n if (store_export == 'datafile' or store_export == 'dictionary'):\n # put all remaining information into storage\n store['l_times'] = pd.DataFrame(np.arange(cf.endtime+1)/float(12))\n store['l_fn'] = pd.DataFrame(l_fn)\n store['l_fm'] = pd.DataFrame(l_fm)\n for i in range(len(l_GCs)):\n store['l_GCs_{}'.format(i)] = pd.DataFrame(l_GCs[i])\n store['LFcurve'] = pd.DataFrame(LFcurve)\n store['Agcurve'] = pd.DataFrame(Agcurve)\n store['mut_list'] = pd.DataFrame(mut_list)\n store['ms_fams'] = pd.DataFrame(ms_fams)\n store['ms_vals'] = pd.DataFrame(ms_vals)\n store['ms_times'] = pd.DataFrame(ms_times)\n store['ms_muts'] = pd.DataFrame(ms_muts)\n store['times'] = pd.DataFrame(evaltimes)\n store['nGCs'] = pd.DataFrame([cf.nGCs])\n store['E_list'] = pd.DataFrame(E_list)\n\n if store_export == 'datafile':\n store.close()\n return filepath\n elif store_export == 'dictionary':\n return store\n\n elif store_export == 'minimal':\n return 
evaltimes, l_fm, l_aff, s_aff, l_mut, s_mut, l_ents", "def main():\n\n test = argv[1]\n if test == 'test':\n dir_data = 'tests/test_data/'\n elif test == 'real':\n dir_data = 'real_data/'\n\n naf_CGI = 18\n neu_CGI = 18\n nas_CGI = 8\n nJ = 28\n nM = 28\n nA = 76\n\n print 'naf_CGI ' + str(naf_CGI)\n print 'neu_CGI ' + str(neu_CGI)\n print 'nas_CGI ' + str(nas_CGI)\n print 'nA ' + str(nA)\n print 'nJ ' + str(nJ)\n print 'nM ' + str(nM)\n\n CGI_file = str(dir_data)+'YRI9.CEU9.CHB4.chr1.phase3_shapeit2_mvncall_integrated_v5a.20130502.genotypes_snpsonly_maf0.005'\n CGIarray_file = str(dir_data)+'YRI9.CEU9.CHB4.chr1.atDNA.biAllelicSNPnoDI.genotypes_hg18_Behar_HGDP_FtDNA'\n array_file = str(dir_data)+'Behar_HGDP_FtDNA_Jews_MidEast_chr1_subset_21509'\n print CGI_file\n print CGIarray_file\n print array_file\n\n seq_real_CGI_file = AllelesReal(str(CGI_file)+'.tped')\n seqAF_CGI_bits = seq_real_CGI_file.make_bitarray_seq(0, naf_CGI)\n seqEu_CGI_bits = seq_real_CGI_file.make_bitarray_seq(naf_CGI, naf_CGI + neu_CGI)\n seqAs_CGI_bits = seq_real_CGI_file.make_bitarray_seq(naf_CGI + neu_CGI, naf_CGI + neu_CGI + nas_CGI)\n\n seq_real_CGIarray_file = AllelesReal(str(CGIarray_file)+'.tped')\n seqAf_asc_bits = seq_real_CGIarray_file.make_bitarray_seq(0, naf_CGI)\n seqEu_asc_bits = seq_real_CGIarray_file.make_bitarray_seq(naf_CGI, naf_CGI + neu_CGI)\n seqAs_asc_bits = seq_real_CGIarray_file.make_bitarray_seq(naf_CGI + neu_CGI, naf_CGI + neu_CGI + nas_CGI)\n\n seq_real_array_file = AllelesReal(str(array_file)+'.tped')\n seqJ_asc_bits = seq_real_array_file.make_bitarray_seq(0, nJ)\n seqM_asc_bits = seq_real_array_file.make_bitarray_seq(nJ, nJ + nM)\n seqA_asc_bits = seq_real_array_file.make_bitarray_seq(nJ + nM, nJ + nM + nA)\n\n res = []\n\n Af_res = []\n Af_res.extend(afs_stats_bitarray.base_S_ss(seqAF_CGI_bits, naf_CGI))\n pi_AfCGI = afs_stats_bitarray.Pi2(Af_res[3], naf_CGI)\n Af_res.append(afs_stats_bitarray.Tajimas(pi_AfCGI, Af_res[0], naf_CGI))\n del (Af_res[3])\n res.extend(Af_res)\n head = 'SegS_Af_CGI\\tSing_Af_CGI\\tDupl_Af_CGI\\tTajD_Af_CGI\\t'\n\n Eu_res = []\n Eu_res.extend(afs_stats_bitarray.base_S_ss(seqEu_CGI_bits, neu_CGI))\n pi_EuCGI = afs_stats_bitarray.Pi2(Eu_res[3], neu_CGI)\n Eu_res.append(afs_stats_bitarray.Tajimas(pi_EuCGI, Eu_res[0], neu_CGI))\n del (Eu_res[3])\n res.extend(Eu_res)\n head = head + 'SegS_Eu_CGI\\tSing_Eu_CGI\\tDupl_Eu_CGI\\tTajD_Eu_CGI\\t'\n\n As_res = []\n As_res.extend(afs_stats_bitarray.base_S_ss(seqAs_CGI_bits, nas_CGI))\n pi_AsCGI = afs_stats_bitarray.Pi2(As_res[3], nas_CGI)\n As_res.append(afs_stats_bitarray.Tajimas(pi_AsCGI, As_res[0], nas_CGI))\n del (As_res[3])\n res.extend(As_res)\n head = head + 'SegS_As_CGI\\tSing_As_CGI\\tDupl_As_CGI\\tTajD_As_CGI\\t'\n\n ##fst between populations\n res.append(afs_stats_bitarray.FST2(seqAF_CGI_bits, pi_AfCGI, naf_CGI, seqEu_CGI_bits, pi_EuCGI, neu_CGI))\n res.append(afs_stats_bitarray.FST2(seqAF_CGI_bits, pi_AfCGI, naf_CGI, seqAs_CGI_bits, pi_AsCGI, nas_CGI))\n res.append(afs_stats_bitarray.FST2(seqEu_CGI_bits, pi_EuCGI, neu_CGI, seqAs_CGI_bits, pi_AsCGI, nas_CGI))\n head = head + 'FST_AfEu_CGI\\tFST_AfAs_CGI\\tFST_EuAs_CGI\\t'\n\n ########Use Germline to find IBD on pseduo array ped and map files\n run_germline = int(argv[2])\n filenameped = str(dir_data)+'Behar_HGDP_FtDNA_Jews_MidEast_YRI9.CEU9.CHB4.chr1.ped'\n filenamemap = str(dir_data)+'Behar_HGDP_FtDNA_Jews_MidEast_YRI9.CEU9.CHB4.chr1.map'\n filenameout = str(dir_data)+'Behar_HGDP_FtDNA_Jews_MidEast_YRI9.CEU9.CHB4.chr1'\n\n print 'run germline? 
'+str(run_germline)\n if (run_germline == 0):\n print 'Running Germline on ' + str(filenameped) + ' ' + str(filenamemap)\n print 'p ' + str(filenameped) + ' ' + str(filenamemap) + ' ' + str(filenameout) + ' \"-bits 10\"'\n germline = Popen.wait(Popen('bash ./bin/phasing_pipeline/gline.sh ./bin/germline-1-5-1/germline ' + str(filenameped) + ' ' + str(filenamemap) + ' ' + str(filenameout) + ' \"-bits 10\"', shell=True))\n\n print 'finished running germline'\n\n ########Get IBD stats from Germline output\n if os.path.isfile(str(filenameout) + '.match'):\n print 'reading Germline IBD output'\n filegermline = open(str(filenameout) + '.match', 'r')\n IBDlengths_AA = []\n IBDlengths_JJ = []\n IBDlengths_MM = []\n IBDlengths_EE = []\n IBDlengths_AE = []\n IBDlengths_AJ = []\n IBDlengths_AM = []\n IBDlengths_JM = []\n IBDlengths_JE = []\n IBDlengths_ME = []\n for line in filegermline:\n pop1 = line.split()[0]\n pop2 = line.split()[2]\n segment = float(line.split()[10])\n pair = str(pop1) + '_' + str(pop2)\n if pair == 'EA_EA' or pair == 'WA_WA' or pair == 'EA_WA' or pair == 'WA_EA':\n IBDlengths_AA.append(segment)\n if pair == 'J_J':\n IBDlengths_JJ.append(segment)\n if pair == 'M_M':\n IBDlengths_MM.append(segment)\n if pair == 'E_E':\n IBDlengths_EE.append(segment)\n if pair == 'EA_E' or pair == 'E_EA' or pair == 'WA_E' or pair == 'E_WA':\n IBDlengths_AE.append(segment)\n if pair == 'EA_J' or pair == 'J_EA' or pair == 'WA_J' or pair == 'J_WA':\n IBDlengths_AJ.append(segment)\n if pair == 'EA_M' or pair == 'M_EA' or pair == 'WA_M' or pair == 'M_WA':\n IBDlengths_AM.append(segment)\n if pair == 'J_M' or pair == 'M_J':\n IBDlengths_JM.append(segment)\n if pair == 'J_E' or pair == 'E_J':\n IBDlengths_JE.append(segment)\n if pair == 'M_E' or pair == 'E_M':\n IBDlengths_ME.append(segment)\n filegermline.close()\n\n print 'calculating summary stats'\n\n IBDlengths_mean = []\n IBDlengths_median = []\n IBDlengths_num = []\n IBDlengths_var = []\n IBDlengths_mean30 = []\n IBDlengths_median30 = []\n IBDlengths_num30 = []\n IBDlengths_var30 = []\n\n pairs = [IBDlengths_AA, IBDlengths_JJ, IBDlengths_MM, IBDlengths_EE, IBDlengths_AE, IBDlengths_AJ,\n IBDlengths_AM, IBDlengths_JM, IBDlengths_JE, IBDlengths_ME]\n for p in pairs:\n IBDlengths_num.append(len(p))\n if len(p) < 1:\n p.append(0)\n IBDlengths_mean.append(np.mean(p))\n IBDlengths_median.append(np.median(p))\n IBDlengths_var.append(np.var(p))\n #### Get IBD greater than 30 Mb\n IBDlengths30 = []\n for l in p:\n if l > 30:\n IBDlengths30.append(l)\n IBDlengths_num30.append(len(IBDlengths30))\n if len(IBDlengths30) == 0:\n IBDlengths30.append(0)\n IBDlengths_mean30.append(np.mean(IBDlengths30))\n IBDlengths_median30.append(np.median(IBDlengths30))\n IBDlengths_var30.append(np.var(IBDlengths30))\n\n\n res.extend(IBDlengths_mean)\n head = head + 'IBD_mean_AA\\tIBD_mean_JJ\\tIBD_mean_MM\\tIBD_mean_EE\\tIBD_mean_AE\\tIBD_mean_AJ\\tIBD_mean_AM\\tIBD_mean_JM\\tIBD_mean_JE\\tIBD_mean_ME\\t'\n res.extend(IBDlengths_median)\n head = head + 'IBD_median_AA\\tIBD_median_JJ\\tIBD_median_MM\\tIBD_median_EE\\tIBD_median_AE\\tIBD_median_AJ\\tIBD_median_AM\\tIBD_median_JM\\tIBD_median_JE\\tIBD_median_ME\\t'\n res.extend(IBDlengths_num)\n head = head + 'IBD_num_AA\\tIBD_num_JJ\\tIBD_num_MM\\tIBD_num_EE\\tIBD_num_AE\\tIBD_num_AJ\\tIBD_num_AM\\tIBD_num_JM\\tIBD_num_JE\\tIBD_num_ME\\t'\n res.extend(IBDlengths_var)\n head = head + 'IBD_var_AA\\tIBD_var_JJ\\tIBD_var_MM\\tIBD_var_EE\\tIBD_var_AE\\tIBD_var_AJ\\tIBD_var_AM\\tIBD_var_JM\\tIBD_var_JE\\tIBD_var_ME\\t'\n\n 
res.extend(IBDlengths_mean30)\n head = head + 'IBD30_mean_AA\\tIBD30_mean_JJ\\tIBD30_mean_MM\\tIBD30_mean_EE\\tIBD30_mean_AE\\tIBD30_mean_AJ\\tIBD30_mean_AM\\tIBD30_mean_JM\\tIBD30_mean_JE\\tIBD30_mean_ME\\t'\n res.extend(IBDlengths_median30)\n head = head + 'IBD30_median_AA\\tIBD30_median_JJ\\tIBD30_median_MM\\tIBD30_median_EE\\tIBD30_median_AE\\tIBD30_median_AJ\\tIBD30_median_AM\\tIBD30_median_JM\\tIBD30_median_JE\\tIBD30_median_ME\\t'\n res.extend(IBDlengths_num30)\n head = head + 'IBD30_num_AA\\tIBD30_num_JJ\\tIBD30_num_MM\\tIBD30_num_EE\\tIBD30_num_AE\\tIBD30_num_AJ\\tIBD30_num_AM\\tIBD30_num_JM\\tIBD30_num_JE\\tIBD30_num_ME\\t'\n res.extend(IBDlengths_var30)\n head = head + 'IBD30_var_AA\\tIBD30_var_JJ\\tIBD30_var_MM\\tIBD30_var_EE\\tIBD30_var_AE\\tIBD30_var_AJ\\tIBD30_var_AM\\tIBD30_var_JM\\tIBD30_var_JE\\tIBD30_var_ME\\t'\n\n\n Af_asc = []\n ss_Af_asc = afs_stats_bitarray.base_S_ss(seqAf_asc_bits, naf_CGI)\n if (ss_Af_asc[0] == 0):\n for i in xrange(5):\n Af_asc.append(0)\n pi_Af_asc = 0\n else:\n Af_asc.extend(afs_stats_bitarray.base_S_ss(seqAf_asc_bits, naf_CGI))\n pi_Af_asc = afs_stats_bitarray.Pi2(Af_asc[3], naf_CGI)\n Af_asc.append(pi_Af_asc)\n Af_asc.append(afs_stats_bitarray.Tajimas(pi_Af_asc, Af_asc[0], naf_CGI))\n del (Af_asc[3])\n res.extend(Af_asc)\n head = head + 'SegS_Af_ASC\\tSing_Af_ASC\\tDupl_Af_ASC\\tPi_Af_ASC\\tTajD_Af_ASC\\t'\n\n Eu_asc = []\n ss_Eu_asc = afs_stats_bitarray.base_S_ss(seqEu_asc_bits, neu_CGI)\n if (ss_Eu_asc[0] == 0):\n for i in xrange(5):\n Eu_asc.append(0)\n pi_Eu_asc = 0\n else:\n Eu_asc.extend(afs_stats_bitarray.base_S_ss(seqEu_asc_bits, neu_CGI))\n pi_Eu_asc = afs_stats_bitarray.Pi2(Eu_asc[3], neu_CGI)\n Eu_asc.append(pi_Eu_asc)\n Eu_asc.append(afs_stats_bitarray.Tajimas(pi_Eu_asc, Eu_asc[0], neu_CGI))\n del (Eu_asc[3])\n res.extend(Eu_asc)\n head = head + 'SegS_Eu_ASC\\tSing_Eu_ASC\\tDupl_Eu_ASC\\tPi_Eu_ASC\\tTajD_Eu_ASC\\t'\n\n As_asc = []\n ss_As_asc = afs_stats_bitarray.base_S_ss(seqAs_asc_bits, nas_CGI)\n if (ss_As_asc[0] == 0):\n for i in xrange(5):\n As_asc.append(0)\n pi_As_asc = 0\n else:\n As_asc.extend(afs_stats_bitarray.base_S_ss(seqAs_asc_bits, nas_CGI))\n pi_As_asc = afs_stats_bitarray.Pi2(As_asc[3], nas_CGI)\n As_asc.append(pi_As_asc)\n As_asc.append(afs_stats_bitarray.Tajimas(pi_As_asc, As_asc[0], nas_CGI))\n del (As_asc[3])\n res.extend(As_asc)\n head = head + 'SegS_As_ASC\\tSing_As_ASC\\tDupl_As_ASC\\tPi_As_ASC\\tTajD_As_ASC\\t'\n\n J_asc = []\n ss_J_asc = afs_stats_bitarray.base_S_ss(seqJ_asc_bits, nJ)\n if (ss_J_asc[0] == 0):\n for i in xrange(5):\n J_asc.append(0)\n pi_J_asc = 0\n else:\n J_asc.extend(afs_stats_bitarray.base_S_ss(seqJ_asc_bits, nJ))\n pi_J_asc = afs_stats_bitarray.Pi2(J_asc[3], nJ)\n J_asc.append(pi_J_asc)\n J_asc.append(afs_stats_bitarray.Tajimas(pi_J_asc, J_asc[0], nJ))\n del (J_asc[3])\n res.extend(J_asc)\n head = head + 'SegS_J_ASC\\tSing_J_ASC\\tDupl_J_ASC\\tPi_J_ASC\\tTajD_J_ASC\\t'\n\n M_asc = []\n ss_M_asc = afs_stats_bitarray.base_S_ss(seqM_asc_bits, nM)\n if (ss_M_asc[0] == 0):\n for i in xrange(5):\n M_asc.append(0)\n pi_M_asc = 0\n else:\n M_asc.extend(afs_stats_bitarray.base_S_ss(seqM_asc_bits, nM))\n pi_M_asc = afs_stats_bitarray.Pi2(M_asc[3], nM)\n M_asc.append(pi_M_asc)\n M_asc.append(afs_stats_bitarray.Tajimas(pi_M_asc, M_asc[0], nM))\n del (M_asc[3])\n res.extend(M_asc)\n head = head + 'SegS_M_ASC\\tSing_M_ASC\\tDupl_M_ASC\\tPi_M_ASC\\tTajD_M_ASC\\t'\n\n A_asc = []\n ss_A_asc = afs_stats_bitarray.base_S_ss(seqA_asc_bits, nA)\n if (ss_A_asc[0] == 0):\n for i in xrange(5):\n 
A_asc.append(0)\n pi_A_asc = 0\n else:\n A_asc.extend(afs_stats_bitarray.base_S_ss(seqA_asc_bits, nA))\n pi_A_asc = afs_stats_bitarray.Pi2(A_asc[3], nA)\n A_asc.append(pi_A_asc)\n A_asc.append(afs_stats_bitarray.Tajimas(pi_A_asc, A_asc[0], nA))\n del (A_asc[3])\n res.extend(A_asc)\n head = head + 'SegS_A_ASC\\tSing_A_ASC\\tDupl_A_ASC\\tPi_A_ASC\\tTajD_A_ASC\\t'\n\n res.append(afs_stats_bitarray.FST2(seqAf_asc_bits, pi_Af_asc, naf_CGI, seqEu_asc_bits, pi_Eu_asc, neu_CGI))\n res.append(afs_stats_bitarray.FST2(seqAf_asc_bits, pi_Af_asc, naf_CGI, seqAs_asc_bits, pi_As_asc, nas_CGI))\n res.append(afs_stats_bitarray.FST2(seqEu_asc_bits, pi_Eu_asc, neu_CGI, seqAs_asc_bits, pi_As_asc, nas_CGI))\n head = head + 'FST_AfEu_ASC\\tFST_AfAs_ASC_m\\tFST_EuAs_ASC\\t'\n\n res.append(afs_stats_bitarray.FST2(seqA_asc_bits, pi_A_asc, nA, seqEu_asc_bits, pi_Eu_asc, neu_CGI))\n res.append(afs_stats_bitarray.FST2(seqA_asc_bits, pi_A_asc, nA, seqJ_asc_bits, pi_J_asc, nJ))\n res.append(afs_stats_bitarray.FST2(seqA_asc_bits, pi_A_asc, nA, seqM_asc_bits, pi_M_asc, nM))\n res.append(afs_stats_bitarray.FST2(seqM_asc_bits, pi_M_asc, nM, seqJ_asc_bits, pi_J_asc, nJ))\n head = head + 'FST_AEu_ASC\\tFST_AJ_ASC\\tFST_AM_ASC\\tFST_MJ_ASC\\n'\n\n filesummary='real_output.summary'\n filesumm=open(filesummary,'w')\n filesumm.write(head)\n\n out=''\n for g in range(len(res)):\n out=out+str(res[g])+'\\t'\n out=out[:-1]+'\\n'\n\n filesumm.write(out)\n filesumm.close()\n\n return res", "def main_loop(csd_profile, csd_seed, total_ele):\n csd_name = csd_profile.func_name\n print 'Using sources %s - Seed: %d ' % (csd_name, csd_seed)\n h = 10.\n\n #TrueCSD\n start_x, end_x, csd_res = [0.,1.,100] \n t_csd_x, true_csd = generate_csd_1D(csd_profile, csd_seed, \n start_x=start_x, \n end_x=end_x, \n res_x=csd_res)\n \n #Electrodes \n ele_res = int(total_ele) \n ele_lims = [0.10, 0.9]\n ele_pos, pots = electrode_config(ele_lims, ele_res, true_csd, t_csd_x, h)\n num_ele = ele_pos.shape[0]\n print 'Number of electrodes:', num_ele\n x_array_pots, true_pots = electrode_config(ele_lims, 100, true_csd, t_csd_x, h)\n\n #kCSD estimation\n gdX = 0.01\n x_lims = [0.,1.] 
#CSD estimation place\n tic = time.time() #time it\n k, est_csd, est_pot = do_kcsd(ele_pos, pots, h=h, gdx=gdX,\n xmin=x_lims[0], xmax=x_lims[1], n_src_init=300)\n toc = time.time() - tic\n\n #RMS of estimation - gives estimate of how good the reconstruction was\n chr_x, test_csd = generate_csd_1D(csd_profile, csd_seed,\n start_x=x_lims[0], end_x=x_lims[1], \n res_x=int((x_lims[1]-x_lims[0])/gdX))\n rms = np.linalg.norm(abs(test_csd - est_csd[:,0]))\n rms /= np.linalg.norm(test_csd)\n\n #Plots\n title =\"Lambda: %0.2E; R: %0.2f; CV_Error: %0.2E; RMS_Error: %0.2E; Time: %0.2f\" %(k.lambd, k.R, k.cv_error, rms, toc)\n make_plots(title, t_csd_x, true_csd, ele_pos, pots, k.estm_x, est_csd, est_pot, true_pots)\n return", "def main():\n\tdb, cursor = connect()\n\t#chroms = ['1','22']\n\t#chroms = ['2','21']\n\t#chroms = ['3','20']\n\t#chroms = ['4','19']\n\t#chroms = ['5','18']\n\t#chroms = ['6','17']\n\t#chroms = ['7','16']\n\t#chroms = ['8','15']\n\t#chroms = ['9','14']\n\t#chroms = ['10','13']\n\tchroms = ['11','12']\n\t#chroms = [str(i) for i in range(10,23)]\n\t#chroms = ['X','Y']\n\tchroms.reverse()\n\tfor chrom in chroms:\n\t\tt0 = time()\n\t\ttable = \"gnomad_freqs_chr_\" + chrom\n\t\tprint\n\t\tprint \"*\"*20\n\t\tprint table\n\t\tprint \"number of variants:\", search_db(cursor, \"select count(1) from %s\" % table)[0][0]\n\t\tqry = \"select count(1) from %s \" % table\n\t\tqry += \"where char_length(reference)=1 and char_length(variant)=1\"\n\t\tprint \"simple SNPs\", search_db(cursor, qry)[0][0]\n\n\t\tcandidates, long_vars_ct = find_complex_variants(cursor, table)\n\t\tprint\n\t\tprint \"Complex variants with reference<30:\", len(candidates),\n\t\tprint \" long variants: \", long_vars_ct\n\n\t\tclusters = find_clusters_of_candidates(candidates)\n\t\tprint\n\t\tprint \"Done clustering. 
Max pos:\", max([cluster[0][0] for cluster in clusters])\n\t\tprint \"Number of hotspot regions:\", len(clusters)\n\n\n\t\tnumber_of_vars_in_clusters = 0\n\t\tnumber_of_clusters_with_periodic_motifs = 0\n\t\tfor cluster in clusters:\n\t\t\t# no varaints: cluster is just the number of positions here, not the number of\n\t\t\t# vars repoted for each\n\t\t\t[start,end, number_of_variants] = characterize_region(cluster)\n\t\t\tif number_of_variants<2: continue\n\t\t\tnumber_of_vars_in_clusters += number_of_variants\n\t\t\tfixed_fields = {'chrom':chrom, 'start':start, 'end':end}\n\t\t\tstore_without_checking(cursor, 'gnomad_hotspots', fixed_fields)\n\t\tprint\n\t\tprint \"Number of variants with clusters:\", number_of_vars_in_clusters\n\t\tprint \"Number of clusters with periodic motifs:\", number_of_clusters_with_periodic_motifs\n\t\tprint\n\t\tprint \"time taken %.2f min\" % ((time() - t0) / 60.0)\n\t\tprint\n\tcursor.close()\n\tdb.close()\n\n\treturn", "def FSC2(input_dir, num_reps=50, min_sims=100000, max_ecm=20, calc_CI=False, numcores=1, scratch_mb='200', time_scratch=\"01:50:00\", mem=\"200\", print1=False, overwrite=\"None\", fsc2_path=\"/storage/plzen1/home/holcovam/programs/fsc26_linux64/fsc26\"):\n Data_Files = []\n tpl_files = []\n est_files = []\n CI_Data_Files = []\n shlist = []\n\n if input_dir.endswith(\"/\") is False:\n input_dir += \"/\"\n\n for path in os.listdir(input_dir):\n if os.path.isdir(input_dir + path) and path.startswith(\"FSC2input\"):\n samp_name = path.split(\"_\")[1]\n #folder_name = samp_name\n if samp_name + \"_DSFS.obs\" in os.listdir(input_dir + path):\n for i in range(0, num_reps):\n new_file = open(input_dir + path + \"/\" + samp_name + str(i) + \"_DSFS.obs\", 'w')\n with open(input_dir + path + \"/\" + samp_name + \"_DSFS.obs\") as data_file:\n for line in data_file:\n new_file.write(line)\n new_file.close()\n Data_Files.append(input_dir + path + \"/\" + samp_name + str(i) + \"_DSFS.obs\")\n else:\n print(\"Did not find input data file for: \", samp_name)\n if calc_CI == \"True\":\n num_files = 0\n for file in os.listdir(input_dir + path):\n if file.endswith(\"_DSFS.obs\") and file.split(\"_\")[-2].split(\".\")[-1][0:3] == \"rep\" and file != samp_name + \"_DSFS.obs\":\n for i in range(0, num_reps):\n new_file = open(input_dir + path + \"/\" + samp_name + file.split(\"_\")[-2].split(\".\")[-1].split(\"_\")[0]+ \"_\" + str(i) + \"_DSFS.obs\", 'w')\n with open(input_dir + path + \"/\" + file) as data_file:\n for line in data_file:\n new_file.write(line)\n new_file.close()\n CI_Data_Files.append(input_dir + path + \"/\" + samp_name + file.split(\"_\")[-2].split(\".\")[-1].split(\"_\")[0]+ \"_\" + str(i) + \"_DSFS.obs\")\n num_files += 1\n if len(CI_Data_Files) < 1:\n print(\"Did not find bootstrap replicates for: \", samp_name)\n else:\n print(\"Found \", num_files, \" replicate dsfs files for CI calculation for \", samp_name)\n if path.endswith(\".tpl\"):\n tpl_files.append(path)\n est_files.append(path.split(\".\")[0])\n if len(tpl_files) == 0:\n print(\"Did not find any tpl files!! 
Aborting!!\")\n else:\n if calc_CI == \"True\":\n Data_Files = CI_Data_Files\n for file in Data_Files:\n name = file.split(\"_DSFS\")[0]\n samp_name = name.split(\"/\")[-1]\n folder_name = samp_name [0:11]\n for tpl in tpl_files:\n tpl_name = tpl.split(\".tpl\")[0]\n if os.path.isdir(name + \"_\" + tpl_name) is False or overwrite == \"hard\":\n new_tpl = open(name + \"_\" + tpl_name + \".tpl\", 'w')\n new_data = open(name + \"_\" + tpl_name + \"_DSFS.obs\", 'w')\n\n with open(file, 'r') as data:\n for i, line in enumerate(data):\n if i == 1:\n pop_info = line.strip(\"\\n\").strip(\"\\t\").split(\"\\t\")\n pop_num = int(pop_info[0])\n samp_nums = pop_info[-pop_num:]\n new_data.write(line)\n with open(input_dir + tpl, 'r') as template:\n samp_num_lines = pop_num + 4\n for i, line in enumerate(template):\n if i < samp_num_lines:\n new_tpl.write(line)\n elif i == samp_num_lines:\n for num in samp_nums:\n new_tpl.write(num + \"\\n\")\n elif i >= samp_num_lines + len(samp_nums):\n new_tpl.write(line)\n new_est = open(name + \"_\" + tpl_name + \".est\", 'w')\n try:\n with open(input_dir + tpl_name + \".est\") as est:\n for line in est:\n new_est.write(line)\n except FileNotFoundError:\n print(\"Did not find est file for: \", tpl)\n #folder_name = samp_name ''.join(i for i in s if not i.isdigit())\n shname = name + \"_\" + tpl_name + \".sh\"\n shfile5 = open(shname, 'w')\n shfile5.write('#!/bin/bash -e\\n' +\n '#PBS -N '+samp_name+'\\n' +\n '#PBS -l walltime='+str(time_scratch)+'\\n' +\n '#PBS -l select=1:ncpus='+str(numcores)+':mem='+str(mem)+'mb:scratch_local='+str(scratch_mb)+'mb\\n' +\n '#PBS -m abe\\n' +\n '#PBS -j oe\\n\\n' +\n 'module add python-3.4.1-gcc\\n'+\n 'module add python34-modules-gcc\\n'+\n 'trap \\'clean_scratch\\' TERM EXIT\\n'+\n 'if [ ! -d \"$SCRATCHDIR\" ] ; then echo \"Scratch not created!\" 1>&2; exit 1; fi \\n' +\n 'DATADIR=\"/storage/plzen1/home/holcovam/ScanTools\"\\n' +\n 'cp $DATADIR/'+ input_dir + \"FSC2input_\" + folder_name+ \"/\" + samp_name + \"_\" + tpl_name + '* $SCRATCHDIR || exit 1\\n'+\n 'cp '+fsc2_path+' $SCRATCHDIR || exit 1\\n'+\n 'cd $SCRATCHDIR || exit 2\\n' +\n 'echo data loaded at `date`\\n\\n' +\n 'chmod +x fsc26 \\n' +\n #'ls -l \\n' +\n './fsc26 -t ' + samp_name + \"_\" + tpl_name + '.tpl -e ' + samp_name + \"_\" + tpl_name + '.est -n ' + str(min_sims) + ' -u -d -q -L ' + str(max_ecm) + ' -M \\n' + \n 'rm seed.txt \\n'+\n 'rm fsc26\\n'+\n 'rm *DSFS.obs\\n'+\n 'rm *.sh\\n'+\n 'rm *.tpl \\n'+\n 'rm *.est \\n'+\n #'ls -l \\n' +\n 'cp $SCRATCHDIR/*.par $DATADIR/'+ input_dir + \"FSC2input_\" + folder_name+' || exit 1\\n'+\n 'rm *.par \\n'+\n 'cp -r $SCRATCHDIR/* $DATADIR/'+input_dir+' || export CLEAN_SCRATCH=false\\n'+\n 'printf \"\\\\nFinished\\\\n\\\\n\"\\n')\n shfile5.close()\n shlist.append(shname)\n\n############IF PROBLEM WITH EXCESS OF NONCONVERGED CHAINS, COPY /home/majda/alpine/fastsimcoal2/afterWPSG/scripts/notConverged.py here ###################\n\n else:\n print(\"Output for \" + samp_name + \"_\" + tpl_name + \" already exists. 
Use hard_overwrite = True to overwrite.\")\n return shlist", "def main():\n cursor = PGCONN.cursor()\n # track our work\n with open(\"myhucs.txt\", \"w\") as fh:\n # Change the working directory to where we have data files\n os.chdir(\"../../data/%s\" % (sys.argv[2],))\n # collect up the GeoJSONs in that directory\n fns = glob.glob(\"smpldef3m_*.json\")\n fns.sort()\n i = 0\n\n for fn in fns:\n # Save our work every 100 HUC12s,\n # so to keep the database transaction\n # at a reasonable size\n if i > 0 and i % 100 == 0:\n PGCONN.commit()\n cursor = PGCONN.cursor()\n df, snapdf = get_data(fn)\n huc12 = process(cursor, fn, df, snapdf)\n fh.write(\"%s\\n\" % (huc12,))\n i += 1\n\n # Commit the database changes\n cursor.close()\n PGCONN.commit()\n LOG.info(\"Complete.\")", "def main(argv):\n args = process_command_line(argv)\n name = job_string(args)\n #That feel when no torison ;_;\n if args.dihed:\n raise Exception(\"Dihed is not supported right now\")\n #SDFS!\n if args.sdf:\n handle_sdf(args)\n #Conversion, pruning\n pybel_mols = convert_to_pybel(args.files, args.format)\n if args.pruneStart:\n pybel_mols = prune(pybel_mols, args.pruneStart)\n print \"Total number of molecules to process is\", len(pybel_mols)\n #Division\n if args.division:\n grouped_pybels = molecule_grouping.main(args.division, pybel_mols)\n else:\n grouped_pybels = [pybel_mols]\n #Run algorithm\n groups_reps, weights = run_smrs(grouped_pybels, args.dihed, args.nonH, args.energy,\n args.alpha, args.delCoordCSV, args.delCoefCSV, name)\n prune_finished = False\n #Pruning representatives\n if args.pruneFinish:\n all_reps = []\n for group in groups_reps:\n all_reps += group\n all_reps = prune(all_reps, args.pruneFinish)\n prune_finished = True\n #Save all groups into one folder\n folder_name = 'rep_' + name\n if args.folder:\n #folder creation\n while True:\n if not os.path.exists(folder_name):\n os.mkdir(folder_name)\n break\n else:\n folder_name = folder_name + 'c'\n #copying\n if prune_finished:\n for mol in all_reps:\n shutil.copy(mol.title, os.getcwd() + \"/\" + folder_name)\n else:\n for group in groups_reps:\n for mol in group:\n shutil.copy(mol.title, os.getcwd() + \"/\" + folder_name)\n print \"Coeficient matrix results\"\n for i in range(len(grouped_pybels)):\n for j in range(len(grouped_pybels[i])):\n print grouped_pybels[i][j].title, weights[i][j]\n print \"\"\n print \"Rep mols\"\n for group in groups_reps:\n for mol in group:\n print mol.title\n return groups_reps", "def update_compdatabase():\n for comp_group in comp_entry:\n#\n#--- read the last set of the input data and find the last entry \n#\n past = house_keeping + comp_group + '_past'\n past = mcf.read_data_file(past)\n\n last = past[-1]\n#\n#--- find today's data entry\n#\n cmd = 'ls /data/mta_www/mp_reports/*/' + comp_group + '/data/mta*fits* >' + zspace\n os.system(cmd)\n current = mcf.read_data_file(zspace)\n\n cmd = 'mv '+ zspace + ' ' + house_keeping + comp_group + '_past'\n os.system(cmd)\n#\n#--- find the data which are not read\n#\n new_fits = []\n chk = 0\n for ent in current:\n if chk == 0:\n if ent == last:\n chk = 1\n continue\n new_fits.append(ent)\n#\n#--- uppend the data to the local fits data files\n#\n for fits in new_fits:\n [cols, tbdata] = ecf.read_fits_file(fits)\n\n time = tbdata['time']\n\n for col in cols:\n#\n#--- ignore columns with \"ST_\" (standard dev) and time\n#\n if col.lower() == 'time':\n continue\n\n mc = re.search('st_', col.lower())\n if mc is not None:\n continue\n\n mdata = tbdata[col]\n cdata = [time, mdata]\n 
ocols = ['time', col.lower()]\n\n ofits = out_dir + col.lower()+ '_full_data.fits'\n if os.path.isfile(ofits):\n update_fits_file(ofits, ocols, cdata)\n else:\n create_fits_file(ofits, ocols, cdata)", "def dataset_fillCohnKanade( dsFolder, ckFolder, ckEmoFolder, config, vperc=0.3, vseed=0):\n\n subjects=[ x for x in os.listdir(ckFolder) if isdir(join(ckFolder, x)) ]\n print \"INFO: %d subjects found in CK+ database\" % len(subjects)\n\n for subj in subjects:\n print \"INFO: Processing subject %s \" % subj\n \n labelFolders=[x for x in os.listdir(join(ckEmoFolder, subj)) if isdir(join(ckEmoFolder, join(subj, x)))]\n imageFolders=[x for x in os.listdir(join(ckFolder, subj)) if isdir(join(ckEmoFolder, join(subj, x)))]\n\n shots=[x for x in imageFolders if x in labelFolders]\n for s in shots:\n print \"INFO: Processing shot %s \" % s\n \n pics=[x for x in os.listdir( join(ckFolder, join(subj,s)) ) if isfile(join(ckFolder, join(subj, join(s, x))))]\n pics.sort()\n labels=[x for x in os.listdir( join(ckEmoFolder, join(subj, s)) ) if isfile( join(ckEmoFolder, join(subj, join(s, x)) )) ]\n if len(labels)<1 or len(pics)<1:\n # label forlder could contain no file at all, in this case skip the current shot or mark it as neutral?\n print \"WARN: subject %s shot %s has #%d emo labels and #%d pictures, (skip:incomplete)\" %( subj, s, len(labels), len(pics))\n continue\n emo=None\n with open(join(ckEmoFolder, join(subj, join(s, labels[0]))), \"r\") as f:\n buf=f.read()\n if len(buf)==0:\n print \"WARN: subject %s shot %s has void emo label '%s', (skip:noemo)\" % (subj, s, join(ckEmoFolder, join(subj, join(s, labels[0]))))\n # A label file could be void, in this case skip the current shot\n continue\n try:\n emo=config['CLASSES'][int(float(strip(buf)))]\n except:\n print \"ERR: cannot parse emotional label for subject %s shot %s (skip:unknown_emo)\" % (subj, s)\n continue\n\n # Last picture is the final emotion (most intense), first picture is neutral\n to_copy = [(pics[-1], emo), (pics[0], config['CLASSES'][0])]\n\n for pic, emo in to_copy:\n print \"INFO: Picture '%s' has been marked as %s\" % (pic, emo)\n orig = join(ckFolder, join(subj, join(s, pic)))\n IMAGES_FOLDER = config['TRAINING_IMAGES']\n if random.random() <= vperc:\n IMAGES_FOLDER = config['VALIDATION_IMAGES']\n dest = join(dsFolder, join(IMAGES_FOLDER, join(emo, pic)))\n try:\n shutil.copy(orig, dest)\n except:\n print \"ERR: cannot copy image '%s' to dataset '%s' \"%(orig, dest)\n continue", "def run_calcs(pattern: str, time='1d', memory='2GB', outfile='outfile'):\n cwd = os.getcwd()\n\n time = time.lower()\n if time[-1] == 'd':\n time = int(time[:-1]) * 24 * 60\n elif time[-1] == 'h':\n time = int(time[:-1]) * 60\n elif time[-1] == 'm':\n time = int(time[:-1])\n else:\n raise ValueError('Time must be given in minutes, hours, or days (e.g. 1440m, 24h, 1d).')\n\n memory = memory.upper()\n if memory[-2:] not in ['MB', 'GB']:\n raise ValueError('Memory must be given as a MB or GB (e.g. 
1024MB, 1GB)')\n\n for filename in glob.glob(pattern):\n if os.path.commonpath([cwd, os.path.abspath(filename)]) != cwd:\n continue\n filename = os.path.abspath(filename)[len(cwd)+1:]\n\n _, _, orbital, *wfn = filename.split(os.sep)\n if os.path.isdir(filename):\n os.chdir(filename)\n else:\n dirname, filename = os.path.split(filename)\n os.chdir(dirname)\n submit_job = False\n\n if orbital == 'mo' and os.path.splitext(filename)[1] == '.com':\n # write script (because sbatch only takes one command)\n with open('hf_sp.sh', 'w') as f:\n f.write('#!/bin/bash\\n')\n f.write(f'g16 {filename}\\n')\n command = ['hf_sp.sh']\n submit_job = True\n elif orbital == 'mo' and os.path.splitext(filename)[1] == '.chk':\n command = ['formchk', filename]\n submit_job = False\n elif orbital == 'mo' and os.path.splitext(filename)[1] == '.fchk':\n command = [os.environ.get('HORTONPYTHON'),\n '/project/def-ayers/kimt33/fanpy/scripts/horton_gaussian_fchk.py',\n 'hf_energies.npy', 'oneint.npy', 'twoint.npy', 'fchk_file', filename]\n submit_job = False\n elif len(wfn) == 2:\n if os.path.splitext(filename)[1] == '.py':\n with open('results.sh', 'w') as f:\n f.write('#!/bin/bash\\n')\n f.write('cwd=$PWD\\n')\n f.write('for i in */; do\\n')\n f.write(' cd $i\\n')\n f.write(' python ../calculate.py > results.out\\n')\n f.write(' cd $cwd\\n')\n f.write('done\\n')\n else:\n with open('results.sh', 'w') as f:\n f.write('#!/bin/bash\\n')\n f.write(f'python ../calculate.py\\n')\n command = ['results.sh']\n submit_job = True\n\n # print(' '.join(['sbatch', f'--time={time}', f'--output={outfile}', f'--mem={memory}',\n # '--account=rrg-ayers-ab', command]))\n if submit_job:\n subprocess.run(['sbatch', f'--time={time}', f'--output={outfile}', f'--mem={memory}',\n '--account=rrg-ayers-ab', *command])\n else:\n subprocess.run(command)\n\n # change directory\n os.chdir(cwd)", "def main(output_filepath):\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')\n\n baseurl = 'http://codeandbeer.org/virtual/BigData/Labs/'\n files = ['Booking-20151012-1322.csv', 'Booking-20181025-1232.csv']\n for filename in files:\n r = requests.get(baseurl+filename, stream=True)\n if r.status == 200:\n with open(output_filepath+\"/\"+filename, \"wb\") as f:\n f.raw.decode_content = True\n shutil.copyfileobj(r.raw, f)", "def main():\n # the url for african daily and global daily\n african_dialy_url = \"https://data.chc.ucsb.edu/products/CHIRPS-2.0/africa_daily/tifs/p25/\"\n global_daily_url = \"https://data.chc.ucsb.edu/products/CHIRPS-2.0/global_daily/tifs/p25/\"\n\n\n each_year_list = GetRasterYears(url=african_dialy_url)\n new_path = makenewdir(each_year_list)\n years_new_list = fecthrasterurl(url=african_dialy_url)\n downloadwithwget(each_year_list, years_new_list, new_path)", "def run_experiment(x_loops=15, max_steps=0, display_on=True, max_fps=10,\n garden_size=8, tako_number=1, pop_max=30, max_width=1800,\n max_height=900, collect_data=True, export_all=False,\n rand_nets=False, max_gen = 505, genetic_mode=\"Plain\",\n learning_on=False, seeds=None, garden_mode=\"Diverse Static\",\n family_detection=None, family_mod=0, record_inbreeding=True,\n inbreed_lim = 1.1, hla_genes=0, binary_health=0,\n carrier_percentage=40, two_envs=False, diff_envs=False,\n migration_rate=0, phen_pref=False, filename=\"\"): \n #round width/height down to nearest multiple of 50 if need be\n if max_width % 50 != 0:\n max_width = max_width - (max_width % 50)\n if max_height % 50 != 0:\n max_height = max_height - (max_height % 50)\n\n i 
= 0\n #create csv files if they don't already exist\n if collect_data or export_all:\n if filename == \"\":\n filename = str(int(time.time())) + \".csv\"\n elif len(filename) < 4:\n filename = filename + \".csv\"\n elif filename[-4:] != \".csv\":\n filename = filename + \".csv\"\n\n if not os.path.exists(\"Data\"):\n os.makedirs(\"Data\")\n\n if collect_data:\n if not os.path.exists(os.path.join(\"Data\", filename)):\n with open(os.path.join(\"Data\", filename), 'a', newline='') as\\\n csvfile:\n writ = csv.writer(csvfile)\n writ.writerow(['iteration', 'env #', 'ID', 'parent1',\n 'parent2', 'age', 'generation', '# children',\n 'mating attempts', 'accum pain',\n 'cause of death', 'timestep', 'mutations',\n 'parent_degree', 'parent_genoverlap',\n '# disorders',\n 'health a', 'health b', 'preference'])\n else:\n with open(os.path.join(\"Data\", filename), newline='') as\\\n csvfile:\n reader = csv.DictReader(csvfile)\n row = None\n for row in reader: pass\n if row != None:\n i = int(row[\"iteration\"]) + 1\n\n if export_all:\n h = make_headers()\n f = os.path.join('Data', (filename[:-4] + ' gene data.csv'))\n if not os.path.exists(f):\n with open(f, 'a') as file:\n writ = csv.writer(file)\n writ.writerow(h)\n\n tako.rand_nets = rand_nets\n tako.family_mod = family_mod\n tako.family_detection = family_detection\n gt.family_detection = family_detection\n tako.record_inbreeding = record_inbreeding\n tako.inbreed_lim = inbreed_lim\n tako.hla_genes = hla_genes\n tako.binary_health = binary_health\n tako.carrier_percentage = carrier_percentage\n tako.phen_pref = phen_pref\n gt.phen_pref = phen_pref\n \n loop_limit = x_loops\n if loop_limit < 1:\n loop_limit = 1\n\n if seeds == None:\n seeds = [None for i in range(x_loops)]\n\n while loop_limit > 0:\n #check if seeds is long enough\n if len(seeds) < loop_limit + i:\n for j in range(loop_limit + i - len(seeds)):\n seeds.append(seeds[j])\n if seeds[0] != None:\n tako.set_seed(seeds[i])\n g = garden_game(garden_size, tako_number, pop_max, max_width,\n max_height, display_on, max_fps, learning_on,\n genetic_mode, rand_nets, garden_mode, filename,\n export_all, family_mod, family_detection,\n two_envs, diff_envs, migration_rate,\n seeds[i])\n if display_on:\n main_window = g\n main_window.main_loop(max_steps, max_gen, display_on,\n collect_data, garden_mode, i)\n else:\n g.main_loop(max_steps, max_gen, display_on, collect_data,\n garden_mode, i)\n loop_limit -= 1\n i += 1", "def test_run_full(mk_tmp_dirs):\n tmp_current_path, tmp_data_path, tmp_config_path = mk_tmp_dirs\n\n cfg_dir = path.join(tmp_data_path, 'cfgs')\n collect_pipeline_cfgs(cfg_dir)\n\n asn_path = path.join(DATAPATH, 'mosaic_long_asn.json')\n args = [\n path.join(cfg_dir, 'calwebb_image3.cfg'),\n asn_path,\n ]\n\n Step.from_cmdline(args)\n\n # Check for the CRF files\n with open(asn_path) as fh:\n asn = load_asn(fh)\n expfilenames = [\n path.split(path.splitext(member['expname'])[0])[1]\n for member in asn['products'][0]['members']\n ]\n crffilenames = []\n for expfilename in expfilenames:\n name = remove_suffix(path.splitext(expfilename)[0])[0]\n crffilenames.append(name + '_a3001_crf.fits')\n for crffilename in crffilenames:\n assert path.isfile(crffilename)\n\n # Check for the level3 products\n product_name = asn['products'][0]['name']\n assert path.isfile(product_name + '_cat.ecsv')\n assert path.isfile(product_name + '_i2d.fits')", "def execute():\r\n arcpy.AddMessage(\"START BCA Processing\")\r\n arcpy.env.workspace = config.temp_data_gdb\r\n arcpy.env.overwriteOutput = True\r\n 
sys.path.append(config.notif_system_script_folder)\r\n\r\n # Other Variables\r\n arcpy.AddMessage(\"Import toolbox\")\r\n arcpy.ImportToolbox(config.notif_toolbox)\r\n REGEX_FOR_INVALID_CHARS = re.compile(r'[^0-9a-zA-Z]+')\r\n todayDate = datetime.datetime.now().strftime(\"%Y%m%d\")\r\n logFile = file(\r\n config.report_processing_log + \"\\\\\" + todayDate + \"_NotificationSystemLog\" + \".txt\", \"a\")\r\n\r\n\r\n # get all unzipped files uploaded to shared folder\r\n configfiles = [os.path.join(dirpath, f)\r\n for dirpath, dirnames, files in os.walk(config.SharedFolder)\r\n for f in files if f.endswith('.csv') or f.endswith('.xls') or f.endswith('.xlsx') or f.endswith('.XLS')]\r\n\r\n correct_config_files = [f for f in configfiles if \"\\BCAWeeklyPermitReport\\\\\" in f]\r\n\r\n # PREPARE workspace\r\n arcpy.AddMessage(\"Preparing workspace...\")\r\n for BCAreport in correct_config_files:\r\n\r\n input_file_name = BCAreport.split(\"\\\\\")[-1]\r\n\r\n MukimConstruct = arcpy.SearchCursor(config.MukimConstructSource)\r\n PermitDateExists = False\r\n\r\n for row in MukimConstruct:\r\n aux = input_file_name[:8]\r\n if \"CORRECTED\" not in BCAreport.upper():\r\n filedate = datetime.datetime.strptime(aux, \"%Y%m%d\")\r\n else:\r\n clean_filename = input_file_name.split(\".\")[0]\r\n filedate = datetime.datetime.strptime(clean_filename[-8:], \"%Y%m%d\")\r\n if filedate == row.PERMIT_DATE and \"CORRECTED\" not in BCAreport.upper():\r\n PermitDateExists = True\r\n break\r\n if PermitDateExists and \"CORRECTED\" not in BCAreport.upper():\r\n PermitDateExistsLog = file(\r\n config.ErrorLogFolder + \"\\\\\" + input_file_name.split(\".\")[0] +\r\n \" file's Permit Date already exists\" + \".log\",\r\n \"a\")\r\n PermitDateExistsLog.write(\r\n \"Permit Date for the file \" + input_file_name + \" already exists in Mukim Construct at \" + str(\r\n datetime.datetime.now()))\r\n logFile.writelines(\r\n \"Permit Date for the file \" + input_file_name + \" already exists in Mukim Construct at \" + str(\r\n datetime.datetime.now()) + \"\\n\")\r\n\r\n else:\r\n\r\n # 00. Creation of geodatabases that will serve as workspaces\r\n logFile.writelines(\"00 Creation of temp gdb starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n if arcpy.Exists(config.TempDataGDB):\r\n arcpy.Delete_management(config.TempDataGDB)\r\n arcpy.CreateFileGDB_management(config.Notification, \"Temp_data.gdb\")\r\n else:\r\n arcpy.CreateFileGDB_management(config.Notification, \"Temp_data.gdb\")\r\n\r\n if arcpy.Exists(config.SDEDataGDB):\r\n arcpy.Delete_management(config.SDEDataGDB)\r\n arcpy.CreateFileGDB_management(config.Notification, \"Source.gdb\")\r\n else:\r\n arcpy.CreateFileGDB_management(config.Notification, \"Source.gdb\")\r\n\r\n if arcpy.Exists(config.CurrentMukimConstructDataGDB):\r\n arcpy.Delete_management(config.CurrentMukimConstructDataGDB)\r\n arcpy.CreateFileGDB_management(config.Notification, \"Final_data.gdb\")\r\n else:\r\n arcpy.CreateFileGDB_management(config.Notification, \"Final_data.gdb\")\r\n\r\n logFile.writelines(\"00 Creation of temp gdb ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 01. 
Import the base data\r\n logFile.writelines(\"01 Import of base data starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n arcpy.FeatureClassToFeatureClass_conversion(config.MukimConstructSource, config.CurrentMukimConstructDataGDB,\r\n \"MUKIM_CONSTRUCT\", \"\", \"\", \"\")\r\n arcpy.FeatureClassToFeatureClass_conversion(config.MukimConstructByProjSource, config.CurrentMukimConstructDataGDB,\r\n \"MUKIM_CONSTRUCT_BYPROJ\", \"\", \"\", \"\")\r\n arcpy.FeatureClassToFeatureClass_conversion(config.DepotSource, config.SDEDataGDB, \"DepotBoundary\", \"\", \"\", \"\")\r\n arcpy.FeatureClassToFeatureClass_conversion(config.CatchmentSource, config.SDEDataGDB, \"CatchmentBoundary\", \"\", \"\", \"\")\r\n arcpy.FeatureClassToFeatureClass_conversion(config.LandlotSource, config.TempDataGDB, \"Land_lot\", \"\", \"\", \"\")\r\n # Calculate the lot key without letter\r\n arcpy.AddField_management(config.LandLot, \"Lotkey_wo_letter\", \"TEXT\", \"\", \"\", \"10\", \"\", \"NULLABLE\", \"NON_REQUIRED\",\r\n \"\")\r\n arcpy.CalculateField_management(config.LandLot, \"Lotkey_wo_letter\", \"!lot_key![:10]\", \"PYTHON\", \"\")\r\n\r\n logFile.writelines(\"01 Import of base data ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n\r\n # START THE LOOP TO PROCESS ALL THE FILES\r\n clcounter = 0\r\n\r\n if len(correct_config_files) == 0:\r\n logFile.writelines(\"No BCA report to process at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n arcpy.AddMessage(\"Processing files...\")\r\n for BCAreport in configfiles:\r\n\r\n clcounter += 1\r\n arcpy.AddMessage(BCAreport)\r\n input_file_name = BCAreport.split(\"\\\\\")[-1]\r\n MukimConstruct = arcpy.SearchCursor(config.MukimConstructSource)\r\n PermitDateExists = False\r\n\r\n # CHEKC FILE DATE EXISTS\r\n for row in MukimConstruct:\r\n aux = input_file_name[:8]\r\n if \"CORRECTED\" not in BCAreport.upper():\r\n filedate = datetime.datetime.strptime(aux, \"%Y%m%d\")\r\n else:\r\n clean_filename = input_file_name.split(\".\")[0]\r\n filedate = datetime.datetime.strptime(clean_filename[-8:], \"%Y%m%d\")\r\n if filedate == row.PERMIT_DATE and \"CORRECTED\" not in input_file_name.upper():\r\n PermitDateExists = True\r\n break\r\n\r\n HEADERVALID = True\r\n with xlrd.open_workbook(BCAreport) as wb:\r\n sh = wb.sheet_by_index(0)\r\n for r in range(sh.nrows):\r\n colcount = 0\r\n if sh.row_values(r)[colcount] == 'Error_Message':\r\n HEADERVALID = True\r\n elif sh.row_values(r)[colcount] == 'Project Ref No' or sh.row_values(r)[colcount] == 'Project_Ref_No':\r\n HEADERVALID = True\r\n else:\r\n PermitDateExistsLog = file(config.ErrorLogFolder + \"\\\\\" + input_file_name.split(\".\")[\r\n 0] + \" file's header format is not acceptable for processing\" + \".log\", \"a\")\r\n PermitDateExistsLog.write(\r\n \"The header format for the file \" + input_file_name + \" is not acceptable for processing at \" + str(\r\n datetime.datetime.now()))\r\n logFile.writelines(\r\n \"The header format for the file \" + input_file_name + \" is not acceptable for processing at \" + str(\r\n datetime.datetime.now()) + \"\\n\")\r\n HEADERVALID = False\r\n break\r\n\r\n if not PermitDateExists and HEADERVALID:\r\n logFile.writelines(\"Starts processing \" + BCAreport + \" at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # Status update to run/not run the SiteInspection Update\r\n Log_SiteInspectionUpdate = file(config.SiteInspectionUpdate, \"w\")\r\n Log_SiteInspectionUpdate.writelines(\"NO\")\r\n Log_SiteInspectionUpdate.close()\r\n\r\n # 02. 
Import the BCA report to a geodatabase table\r\n logFile.writelines(\"02 Import of table to gdb starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n try:\r\n if arcpy.Exists(config.TempDataGDB + \"\\\\ConvertedBCAreport_02\"):\r\n arcpy.Delete_management(config.TempDataGDB + \"\\\\ConvertedBCAreport_02\")\r\n arcpy.CreateTable_management(config.TempDataGDB, \"ConvertedBCAreport_02\", config.TemplateConvertedBCAreport)\r\n else:\r\n arcpy.CreateTable_management(config.TempDataGDB, \"ConvertedBCAreport_02\", config.TemplateConvertedBCAreport)\r\n if arcpy.Exists(BCAreport[:-5] + '_err' + '.csv'):\r\n # rename old error report\r\n os.remove(BCAreport[:-5] + '_err' + '.csv')\r\n else:\r\n result = \"Error file does not exist\"\r\n if BCAreport.endswith('.xls') or BCAreport.endswith('.xlsx') or BCAreport.endswith('.XLS'):\r\n rows_out = arcpy.InsertCursor(config.BCAReportGDBTable)\r\n fldlist = arcpy.ListFields(config.BCAReportGDBTable)\r\n fldlist.pop(0)\r\n with xlrd.open_workbook(BCAreport) as wb:\r\n sh = wb.sheet_by_index(0)\r\n for r in range(sh.nrows):\r\n colcount = 0\r\n if sh.row_values(r)[colcount] != 'Error_Message':\r\n colcount = 0\r\n else:\r\n colcount = 1\r\n break\r\n for r in range(sh.nrows):\r\n colcounter = colcount\r\n if r > 0:\r\n new_row_out = rows_out.newRow()\r\n for efld in fldlist:\r\n if efld.name <> 'OBJECTID' and efld.name <> 'ConcatFields':\r\n new_row_out.setValue(efld.name, sh.row_values(r)[colcounter])\r\n colcounter += 1\r\n\r\n logFile.writelines(\"Inserting: \" + str(new_row_out) + \"\\n\")\r\n rows_out.insertRow(new_row_out)\r\n del rows_out, new_row_out\r\n\r\n elif BCAreport.endswith('.csv'):\r\n\r\n BCAreportread = csv.DictReader(open(BCAreport, 'rb'), delimiter=',', quotechar='\"')\r\n rows_out = arcpy.InsertCursor(config.BCAReportGDBTable)\r\n for attribute in BCAreportread:\r\n new_row_out = rows_out.newRow()\r\n new_row_out.Project_Ref_No = attribute['Project_Ref_No']\r\n new_row_out.Project_Title = attribute['Project_Title']\r\n new_row_out.House_Blk_No = attribute['House_Blk_No']\r\n new_row_out.Road_Name = attribute['Road_Name']\r\n new_row_out.Level_No = attribute['Level_No']\r\n new_row_out.Unit_No = attribute['Unit_No']\r\n new_row_out.Building_Name = attribute['Building_Name']\r\n new_row_out.Postal_Code = attribute['Postal_Code']\r\n new_row_out.Project_Mukim_nos = attribute['Project_Mukim_nos']\r\n new_row_out.Project_Lot_nos = attribute['Project_Lot_nos']\r\n new_row_out.Permit_Type_of_Work = attribute['Permit_Type_of_Work']\r\n new_row_out.Type_of_Work = attribute['Type_of_Work']\r\n new_row_out.Owner_s_name = attribute['Owners_name']\r\n new_row_out.Owner_s_firm_name = attribute['Owners_firm_name']\r\n new_row_out.Owner_s_address = attribute['Owners_address']\r\n new_row_out.Owner_s_Tel_No = attribute['Owners_Tel_No']\r\n new_row_out.Owner_s_Email_address = attribute['Owners_Email_address']\r\n new_row_out.Builder_s_name = attribute['Builders_name']\r\n new_row_out.Builder_s_firm_name = attribute['Builders_firm_name']\r\n new_row_out.Builder_s_address = attribute['Builders_address']\r\n new_row_out.Builder_s_Tel_No = attribute['Builders_Tel_No']\r\n new_row_out.Builder_s_email_address = attribute['Builders_email_address']\r\n new_row_out.PE_s_name = attribute['PEs_name']\r\n new_row_out.PE_s_firm_name = attribute['PEs_firm_name']\r\n new_row_out.PE_s_address = attribute['PEs_address']\r\n new_row_out.PE_s_Tel_No = attribute['PEs_Tel_No']\r\n new_row_out.PE_s_Email_address = attribute['PEs_Email_address']\r\n 
new_row_out.Architect_s_name = attribute['Architects_name']\r\n new_row_out.Architect_s_firm_name = attribute['Architects_firm_name']\r\n new_row_out.Architect_s_address = attribute['Architects_address']\r\n new_row_out.Architect_s_Tel_No = attribute['Architects_Tel_No']\r\n new_row_out.Architect_s_Email_address = attribute['Architects_Email_address']\r\n new_row_out.Project_Cost = attribute['Project_Cost']\r\n new_row_out.Project_Duration = attribute['Project_Duration']\r\n new_row_out.Approval_Date_DD_MM_YYYY_ = attribute['Approval_Date']\r\n rows_out.insertRow(new_row_out)\r\n if new_row_out:\r\n del new_row_out\r\n if rows_out:\r\n del rows_out\r\n\r\n except:\r\n log_error(\"Error in 02 Import of table to gdb: \", logFile)\r\n logFile.writelines(\"02 Import of table to gdb ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 03. Remove spaces in key fields for the concatenation\r\n logFile.writelines(\"03 Removing of spaces starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n try:\r\n rowsSpace = arcpy.UpdateCursor(config.BCAReportGDBTable)\r\n\r\n for row in rowsSpace:\r\n ProjRef = row.Project_Ref_No.strip()\r\n ProjMukim = row.Project_Mukim_nos.strip()\r\n ProjLot = row.Project_Lot_nos.strip()\r\n BuilderN = row.Builder_s_name.strip()\r\n row.Project_Ref_No = ProjRef\r\n row.Project_Mukim_nos = ProjMukim\r\n row.Project_Lot_nos = ProjLot\r\n row.Builder_s_name = BuilderN\r\n rowsSpace.updateRow(row)\r\n if row:\r\n del row\r\n if rowsSpace:\r\n del rowsSpace\r\n except:\r\n log_error(\"Error in 03 Removing of spaces: \", logFile)\r\n logFile.writelines(\"03 Removing of spaces ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 04. Concatenate Project_Ref_No, Project_Mukim_nos, Project_Lot_nos, Builder_s_name\r\n logFile.writelines(\"04 Concatenate the three fields starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n try:\r\n rows = arcpy.UpdateCursor(config.BCAReportGDBTable)\r\n for row in rows:\r\n expression = str(row.Project_Ref_No) + \"-\" + str(row.Project_Mukim_nos) + \"-\" + str(\r\n row.Project_Lot_nos) + \"-\" + str(row.Builder_s_name)\r\n row.ConcatFields = expression\r\n rows.updateRow(row)\r\n if row:\r\n del row\r\n if rows:\r\n del rows\r\n\r\n except:\r\n log_error(\"Error in 04 Concatenate the three fields: \", logFile)\r\n logFile.writelines(\"04 Concatenate the three fields ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 05. Create temporary tables for Unique and Duplicate records\r\n logFile.writelines(\"05 Create temporary tables starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n try:\r\n if arcpy.Exists(config.TempDataGDB + \"\\\\Uniquerows\"):\r\n arcpy.Delete_management(config.TempDataGDB + \"\\\\Uniquerows\")\r\n arcpy.CreateTable_management(config.TempDataGDB, \"Uniquerows\", config.TemplateConcat, \"\")\r\n else:\r\n arcpy.CreateTable_management(config.TempDataGDB, \"Uniquerows\", config.TemplateConcat, \"\")\r\n\r\n if arcpy.Exists(config.TempDataGDB + \"\\\\Duplicaterows\"):\r\n arcpy.Delete_management(config.TempDataGDB + \"\\\\Duplicaterows\")\r\n arcpy.CreateTable_management(config.TempDataGDB, \"Duplicaterows\", config.TemplateConcat, \"\")\r\n else:\r\n arcpy.CreateTable_management(config.TempDataGDB, \"Duplicaterows\", config.TemplateConcat, \"\")\r\n except:\r\n log_error(\"Error in 05 Create temporary tables: \", logFile)\r\n logFile.writelines(\"05 Create temporary tables ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 06. 
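Step 06 below partitions the imported rows into uniques and duplicates using the concatenated ref/mukim/lot/builder key built in step 04. Stripped of the cursors, the same partitioning can be expressed with a set, as in this sketch (the sample keys are invented):

def split_unique_duplicate(keys):
    """Return (uniques, duplicates): first occurrence of a key is unique, repeats are duplicates."""
    seen = set()
    uniques, duplicates = [], []
    for key in keys:
        if key in seen:
            duplicates.append(key)
        else:
            seen.add(key)
            uniques.append(key)
    return uniques, duplicates

print(split_unique_duplicate(["A0001-MK01-00012-ACME", "A0002-MK03-00044-BETA", "A0001-MK01-00012-ACME"]))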
Separate unique and duplicate records\r\n logFile.writelines(\"06 Separate unique and duplicate rows starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n try:\r\n print \"Start step 06\"\r\n rows_inCB02 = arcpy.UpdateCursor(config.BCAReportGDBTable)\r\n rows_outUnique = arcpy.InsertCursor(config.UniqueRecords)\r\n # print rows_outUnique\r\n rows_outDuplicate = arcpy.InsertCursor(config.DuplicateRecords)\r\n\r\n rows_unique = []\r\n rows_duplicates = []\r\n for row in rows_inCB02:\r\n if row.ConcatFields not in rows_unique:\r\n rows_unique = rows_unique + [row.ConcatFields]\r\n else:\r\n rows_duplicates = rows_duplicates + [row.ConcatFields]\r\n\r\n print \"Start step 06 1\"\r\n for item in rows_unique:\r\n print \"clcounter: \" + str(clcounter)\r\n print \"item: \" + str(item)\r\n newrow = rows_outUnique.newRow()\r\n newrow.Concat = item\r\n # print newrow\r\n rows_outUnique.insertRow(newrow)\r\n\r\n print \"Start step 06 2\"\r\n for item in rows_duplicates:\r\n print \"clcounter: \" + str(clcounter)\r\n print \"item: \" + str(item)\r\n newrow = rows_outDuplicate.newRow()\r\n newrow.Concat = item\r\n rows_outDuplicate.insertRow(newrow)\r\n\r\n print \"Start step 06 3\"\r\n\r\n if rows_inCB02:\r\n del rows_inCB02\r\n if rows_outUnique:\r\n del rows_outUnique\r\n if rows_outDuplicate:\r\n del rows_outDuplicate\r\n if row:\r\n del row\r\n except:\r\n log_error(\"Error in 06 Separate unique and duplicate rows: \", logFile)\r\n logFile.writelines(\"06 Separate unique and duplicate rows ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 07. Get the rest of the fields for Uniquerows table\r\n logFile.writelines(\r\n \"07 Get the rest of the fields for unique rows starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n arcpy.env.workspace = config.TempDataGDB\r\n arcpy.AddMessage(\"Starting toolbox JoinUniqueRestofFields\")\r\n\r\n try:\r\n arcpy.JoinUniqueRestofFields()\r\n except:\r\n log_error(\"Error in 07 Get the rest of the fields for unique rows: \", logFile)\r\n logFile.writelines(\r\n \"07 Get the rest of the fields for unique rows ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 08. Get the rest of the fields for Duplicaterows table\r\n logFile.writelines(\r\n \"08 Get the rest of the fields for duplicate rows starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n arcpy.AddMessage(\"START toolbox JoinDuplicateRestofFields\")\r\n try:\r\n arcpy.JoinDuplicateRestofFields()\r\n\r\n except:\r\n log_error(\"Error in 08 Get the rest of the fields for duplicate rows: \", logFile)\r\n\r\n logFile.writelines(\r\n \"08 Get the rest of the fields for duplicate rows ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 09. 
Log duplicate records\r\n logFile.writelines(\"09 Log duplicate records starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n arcpy.AddMessage(\"Logging duplicate records\")\r\n try:\r\n # Initialize the error log\r\n wbk = xlwt.Workbook()\r\n sheet = wbk.add_sheet('Book 1')\r\n row_count = 0\r\n col_count = 0\r\n header = ['Error_Message', 'Project_Ref_No', 'Project_Title', 'House_Blk_No', 'Road_Name', 'Level_No',\r\n 'Unit_No', 'Building_Name', 'Postal_Code', 'Project_Mukim_nos', 'Project_Lot_nos',\r\n 'Permit_Type_of_Work', 'Type_of_Work', 'Owners_name', 'Owners_firm_name', 'Owners_address',\r\n 'Owners_Tel_No', 'Owners_Email_address', 'Builders_name', 'Builders_firm_name',\r\n 'Builders_address', 'Builders_Tel_No', 'Builders_email_address', 'PEs_name', 'PEs_firm_name',\r\n 'PEs_address', 'PEs_Tel_No', 'PEs_Email_address', 'Architects_name', 'Architects_firm_name',\r\n 'Architects_address', 'Architects_Tel_No', 'Architects_Email_address', 'Project_Cost',\r\n 'Project_Duration', 'Approval_Date']\r\n for fieldname in header:\r\n sheet.write(row_count, col_count, fieldname)\r\n col_count += 1\r\n wbk.save(config.ErrorLogFolder + \"\\\\\" + input_file_name.split(\".\")[0] + \"_err\" + \".xls\")\r\n\r\n # Log duplicate records\r\n rows = arcpy.SearchCursor(config.DuplicateRows)\r\n\r\n row_count = 1\r\n col_count = 0\r\n row = None\r\n for row in rows:\r\n message = ['Duplicate record in the BCA report', row.Project_Ref_No, row.Project_Title,\r\n row.House_Blk_No, row.Road_Name, row.Level_No, row.Unit_No, row.Building_Name,\r\n row.Postal_Code, row.Project_Mukim_nos, row.Project_Lot_nos, row.Permit_Type_of_Work,\r\n row.Type_of_Work, row.Owner_s_name, row.Owner_s_firm_name, row.Owner_s_address,\r\n row.Owner_s_Tel_No, row.Owner_s_Email_address, row.Builder_s_name,\r\n row.Builder_s_firm_name, row.Builder_s_address, row.Builder_s_Tel_No,\r\n row.Builder_s_email_address, row.PE_s_name, row.PE_s_firm_name, row.PE_s_address,\r\n row.PE_s_Tel_No, row.PE_s_Email_address, row.Architect_s_name, row.Architect_s_firm_name,\r\n row.Architect_s_address, row.Architect_s_Tel_No, row.Architect_s_Email_address,\r\n row.Project_Cost, row.Project_Duration, row.Approval_Date_DD_MM_YYYY_]\r\n col_count = 0\r\n for element in message:\r\n sheet.write(row_count, col_count, element)\r\n col_count += 1\r\n row_count += 1\r\n wbk.save(config.ErrorLogFolder + \"\\\\\" + input_file_name.split(\".\")[0] + \"_err\" + \".xls\")\r\n if row:\r\n del row\r\n if rows:\r\n del rows\r\n except:\r\n log_error(\"Error in 09 Log duplicate records: \", logFile)\r\n\r\n logFile.writelines(\"09 Log duplicate records ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 10. 
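Steps 10 and 11 below fan each report row out into one record per mukim number and then one record per lot number. Ignoring the geodatabase cursors, the combined effect is a nested comma-split, roughly as sketched here (field names follow the script above; the sample data is invented):

def explode_mukim_lots(record):
    """Yield one copy of the record per (mukim, lot) combination."""
    for mukim in record["Project_Mukim_nos"].split(","):
        for lot in record["Project_Lot_nos"].split(","):
            row = dict(record)
            row["Project_Mukim_nos"] = mukim.strip()
            row["Project_Lot_nos"] = lot.strip()
            yield row

sample = {"Project_Ref_No": "A0001", "Project_Mukim_nos": "MK01, MK02", "Project_Lot_nos": "00012,00013"}
for r in explode_mukim_lots(sample):
    print(r)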
Split rows based on Mukim numbers\r\n logFile.writelines(\"10 Splitting of rows based on mukim starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n try:\r\n if arcpy.Exists(config.SplittedMukimRows):\r\n arcpy.Delete_management(config.SplittedMukimRows)\r\n arcpy.CreateTable_management(config.TempDataGDB, \"Splitted_rows_mukim_03\", config.TemplateBCAReport, \"\")\r\n else:\r\n arcpy.CreateTable_management(config.TempDataGDB, \"Splitted_rows_mukim_03\", config.TemplateBCAReport, \"\")\r\n\r\n if arcpy.Exists(config.SplittedProjLotRows):\r\n arcpy.Delete_management(config.SplittedProjLotRows)\r\n arcpy.CreateTable_management(config.TempDataGDB, \"Splitted_rows_projlot_04\", config.TemplateBCAReport, \"\")\r\n else:\r\n arcpy.CreateTable_management(config.TempDataGDB, \"Splitted_rows_projlot_04\", config.TemplateBCAReport, \"\")\r\n\r\n rows_in = arcpy.SearchCursor(config.UniqueRows)\r\n rows_out = arcpy.InsertCursor(config.SplittedMukimRows)\r\n\r\n for row in rows_in:\r\n list_mukim_nos = row.Project_Mukim_nos.split(\",\")\r\n for proj_mukim_nos_id in list_mukim_nos:\r\n new_row_out = rows_out.newRow()\r\n new_row_out.Project_Mukim_nos = proj_mukim_nos_id\r\n new_row_out.PROJECTMUKIM_RAW = row.Project_Mukim_nos\r\n new_row_out.Project_Ref_No = row.Project_Ref_No\r\n new_row_out.Project_Title = row.Project_Title\r\n new_row_out.House_Blk_No = row.House_Blk_No\r\n new_row_out.Road_Name = row.Road_Name\r\n new_row_out.Level_No = row.Level_No\r\n new_row_out.Unit_No = row.Unit_No\r\n new_row_out.Building_Name = row.Building_Name\r\n new_row_out.Postal_Code = row.Postal_Code\r\n new_row_out.Project_Lot_nos = row.Project_Lot_nos\r\n new_row_out.Permit_Type_of_Work = row.Permit_Type_of_Work\r\n new_row_out.Type_of_Work = row.Type_of_Work\r\n new_row_out.Owner_s_name = row.Owner_s_name\r\n new_row_out.Owner_s_firm_name = row.Owner_s_firm_name\r\n new_row_out.Owner_s_address = row.Owner_s_address\r\n new_row_out.Owner_s_Tel_No = row.Owner_s_Tel_No\r\n new_row_out.Owner_s_Email_address = row.Owner_s_Email_address\r\n new_row_out.Builder_s_name = row.Builder_s_name\r\n new_row_out.Builder_s_firm_name = row.Builder_s_firm_name\r\n new_row_out.Builder_s_address = row.Builder_s_address\r\n new_row_out.Builder_s_Tel_No = row.Builder_s_Tel_No\r\n new_row_out.Builder_s_email_address = row.Builder_s_email_address\r\n new_row_out.PE_s_name = row.PE_s_name\r\n new_row_out.PE_s_firm_name = row.PE_s_firm_name\r\n new_row_out.PE_s_address = row.PE_s_address\r\n new_row_out.PE_s_Tel_No = row.PE_s_Tel_No\r\n new_row_out.PE_s_Email_address = row.PE_s_Email_address\r\n new_row_out.Architect_s_name = row.Architect_s_name\r\n new_row_out.Architect_s_firm_name = row.Architect_s_firm_name\r\n new_row_out.Architect_s_address = row.Architect_s_address\r\n new_row_out.Architect_s_Tel_No = row.Architect_s_Tel_No\r\n new_row_out.Architect_s_Email_address = row.Architect_s_Email_address\r\n new_row_out.Project_Cost = row.Project_Cost\r\n new_row_out.Project_Duration = row.Project_Duration\r\n new_row_out.Approval_Date_DD_MM_YYYY_ = row.Approval_Date_DD_MM_YYYY_\r\n rows_out.insertRow(new_row_out)\r\n if row:\r\n del row\r\n if new_row_out:\r\n del new_row_out\r\n if rows_in:\r\n del rows_in\r\n if rows_out:\r\n del rows_out\r\n except:\r\n log_error(\"Error in 10 Splitting of rows based on mukim: \", logFile)\r\n\r\n logFile.writelines(\"10 Splitting of rows based on mukim ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 11.Split rows based on Project lot numbers\r\n arcpy.AddMessage(\"Splitting rows 
based on project lots\")\r\n\r\n logFile.writelines(\r\n \"11 Splitting of rows based on project lot starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n try:\r\n rows_in03 = arcpy.SearchCursor(config.SplittedMukimRows)\r\n rows_out04 = arcpy.InsertCursor(config.SplittedProjLotRows)\r\n\r\n for row in rows_in03:\r\n list_proj_lot_nos = row.Project_Lot_nos.split(\",\")\r\n print list_proj_lot_nos\r\n for proj_lot_nos_id in list_proj_lot_nos:\r\n print proj_lot_nos_id\r\n new_row_out = rows_out04.newRow()\r\n new_row_out.Project_Lot_nos = proj_lot_nos_id\r\n new_row_out.PROJECTMUKIM_RAW = row.PROJECTMUKIM_RAW\r\n new_row_out.PROJECTLOT_RAW = row.Project_Lot_nos\r\n new_row_out.Project_Ref_No = row.Project_Ref_No\r\n new_row_out.Project_Title = row.Project_Title\r\n new_row_out.House_Blk_No = row.House_Blk_No\r\n new_row_out.Road_Name = row.Road_Name\r\n new_row_out.Level_No = row.Level_No\r\n new_row_out.Unit_No = row.Unit_No\r\n new_row_out.Building_Name = row.Building_Name\r\n new_row_out.Postal_Code = row.Postal_Code\r\n new_row_out.Project_Mukim_nos = row.Project_Mukim_nos\r\n new_row_out.Permit_Type_of_Work = row.Permit_Type_of_Work\r\n new_row_out.Type_of_Work = row.Type_of_Work\r\n new_row_out.Owner_s_name = row.Owner_s_name\r\n new_row_out.Owner_s_firm_name = row.Owner_s_firm_name\r\n new_row_out.Owner_s_address = row.Owner_s_address\r\n new_row_out.Owner_s_Tel_No = row.Owner_s_Tel_No\r\n new_row_out.Owner_s_Email_address = row.Owner_s_Email_address\r\n new_row_out.Builder_s_name = row.Builder_s_name\r\n new_row_out.Builder_s_firm_name = row.Builder_s_firm_name\r\n new_row_out.Builder_s_address = row.Builder_s_address\r\n new_row_out.Builder_s_Tel_No = row.Builder_s_Tel_No\r\n new_row_out.Builder_s_email_address = row.Builder_s_email_address\r\n new_row_out.PE_s_name = row.PE_s_name\r\n new_row_out.PE_s_firm_name = row.PE_s_firm_name\r\n new_row_out.PE_s_address = row.PE_s_address\r\n new_row_out.PE_s_Tel_No = row.PE_s_Tel_No\r\n new_row_out.PE_s_Email_address = row.PE_s_Email_address\r\n new_row_out.Architect_s_name = row.Architect_s_name\r\n new_row_out.Architect_s_firm_name = row.Architect_s_firm_name\r\n new_row_out.Architect_s_address = row.Architect_s_address\r\n new_row_out.Architect_s_Tel_No = row.Architect_s_Tel_No\r\n new_row_out.Architect_s_Email_address = row.Architect_s_Email_address\r\n new_row_out.Project_Cost = row.Project_Cost\r\n new_row_out.Project_Duration = row.Project_Duration\r\n new_row_out.Approval_Date_DD_MM_YYYY_ = row.Approval_Date_DD_MM_YYYY_\r\n rows_out04.insertRow(new_row_out)\r\n\r\n if row:\r\n del row\r\n if new_row_out:\r\n del new_row_out\r\n if rows_in03:\r\n del rows_in03\r\n if rows_out04:\r\n del rows_out04\r\n # print int(arcpy.GetCount_management(SplittedProjLotRows).getOutput(0))\r\n except:\r\n log_error(\"Error in 11 Splitting of rows based on project lot: \", logFile)\r\n logFile.writelines(\r\n \"11 Splitting of rows based on project lot ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 12. 
Remove spaces in Mukim and Project lot values\r\n logFile.writelines(\r\n \"12 Removing of spaces in mukim and project lot starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n arcpy.AddMessage(\"Cleaning project lots\")\r\n try:\r\n\r\n rowsSpaces = arcpy.UpdateCursor(config.SplittedProjLotRows)\r\n\r\n for row in rowsSpaces:\r\n lot_no_spaces = row.Project_Lot_nos.strip()\r\n mukim_no_spaces = row.Project_Mukim_nos.strip()\r\n row.Project_Lot_nos = lot_no_spaces\r\n row.Project_Mukim_nos = mukim_no_spaces\r\n rowsSpaces.updateRow(row)\r\n if row:\r\n del row\r\n if rowsSpaces:\r\n del rowsSpaces\r\n except:\r\n log_error(\"Error in 12 Removing of spaces in mukim and project lot: \", logFile)\r\n logFile.writelines(\r\n \"12 Removing of spaces in mukim and project lot ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 13. Log empty Mukimlot or date fields\r\n logFile.writelines(\r\n \"13 Log empty mukim and project lot nos starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n try:\r\n rowsEmpty = arcpy.UpdateCursor(config.SplittedProjLotRows)\r\n\r\n for row in rowsEmpty:\r\n message = ['Missing Project lot or Mukim numbers', row.Project_Ref_No, row.Project_Title,\r\n row.House_Blk_No, row.Road_Name, row.Level_No, row.Unit_No, row.Building_Name,\r\n row.Postal_Code, row.Project_Mukim_nos, row.Project_Lot_nos, row.Permit_Type_of_Work,\r\n row.Type_of_Work, row.Owner_s_name, row.Owner_s_firm_name, row.Owner_s_address,\r\n row.Owner_s_Tel_No, row.Owner_s_Email_address, row.Builder_s_name,\r\n row.Builder_s_firm_name, row.Builder_s_address, row.Builder_s_Tel_No,\r\n row.Builder_s_email_address, row.PE_s_name, row.PE_s_firm_name, row.PE_s_address,\r\n row.PE_s_Tel_No, row.PE_s_Email_address, row.Architect_s_name, row.Architect_s_firm_name,\r\n row.Architect_s_address, row.Architect_s_Tel_No, row.Architect_s_Email_address,\r\n row.Project_Cost, row.Project_Duration, row.Approval_Date_DD_MM_YYYY_]\r\n message2 = ['Missing Project duration or Approval date', row.Project_Ref_No, row.Project_Title,\r\n row.House_Blk_No, row.Road_Name, row.Level_No, row.Unit_No, row.Building_Name,\r\n row.Postal_Code, row.Project_Mukim_nos, row.Project_Lot_nos, row.Permit_Type_of_Work,\r\n row.Type_of_Work, row.Owner_s_name, row.Owner_s_firm_name, row.Owner_s_address,\r\n row.Owner_s_Tel_No, row.Owner_s_Email_address, row.Builder_s_name,\r\n row.Builder_s_firm_name, row.Builder_s_address, row.Builder_s_Tel_No,\r\n row.Builder_s_email_address, row.PE_s_name, row.PE_s_firm_name, row.PE_s_address,\r\n row.PE_s_Tel_No, row.PE_s_Email_address, row.Architect_s_name,\r\n row.Architect_s_firm_name, row.Architect_s_address, row.Architect_s_Tel_No,\r\n row.Architect_s_Email_address, row.Project_Cost, row.Project_Duration,\r\n row.Approval_Date_DD_MM_YYYY_]\r\n if row.Project_Mukim_nos is None or (len(row.Project_Mukim_nos) < 4):\r\n col_count = 0\r\n for element in message:\r\n sheet.write(row_count, col_count, element)\r\n col_count += 1\r\n row_count += 1\r\n rowsEmpty.deleteRow(row)\r\n elif row.Project_Lot_nos is None or (len(row.Project_Lot_nos) == 0):\r\n col_count = 0\r\n for element in message:\r\n sheet.write(row_count, col_count, element)\r\n col_count += 1\r\n row_count += 1\r\n rowsEmpty.deleteRow(row)\r\n if row.Project_Duration is None or (len(row.Project_Duration) < 1):\r\n col_count = 0\r\n for element in message2:\r\n sheet.write(row_count, col_count, element)\r\n col_count += 1\r\n row_count += 1\r\n rowsEmpty.deleteRow(row)\r\n\r\n elif row.Approval_Date_DD_MM_YYYY_ is None or 
(len(row.Approval_Date_DD_MM_YYYY_) < 1):\r\n col_count = 0\r\n for element in message2:\r\n sheet.write(row_count, col_count, element)\r\n col_count += 1\r\n row_count += 1\r\n rowsEmpty.deleteRow(row)\r\n wbk.save(config.ErrorLogFolder + \"\\\\\" + input_file_name.split(\".\")[0] + \"_err\" + \".xls\")\r\n if row:\r\n del row\r\n if rowsEmpty:\r\n del rowsEmpty\r\n except:\r\n log_error(\"Error in 13 Log for empty mukim and project lot nos: \", logFile)\r\n logFile.writelines(\"13 Log empty mukim and project lot nos ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 14. Error log for those with bad values\r\n arcpy.AddMessage(\"14 Logging bad values\")\r\n logFile.writelines(\"14 Log if bad values exist starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n rowsBadValues = arcpy.UpdateCursor(config.SplittedProjLotRows)\r\n\r\n for row in rowsBadValues:\r\n message = ['Mukim or Project lot numbers have bad values', row.Project_Ref_No, row.Project_Title,\r\n row.House_Blk_No, row.Road_Name, row.Level_No, row.Unit_No, row.Building_Name,\r\n row.Postal_Code, row.Project_Mukim_nos, row.Project_Lot_nos, row.Permit_Type_of_Work,\r\n row.Type_of_Work, row.Owner_s_name, row.Owner_s_firm_name, row.Owner_s_address,\r\n row.Owner_s_Tel_No, row.Owner_s_Email_address, row.Builder_s_name,\r\n row.Builder_s_firm_name, row.Builder_s_address, row.Builder_s_Tel_No,\r\n row.Builder_s_email_address, row.PE_s_name, row.PE_s_firm_name, row.PE_s_address,\r\n row.PE_s_Tel_No, row.PE_s_Email_address, row.Architect_s_name, row.Architect_s_firm_name,\r\n row.Architect_s_address, row.Architect_s_Tel_No, row.Architect_s_Email_address,\r\n row.Project_Cost, row.Project_Duration, row.Approval_Date_DD_MM_YYYY_]\r\n if len(REGEX_FOR_INVALID_CHARS.findall(row.Project_Lot_nos)) > 0:\r\n col_count = 0\r\n for element in message:\r\n sheet.write(row_count, col_count, element)\r\n col_count += 1\r\n row_count += 1\r\n rowsBadValues.deleteRow(row)\r\n elif len(REGEX_FOR_INVALID_CHARS.findall(row.Project_Mukim_nos)) > 0:\r\n col_count = 0\r\n for element in message:\r\n sheet.write(row_count, col_count, element)\r\n col_count += 1\r\n row_count += 1\r\n rowsBadValues.deleteRow(row)\r\n elif len(uptodigit(row.Project_Lot_nos)) > 0:\r\n col_count = 0\r\n for element in message:\r\n sheet.write(row_count, col_count, element)\r\n col_count += 1\r\n row_count += 1\r\n rowsBadValues.deleteRow(row)\r\n wbk.save(config.ErrorLogFolder + \"\\\\\" + input_file_name.split(\".\")[0] + \"_err\" + \".xls\")\r\n\r\n if row:\r\n del row\r\n if rowsBadValues:\r\n del rowsBadValues\r\n except:\r\n log_error(\"Error in 14 Log if bad values exist: \", logFile)\r\n logFile.writelines(\"14 Log if bad values exist ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 15. Add zeros for Project Lot numbers\r\n logFile.writelines(\"15 Add zeros starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n rowsZeros = arcpy.UpdateCursor(config.SplittedProjLotRows)\r\n letters = string.ascii_letters\r\n for row in rowsZeros:\r\n letter_count = len(filter(functools.partial(operator.contains, letters), row.Project_Lot_nos))\r\n filled_string = row.Project_Lot_nos.zfill(5 + letter_count)\r\n row.Project_Lot_nos = filled_string\r\n rowsZeros.updateRow(row)\r\n if row:\r\n del row\r\n if rowsZeros:\r\n del rowsZeros\r\n except:\r\n log_error(\"Error in 15 Add zeros: \", logFile)\r\n logFile.writelines(\"15 Add zeros ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 16. 
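Step 15 above left-pads each lot number to five digits while leaving any trailing survey letter intact (for example, '123A' becomes '00123A'). A standalone version of that padding rule:

import string

def pad_lot_number(lot_no, width=5):
    """Zero-fill the numeric part of a lot number, ignoring any letter suffix."""
    letters = sum(1 for ch in lot_no if ch in string.ascii_letters)
    return lot_no.zfill(width + letters)

print(pad_lot_number("123A"))   # 00123A
print(pad_lot_number("7"))      # 00007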
Add and populate fields Mukim_Lot_No, Mukimlot_wo_letter, and Permit_date\r\n logFile.writelines(\"16 Add and populate fields starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n rowsPop = arcpy.UpdateCursor(config.SplittedProjLotRows)\r\n for row in rowsPop:\r\n expression = str(row.Project_Mukim_nos) + \"-\" + str(row.Project_Lot_nos)\r\n row.Mukim_Lot_No = expression\r\n date = filedate.strftime(\"%Y%m%d\")\r\n year = int(date[:4])\r\n month = int(date[4:6])\r\n day = int(date[6:8])\r\n permit_date = datetime.datetime(year, month, day)\r\n row.Permit_date = permit_date\r\n rowsPop.updateRow(row)\r\n if row:\r\n del row\r\n if rowsPop:\r\n del rowsPop\r\n # Calculate Mukimlot_wo_letter\r\n arcpy.CalculateField_management(config.SplittedProjLotRows, \"Mukimlot_wo_letter\", \"!Mukim_Lot_No![:10]\",\r\n \"PYTHON_9.3\", \"\")\r\n\r\n except:\r\n log_error(\"Error in 16 Add and populate fields: \", logFile)\r\n logFile.writelines(\"16 Add and populate fields ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 17.Match mukim lot and land lot\r\n logFile.writelines(\"17 Match mukim lot with landlot starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n arcpy.MatchMukimLandLot()\r\n except:\r\n log_error(\"Error in 17 Match mukim lot with landlot: \", logFile)\r\n logFile.writelines(\"17 Match mukim lot with landlot ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 18.Get unmatched mukim lot with land lot\r\n logFile.writelines(\"18 Get unmatched mukim lot starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n arcpy.AddMessage(\"18 Get unmatched mukim lot\")\r\n try:\r\n arcpy.GetUnmatchedMukimLot()\r\n\r\n except:\r\n log_error(\"Error in 18 Get unmatched mukim lot: \", logFile)\r\n\r\n logFile.writelines(\"18 Get unmatched mukim lot ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 19. 
Log errors for unmatched mukim lots\r\n logFile.writelines(\"19 Log unmatched mukim lot starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n rowsUnmatched = arcpy.SearchCursor(config.UnmatchedMukimLot)\r\n row = None\r\n\r\n for row in rowsUnmatched:\r\n message = ['Unmatched mukim lot with the land lot', row.Project_Ref_No, row.Project_Title,\r\n row.House_Blk_No, row.Road_Name, row.Level_No, row.Unit_No, row.Building_Name,\r\n row.Postal_Code, row.Project_Mukim_nos, row.Project_Lot_nos, row.Permit_Type_of_Work,\r\n row.Type_of_Work, row.Owner_s_name, row.Owner_s_firm_name, row.Owner_s_address,\r\n row.Owner_s_Tel_No, row.Owner_s_Email_address, row.Builder_s_name,\r\n row.Builder_s_firm_name, row.Builder_s_address, row.Builder_s_Tel_No,\r\n row.Builder_s_email_address, row.PE_s_name, row.PE_s_firm_name, row.PE_s_address,\r\n row.PE_s_Tel_No, row.PE_s_Email_address, row.Architect_s_name, row.Architect_s_firm_name,\r\n row.Architect_s_address, row.Architect_s_Tel_No, row.Architect_s_Email_address,\r\n row.Project_Cost, row.Project_Duration, row.Approval_Date_DD_MM_YYYY_]\r\n col_count = 0\r\n for element in message:\r\n sheet.write(row_count, col_count, element)\r\n col_count += 1\r\n row_count += 1\r\n wbk.save(config.ErrorLogFolder + \"\\\\\" + input_file_name.split(\".\")[0] + \"_err\" + \".xls\")\r\n if row:\r\n del row\r\n if rowsUnmatched:\r\n del rowsUnmatched\r\n\r\n with xlrd.open_workbook(config.ErrorLogFolder + \"\\\\\" + input_file_name.split(\".\")[0] + \"_err\" + \".xls\") as wb:\r\n sh = wb.sheet_by_index(0)\r\n if sh.nrows == 1:\r\n os.remove(config.ErrorLogFolder + \"\\\\\" + input_file_name.split(\".\")[0] + \"_err\" + \".xls\")\r\n\r\n except arcpy.ExecuteError:\r\n log_error(\"Error in 19 Log unmatched mukim lot: \", logFile)\r\n logFile.writelines(\"19 Log unmatched mukim lot ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 20. Prepare the table for MukimConstruct matching (add required fields)\r\n logFile.writelines(\"20 Add fields to be used for matching starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n if arcpy.Exists(config.MUKIMCONSTRUCTImport):\r\n arcpy.Delete_management(config.MUKIMCONSTRUCTImport)\r\n arcpy.FeatureClassToFeatureClass_conversion(config.MukimConstructSource, config.TempDataGDB,\r\n \"MUKIM_CONSTRUCT_Import\")\r\n else:\r\n arcpy.FeatureClassToFeatureClass_conversion(config.MukimConstructSource, config.TempDataGDB,\r\n \"MUKIM_CONSTRUCT_Import\")\r\n\r\n arcpy.AddField_management(config.MatchedMukimLot, \"Concat_4fields\", \"Text\", \"\", \"\", \"\")\r\n arcpy.AddField_management(config.MUKIMCONSTRUCTImport, \"Concat_4fields\", \"Text\", \"\", \"\", \"\")\r\n arcpy.AddField_management(config.MatchedMukimLot, \"PROJ_DURATION_MTHS2\", \"Double\", \"\", \"\", \"\")\r\n except:\r\n log_error(\"Error in 20 Add fields to be used for matching: \", logFile)\r\n logFile.writelines(\"20 Add fields to be used for matching ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 21. 
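Step 21, which follows, normalises PROJ_DURATION_MTHS strings such as '18 Months' or '2 Years' into a plain month count before recreating the field as a Double. The conversion itself amounts to:

def duration_to_months(text):
    """Convert '18 Months' or '2 Years' to an integer month count; other values raise ValueError."""
    count = int(text.split(" ")[0])
    if "Year" in text:
        return count * 12
    if "Month" in text:
        return count
    raise ValueError("Unrecognised duration: %r" % text)

print(duration_to_months("18 Months"))  # 18
print(duration_to_months("2 Years"))    # 24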
Calculate Project Duration as months\r\n logFile.writelines(\"21 Calculate PROJ_DURATION as months starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n rowsProjDur = arcpy.UpdateCursor(config.MatchedMukimLot)\r\n\r\n for row in rowsProjDur:\r\n durationstr = row.PROJ_DURATION_MTHS\r\n if \"Month\" in row.PROJ_DURATION_MTHS:\r\n durationintmth = int(durationstr.split(' ')[0])\r\n row.PROJ_DURATION_MTHS2 = durationintmth\r\n elif \"Year\" in row.PROJ_DURATION_MTHS:\r\n durationintyr = int(durationstr.split(' ')[0]) * 12\r\n row.PROJ_DURATION_MTHS2 = durationintyr\r\n rowsProjDur.updateRow(row)\r\n if rowsProjDur:\r\n del rowsProjDur\r\n if row:\r\n del row\r\n\r\n arcpy.DeleteField_management(config.MatchedMukimLot, \"PROJ_DURATION_MTHS\")\r\n arcpy.AddField_management(config.MatchedMukimLot, \"PROJ_DURATION_MTHS\", \"Double\")\r\n arcpy.CalculateField_management(config.MatchedMukimLot, \"PROJ_DURATION_MTHS\", \"[PROJ_DURATION_MTHS2]\")\r\n except:\r\n log_error(\"Error in 21 Calculate PROJ_DURATION as months: \", logFile)\r\n logFile.writelines(\"21 Calculate PROJ_DURATION as months ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 22. Concatenate 4 fields to be used in checking if mukimlot already exists in MUKIMCONSTRUCT\r\n logFile.writelines(\"22 Concatenate 4 fields starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n rowsConcat1 = arcpy.UpdateCursor(config.MUKIMCONSTRUCTImport)\r\n\r\n for row in rowsConcat1:\r\n expression = str(row.PROJ_REF_NO) + \"-\" + str(row.BUILDER_NAME) + \"-\" + str(\r\n row.LOT_KEY) + \"-\" + str(row.PERMIT_DATE)\r\n row.Concat_4fields = expression\r\n rowsConcat1.updateRow(row)\r\n if row:\r\n del row\r\n if rowsConcat1:\r\n del rowsConcat1\r\n\r\n rowsConcat2 = arcpy.UpdateCursor(config.MatchedMukimLot)\r\n\r\n for row in rowsConcat2:\r\n expression = str(row.PROJ_REF_NO) + \"-\" + str(row.BUILDER_NAME) + \"-\" + str(\r\n row.LOT_KEY) + \"-\" + str(row.PERMIT_DATE)\r\n row.Concat_4fields = expression\r\n rowsConcat2.updateRow(row)\r\n if row:\r\n del row\r\n if rowsConcat2:\r\n del rowsConcat2\r\n except:\r\n log_error(\"Error in 22 Concatenate 4 fields: \", logFile)\r\n logFile.writelines(\"22 Concatenate 4 fields ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 23.Match mukim lot with mukim construct\r\n logFile.writelines(\"23 Match mukimlot with mukim construct at \" + str(datetime.datetime.now()) + \"\\n\")\r\n arcpy.env.workspace = config.TempDataGDB # \"G:\\\\Project\\\\GERIUPGRADE\\\\GPTools\\\\NotificationSysTools\\\\BCAReportProcessing\\\\Temp_data.gdb\"\r\n try:\r\n arcpy.MatchedMukimlotMukimConstruct()\r\n except:\r\n log_error(\"Error in 23 Match mukimlot with mukim construct: \", logFile)\r\n logFile.writelines(\"23 Match mukimlot with mukim construct ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 24.Copy raw values to project lot and project mukim columns and delete the 2 fields\r\n logFile.writelines(\"24 Recalculate projlot and projmukim based on original values starts at \" + str(\r\n datetime.datetime.now()) + \"\\n\")\r\n try:\r\n rowsRaw = arcpy.UpdateCursor(config.MatchedMukimLot)\r\n\r\n for row in rowsRaw:\r\n row.PROJ_MUKIM_NOS = row.PROJECTMUKIM_RAW\r\n row.PROJ_LOT_NOS = row.PROJECTLOT_RAW\r\n rowsRaw.updateRow(row)\r\n if row:\r\n del row\r\n if rowsRaw:\r\n del rowsRaw\r\n except:\r\n log_error(\"Error in 24 Recalculate projlot and projmukim based on original values:\", logFile)\r\n logFile.writelines(\"24 Recalculate projlot and projmukim based on 
original values ends at \" + str(\r\n datetime.datetime.now()) + \"\\n\")\r\n\r\n # 25. Export Cleaned BCA Permit report for CWD\r\n logFile.writelines(\r\n \"25 Export of Cleaned BCA Permit report starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n # Initialize the file\r\n CleanedBCAPermitReport = xlwt.Workbook()\r\n book = CleanedBCAPermitReport.add_sheet('Book 1')\r\n countrow = 0\r\n countcol = 0\r\n fields = ['Project Ref No', 'Project Title', 'House Blk No', 'Road Name', 'Level No', 'Unit No',\r\n 'Building Name', 'Postal Code', 'Project Mukim nos', 'Project Lot nos', 'Permit Type of Work',\r\n 'Type of Work', \"Owner's name\", \"Owner's firm name\", \"Owner's address\", \"Owner's Tel No\",\r\n \"Owner's Email address\", \"Builder's name\", \"Builder's firm name\", \"Builder's address\",\r\n \"Builder's Tel No\", \"Builder's email address\", \"PE's name\", \"PE's firm name\", \"PE's address\",\r\n \"PE's Tel No\", \"PE's Email address\", \"Architect's name\", \"Architect's firm name\",\r\n \"Architect's address\", \"Architect's Tel No\", \"Architect's Email address\", 'Project Cost',\r\n 'Project Duration', 'Approval Date(DD/MM/YYYY)']\r\n for fieldname in fields:\r\n book.write(countrow, countcol, fieldname)\r\n countcol += 1\r\n CleanedBCAPermitReport.save(config.CleanedBCAPermitFolder + \"\\\\\" + input_file_name.split(\".\")[0] + \".xls\")\r\n\r\n # Copy the data to Excel File\r\n data = arcpy.SearchCursor(config.MatchedMukimLot)\r\n\r\n countrow = 1\r\n countcol = 0\r\n for row in data:\r\n message = [row.PROJ_REF_NO, row.PROJ_TITLE, row.HOUSE_BLK_NO, row.ROAD_NAME, row.LEVEL_NO,\r\n row.UNIT_NO, row.BUILDING_NAME, row.POSTAL_CODE, row.PROJ_MUKIM_NOS, row.PROJ_LOT_NOS,\r\n row.PERMIT_WORK_TYPE, row.WORK_TYPE, row.OWNER_NAME, row.OWNER_FIRM_NAME, row.OWNER_ADDR,\r\n row.OWNER_TEL, row.OWNER_EMAIL, row.BUILDER_NAME, row.BUILDER_FIRM_NAME,\r\n row.BUILDER_ADDR, row.BUILDER_TEL, row.BUILDER_EMAIL, row.PE_NAME, row.PE_FIRM_NAME,\r\n row.PE_ADDR, row.PE_TEL, row.PE_EMAIL, row.ARCHITECT_NAME, row.ARCHITECT_FIRM_NAME,\r\n row.ARCHITECT_ADDR, row.ARCHITECT_TEL, row.ARCHITECT_EMAIL, row.PROJ_COST,\r\n row.PROJ_DURATION_MTHS, row.PROJ_APPROVAL_DATE]\r\n countcol = 0\r\n for element in message:\r\n book.write(countrow, countcol, element)\r\n countcol += 1\r\n countrow += 1\r\n CleanedBCAPermitReport.save(config.CleanedBCAPermitFolder + \"\\\\\" + input_file_name.split(\".\")[0] + \".xls\")\r\n if row:\r\n del row\r\n if data:\r\n del data\r\n except:\r\n log_error(\"Error in 25 Export of Cleaned BCA Permit Report: Error in 26 Catchment calculation: \", logFile)\r\n logFile.writelines(\"25 Export of Cleaned BCA Permit Report ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 26. Catchment calculation\r\n arcpy.env.workspace = config.TempDataGDB\r\n logFile.writelines(\"26 Catchment calculation starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n arcpy.CatchmentCalculation()\r\n except:\r\n log_error(\"Error in 26 Catchment calculation: \", logFile)\r\n logFile.writelines(\"26 Catchment calculation ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 27. Depot calculation\r\n logFile.writelines(\"27 Depot calculation starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n arcpy.DepotCalculation()\r\n except:\r\n log_error(\"Error in 27 Depot calculation: \", logFile)\r\n logFile.writelines(\"27 Depot calculation ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 28. 
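Steps 28 and 29 below stamp the permit date onto the feature class and derive PROJ_END_DATE by adding the project duration to the approval date through an add_months() helper defined elsewhere in the script. A calendar-safe helper with that behaviour (an assumption, since its definition lies outside this excerpt) can be sketched as:

import calendar
import datetime

def add_months(sourcedate, months):
    """Add whole months, clamping the day to the last day of the target month."""
    month = sourcedate.month - 1 + months
    year = sourcedate.year + month // 12
    month = month % 12 + 1
    day = min(sourcedate.day, calendar.monthrange(year, month)[1])
    return datetime.datetime(year, month, day)

print(add_months(datetime.datetime(2016, 1, 31), 1))   # 2016-02-29, day clamped in a leap year
print(add_months(datetime.datetime(2016, 7, 15), 18))  # 2018-01-15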
Re-add date fields and populate\r\n logFile.writelines(\"28 Re-add date fields and populate starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n arcpy.AddField_management(config.MUKIMCONSTRUCT_Temp, \"PERMIT_DATE\", \"Date\")\r\n arcpy.AddField_management(config.MUKIMCONSTRUCT_Temp, \"PROJ_APPROVAL_DATE2\", \"Date\")\r\n arcpy.AddField_management(config.MUKIMCONSTRUCT_Temp, \"PROJ_END_DATE\", \"Date\")\r\n\r\n rows = arcpy.UpdateCursor(config.MUKIMCONSTRUCT_Temp)\r\n\r\n for row in rows:\r\n date = filedate.strftime(\"%Y%m%d\")\r\n year = int(date[:4])\r\n month = int(date[4:6])\r\n day = int(date[6:8])\r\n permit_date = datetime.datetime(year, month, day)\r\n row.PERMIT_DATE = permit_date\r\n row.PROJ_APPROVAL_DATE2 = datetime.datetime.strptime(row.PROJ_APPROVAL_DATE, '%d/%m/%Y')\r\n rows.updateRow(row)\r\n if row:\r\n del row\r\n if rows:\r\n del rows\r\n except:\r\n log_error(\"Error in 28 Re-add fields and populate: \", logFile)\r\n logFile.writelines(\"28 Re-add fields and populate ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 29. Calculate the end date field\r\n logFile.writelines(\"29 Calculate the end date field starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n\r\n rowsEndDate = arcpy.UpdateCursor(config.MUKIMCONSTRUCT_Temp)\r\n\r\n for row in rowsEndDate:\r\n sourcedate = row.PROJ_APPROVAL_DATE2\r\n # sourcedate = datetime.datetime.strptime(row.PROJ_APPROVAL_DATE2 , '%d/%m/%Y')\r\n months = int(row.PROJ_DURATION_MTHS)\r\n d = add_months(sourcedate, months)\r\n row.PROJ_END_DATE = d\r\n rowsEndDate.updateRow(row)\r\n if row:\r\n del row\r\n if rowsEndDate:\r\n del rowsEndDate\r\n except:\r\n log_error(\"Error in 29 Calculate the end date field: \", logFile)\r\n logFile.writelines(\"29 Calculate the end date field ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 30. Calculate Project Total Area\r\n logFile.writelines(\"30 Project total area calculation starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n arcpy.ProjectTotalArea()\r\n except:\r\n log_error(\"Error in 30 Project total area calculation: \", logFile)\r\n logFile.writelines(\"30 Project total area calculation ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 31. Calculate the BCA_CORRECTED_BY\r\n logFile.writelines(\"31 Calculate the BCA_CORRECTED_BY starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n rows_BCA_CB = arcpy.UpdateCursor(config.MUKIMCONSTRUCT_Temp)\r\n\r\n for row in rows_BCA_CB:\r\n if \"\\WSN\\\\\" in BCAreport:\r\n row.BCA_CORRECTED_BY = \"WSN\"\r\n elif \"\\WRN\\\\\" in BCAreport:\r\n row.BCA_CORRECTED_BY = \"WRN\"\r\n elif \"\\CWD\\\\\" in BCAreport:\r\n row.BCA_CORRECTED_BY = \"CWD\"\r\n rows_BCA_CB.updateRow(row)\r\n if row:\r\n del row\r\n if rows_BCA_CB:\r\n del rows_BCA_CB\r\n except:\r\n log_error(\"Error in 31 Calculate the BCA_CORRECTED_BY: \", logFile)\r\n\r\n # 32. 
Remove spaces in PROJ_REF_NO\r\n logFile.writelines(\r\n \"32 Removing of spaces in mukim and project lot starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n try:\r\n rowsSpaces = arcpy.UpdateCursor(config.MUKIMCONSTRUCT_Temp)\r\n\r\n for row in rowsSpaces:\r\n lot_no_spaces = row.PROJ_REF_NO.strip()\r\n row.PROJ_REF_NO = lot_no_spaces\r\n rowsSpaces.updateRow(row)\r\n if row:\r\n del row\r\n if rowsSpaces:\r\n del rowsSpaces\r\n except:\r\n log_error(\"Error in 32 Removing of spaces in mukim and project lot: \", logFile)\r\n logFile.writelines(\r\n \"32 Removing of spaces in mukim and project lot ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 33. Process the Mukim Construct by Project\r\n logFile.writelines(\r\n \"33 Process the Mukim Construct by Project starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n arcpy.env.overwriteOutput = True\r\n try:\r\n MUKIM_CONSTRUCT_BYPROJ_IMPORT = config.TempDataGDB + \"\\\\MUKIM_CONSTRUCT_BYPROJ_IMPORT\"\r\n MUKIMCONBYPROJ_SORT = config.TempDataGDB + \"\\\\MUKIMCONBYPROJ_SORT\"\r\n MUKIM_CONSTRUCT_BYPROJ_DISS = config.TempDataGDB + \"\\\\MUKIM_CONSTRUCT_BYPROJ_DISS\"\r\n MUKIM_CONSTRUCT_BYPROJ_DISS__2_ = config.TempDataGDB + \"\\\\MUKIM_CONSTRUCT_BYPROJ_DISS\"\r\n\r\n if arcpy.Exists(MUKIM_CONSTRUCT_BYPROJ_IMPORT):\r\n arcpy.Delete_management(MUKIM_CONSTRUCT_BYPROJ_IMPORT)\r\n if arcpy.Exists(MUKIMCONBYPROJ_SORT):\r\n arcpy.Delete_management(MUKIMCONBYPROJ_SORT)\r\n if arcpy.Exists(MUKIM_CONSTRUCT_BYPROJ_DISS):\r\n arcpy.Delete_management(MUKIM_CONSTRUCT_BYPROJ_DISS)\r\n\r\n arcpy.MUKIMCONBYPROJ()\r\n # arcpy.MUKIMCONSTRUCTBYPROJProcess2()\r\n\r\n arcpy.Sort_management(MUKIM_CONSTRUCT_BYPROJ_IMPORT, MUKIMCONBYPROJ_SORT, \"PROJ_END_DATE DESCENDING\",\r\n \"UR\")\r\n arcpy.Dissolve_management(MUKIMCONBYPROJ_SORT, MUKIM_CONSTRUCT_BYPROJ_DISS, \"PROJ_REF_NO\",\r\n \"LOT_KEY FIRST;PROJ_REF_NO FIRST;PROJ_TITLE FIRST;HOUSE_BLK_NO FIRST;ROAD_NAME FIRST;POSTAL_CODE FIRST;LEVEL_NO FIRST;UNIT_NO FIRST;BUILDING_NAME FIRST;PROJ_MUKIM_NOS FIRST;PROJ_LOT_NOS FIRST;PERMIT_WORK_TYPE FIRST;WORK_TYPE FIRST;OWNER_NAME FIRST;OWNER_FIRM_NAME FIRST;OWNER_ADDR FIRST;OWNER_TEL FIRST;OWNER_EMAIL FIRST;BUILDER_NAME FIRST;BUILDER_FIRM_NAME FIRST;BUILDER_ADDR FIRST;BUILDER_TEL FIRST;BUILDER_EMAIL FIRST;PE_NAME FIRST;PE_FIRM_NAME FIRST;PE_ADDR FIRST;PE_TEL FIRST;PE_EMAIL FIRST;ARCHITECT_NAME FIRST;ARCHITECT_FIRM_NAME FIRST;ARCHITECT_ADDR FIRST;ARCHITECT_TEL FIRST;ARCHITECT_EMAIL FIRST;PROJ_TOT_AREA FIRST;PROJ_PARENT_CWDCATCHMENT FIRST;PROJ_PARENT_WSNDEPOT FIRST;PROJ_PARENT_WRPCATCHMENT FIRST;BCA_CORRECTED_BY FIRST;PROJ_DURATION_MTHS FIRST;PROJ_COST FIRST\",\r\n \"MULTI_PART\", \"DISSOLVE_LINES\")\r\n arcpy.JoinField_management(MUKIM_CONSTRUCT_BYPROJ_DISS, \"FIRST_PROJ_REF_NO\", MUKIMCONBYPROJ_SORT,\r\n \"PROJ_REF_NO\", \"PROJ_APPROVAL_DATE;PROJ_END_DATE;PERMIT_DATE\")\r\n arcpy.CalculateField_management(MUKIM_CONSTRUCT_BYPROJ_DISS__2_, \"FIRST_PROJ_TOT_AREA\",\r\n \"[Shape_Area]/10000\", \"VB\", \"\")\r\n\r\n except:\r\n log_error(\"Error in 33 Process the Mukim Construct by Project: \", logFile)\r\n logFile.writelines(\r\n \"33 Process the Mukim Construct by Project ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n arcpy.AddMessage(\"33 END process MUKIM CONSTRUCT\")\r\n\r\n # 34. 
Filter on-going projects\r\n\r\n logFile.writelines(\"34 Filter on-going projects starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n # TempDataGDB = \"G:\\\\Project\\\\GERIUPGRADE\\\\GPTools\\\\NotificationSysTools\\\\BCAReportProcessing\\\\Temp_data.gdb\"\r\n MUKIM_CONSTRUCT_BYPROJ_DISS = config.TempDataGDB + \"\\\\MUKIM_CONSTRUCT_BYPROJ_DISS\"\r\n rowsIn = arcpy.UpdateCursor(MUKIM_CONSTRUCT_BYPROJ_DISS)\r\n\r\n row = None\r\n for row in rowsIn:\r\n strdays = str(row.PROJ_END_DATE.date() - datetime.date.today())\r\n splitDays = strdays.split()\r\n if splitDays[0] == '0:00:00':\r\n result = \"On-going project (but will end today)\"\r\n else:\r\n if int(splitDays[0]) < 0:\r\n rowsIn.deleteRow(row)\r\n else:\r\n result = \"On-going project\"\r\n if rowsIn:\r\n del rowsIn\r\n if row:\r\n del row\r\n\r\n except:\r\n log_error(\"Error in 34 Filter on-going projects: \", logFile)\r\n logFile.writelines(\"34 Filter on-going projects ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 35. Append the new data to MUKIM_CONSTRUCT\r\n logFile.writelines(\r\n \"35 Append the new data to MUKIM_CONSTRUCT starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n arcpy.AppendNewData()\r\n except:\r\n log_error(\"Error in 35 Append the new data to MUKIM_CONSTRUCT: \", logFile)\r\n logFile.writelines(\r\n \"35 Append the new data to MUKIM_CONSTRUCT ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # Clean the memory and the schema lock\r\n arcpy.RefreshCatalog(config.Notification)\r\n arcpy.Compact_management(config.TempDataGDB)\r\n gc.collect()\r\n\r\n # Status update to run/not run the SiteInspection Update\r\n Log_SiteInspectionUpdate = file(config.SiteInspectionUpdate, \"w\")\r\n Log_SiteInspectionUpdate.writelines(\"YES\")\r\n Log_SiteInspectionUpdate.close()\r\n\r\n arcpy.AddMessage(\"END BCA Processing\")\r\n arcpy.AddMessage(\"Passing file date to other functions: \" + repr(filedate))\r\n\r\n # Generate Report\r\n import ReportGeneration_Adhoc_WithProjects as gen_report\r\n gen_report.run(filedate)\r\n #\r\n # # Send email to departments\r\n # import EmailGenerationCompletion_adhoc as send_dept_notification\r\n # if \"CORRECTED\" in BCAreport.upper():\r\n # send_dept_notification.run(filedate, corrected=True)\r\n # else:\r\n # send_dept_notification.run(filedate)\r\n\r\n # Generate advisory letters\r\n import LetterGeneration as letter_gen\r\n letter_gen.run(filedate)\r\n #\r\n # # Send letters to project team\r\n # import EmailGeneration as send_advisory_email\r\n # send_advisory_email.run(filedate)\r\n\r\n\r\n # 36. 
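The final step (36, below) moves each processed report into the backup folder, renaming the copy with a timestamp when a file of the same name is already there. Outside the arcpy environment the same behaviour can be sketched as follows (folder paths are placeholders):

import datetime
import os
import shutil

def archive_report(report_path, backup_folder):
    """Move report_path into backup_folder, prefixing a timestamp on name collisions."""
    name = os.path.basename(report_path)
    target = os.path.join(backup_folder, name)
    if os.path.exists(target):
        stamped = datetime.datetime.now().strftime("%Y%m%d-%H%M") + name
        shutil.copy(report_path, os.path.join(backup_folder, stamped))
        os.remove(report_path)
    else:
        shutil.move(report_path, backup_folder)

# archive_report(r"C:\reports\20160712_BCA_permits.xls", r"C:\reports\backup")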
Move the BCAReport in the backup folder\r\n for BCAreport in correct_config_files:\r\n\r\n input_file_name = BCAreport.split(\"\\\\\")[-1]\r\n bk_file_path = os.path.join(config.BCAreportBackupFolder, input_file_name)\r\n\r\n # if the same file name exists in the backup folder, rename the new file with timestamp and move\r\n if os.path.exists(bk_file_path):\r\n\r\n new_filename = datetime.datetime.now().strftime(\"%Y%m%d-%H%M\") + input_file_name\r\n new_filepath = os.path.join(config.BCAreportBackupFolder, new_filename)\r\n shutil.copy(BCAreport, new_filepath)\r\n os.remove(BCAreport)\r\n\r\n # if the filename does not exist in the backup folder, move the file to backup\r\n else:\r\n shutil.move(BCAreport, config.BCAreportBackupFolder)\r\n\r\n logFile.writelines(\"Moved the BCA report to the backup folder at \" + str(datetime.datetime.now()) + \"\\n\")\r\n logFile.close()", "def main():\n\n\t# =========== Skim file & output file ===========\n\tskimLoc = \"$MJDDATADIR/surfmjd/analysis/skim/DS1/20160621_265313037/*.root\"\n\t# skimLoc = \"/Users/wisecg/datasets/ds1/*.root\"\n\t# wsOut = \"./output/waveSkim-1550-1650.root\"\n\twsOut = \"./output/waveSkim-1500-2000-mH-2.root\"\n\n\t# =========== Skim file cuts ===========\n\tburstCut = \"!(time_s > 2192e3 && time_s < 2195e3) && !(time_s > 7370e3 && time_s < 7371e3) && !(time_s > 7840e3 && time_s < 7860e3) && !(time_s > 8384e3 && time_s < 8387e3) && !(time_s > 8984e3 && time_s < 8985e3) && !(time_s > 9002e3 && time_s < 9005e3) && run != 13075 && run != 13093 && run != 13116\"\n\n\t# low-energy noisy runs cut - need to research & refine\n\t# runCut = \"run!=13312 && run!=13121 && run!=13004 && run!=12766 && run!=12735 && run!=12445 && run!=11175 && run!=12723 && run!=12746 && run!=12767 && run!=13071 && run!=13073 && run!=13074 && run!=13120 && run!=13205 && run!=13306 && run!=13307 && run!=9857 && run!=9862 && run!=9863\"\n\n\t# bigCut = \"channel%2==0 && mH==1 && (trapENFCal>1550 && trapENFCal<1650) && !wfDCBits && !muVeto && !isLNFill &&\" + burstCut\n\n\tbigCut = \"channel%2==0 && mH>1 && sumEH>1500 && !wfDCBits && isGood && \" + burstCut\n\n\t# =========== Ready? Go! 
===========\n\tskimmer(bigCut, skimLoc, wsOut)\n\t# skimChecker(wsOut)", "def run_script(input_dir, output_dir, output_file, bstp_num):\n\n \"\"\"+++++++++++++++++++++++++++++++++++\"\"\"\n print(\"\"\" Load results from step 1 & 2 \"\"\")\n start_0 = time.time()\n data_dim_file_name = output_dir + \"/temp/data_dim.mat\"\n mat = loadmat(data_dim_file_name)\n data_dim = mat['data_dim']\n data_dim = np.array([int(i) for i in data_dim[0, :]])\n n, l, m, p, g, g_num = data_dim\n y_design_file_name = output_dir + \"/temp/y_design.mat\"\n mat = loadmat(y_design_file_name)\n y_design = mat['y_design']\n resy_design_file_name = output_dir + \"/temp/resy_design.mat\"\n mat = loadmat(resy_design_file_name)\n resy_design = mat['resy_design']\n efit_eta_file_name = output_dir + \"/temp/efit_eta.mat\"\n mat = loadmat(efit_eta_file_name)\n efit_eta = mat['efit_eta']\n esig_eta_file_name = output_dir + \"/temp/esig_eta.mat\"\n mat = loadmat(esig_eta_file_name)\n esig_eta = mat['esig_eta']\n hat_mat_file_name = output_dir + \"/temp/hat_mat.mat\"\n mat = loadmat(hat_mat_file_name)\n hat_mat = mat['hat_mat']\n snp_file_name = output_dir + \"/temp/snp.mat\"\n mat = loadmat(snp_file_name)\n snp = mat['snp']\n # read the image size\n img_size_file_name = input_dir + \"img_size.txt\"\n img_size = np.loadtxt(img_size_file_name)\n img_size = np.array([int(i) for i in img_size])\n # read the image index of non-background region\n img_idx_file_name = input_dir + \"img_idx.txt\"\n img_idx = np.loadtxt(img_idx_file_name)\n img_idx = np.array([int(i) for i in img_idx])\n end_0 = time.time()\n print(\"Elapsed time in Step 3 is \", end_0 - start_0)\n\n \"\"\"+++++++++++++++++++++++++++++++++++\"\"\"\n print(\"\"\" Step 3. Significant locus-voxel and locus-subregion detection \"\"\")\n start_3 = time.time()\n alpha = 1e-5\n c_alpha = -10**alpha\n bstp_num = int(bstp_num)\n max_stat_bstp, max_area_bstp = wild_bstp(snp, y_design, resy_design, efit_eta, esig_eta, hat_mat,\n img_size, img_idx, c_alpha, g_num, bstp_num)\n print(max_stat_bstp)\n print(max_area_bstp)\n bstp_out = np.hstack((max_stat_bstp, max_area_bstp))\n bstp_out_file_name = output_dir + output_file\n np.savetxt(bstp_out_file_name, bstp_out)\n end_3 = time.time()\n print(\"Elapsed time in Step 3 is \", end_3 - start_3)", "def main():\n run_test_suite('../models/iEK1008.json') # runs test suite with iEK1008.json\n\n # rewrites iEK1008.json to iMtb_H37Rv.json so original model is not overwritten\n model_iek = cobra.io.load_json_model('../models/iEK1008.json')\n cobra.io.save_json_model(model_iek, '../models/iMtb_H37Rv.json')\n model = cobra.io.load_json_model('../models/iMtb_H37Rv.json')\n\n # removes 10 imbalanced reactions from iEK1008; all 10 reactions are added back with balanced formulas during update\n rxns_to_bal = [rxn.id for rxn in model.reactions if len(rxn.check_mass_balance()) > 0\n if 'EX_' not in rxn.id and 'DM_' not in rxn.id and 'BIOMASS' not in rxn.id]\n\n for rxn_to_bal in rxns_to_bal:\n model.reactions.get_by_id(rxn_to_bal).remove_from_model()\n cobra.io.save_json_model(model, '../models/iMtb_H37Rv.json')\n\n run_test_suite('../models/iMtb_H37Rv.json', update='imbalanced_reactions_removed')\n\n # creates COBRApy Metabolite objects for new metabolites\n df_new_mets = pd.read_excel('../data/iEK1008_updates.xlsx', sheet_name='metabolites_added', usecols='A:C')\n\n new_mets = {}\n for index, row in df_new_mets.iterrows():\n new_met_id = str(row['Metabolite_ID'])\n new_met_name = row['Metabolite_Name']\n new_met_formula = 
row['Metabolite_Formula']\n if new_met_id.endswith('c'):\n new_met_comp = 'c'\n elif new_met_id.endswith('e'):\n new_met_comp = 'e'\n else:\n print('Metabolite compartment could not be determined. Please check metabolite id.')\n new_met_comp = ''\n new_met = cobra.Metabolite(new_met_id, name=new_met_name, formula=new_met_formula, compartment=new_met_comp)\n new_mets[new_met_id] = new_met\n\n df_new_rxns = pd.read_excel('../data/iEK1008_updates.xlsx', sheet_name='reactions_added', usecols='A:G')\n\n with alive_bar(len(df_new_rxns), bar='blocks', spinner='notes_scrolling') as bar:\n for index, row in df_new_rxns.iterrows():\n new_rxn_mets = {}\n new_rxn_form = row['Reaction_Formula']\n if ' --> ' in new_rxn_form:\n new_rxn_form = new_rxn_form.split(' --> ')\n elif ' <=> ' in new_rxn_form:\n new_rxn_form = new_rxn_form.split(' <=> ')\n else:\n print('Unexpected symbol in ' + row['Reaction_Formula'])\n\n subs = new_rxn_form[0].split(' + ')\n for sub in subs:\n if '.0' in sub:\n sub_coeff = -1 * float(sub.split(' ')[0])\n sub_id = sub.split(' ')[-1]\n try:\n new_rxn_sub = new_mets[sub_id]\n except KeyError: # metabolite is not new, i.e. already in iEK1008\n new_rxn_sub = model.metabolites.get_by_id(sub_id)\n else:\n sub_coeff = -1.0\n try:\n new_rxn_sub = new_mets[sub]\n except KeyError:\n new_rxn_sub = model.metabolites.get_by_id(sub)\n new_rxn_mets[new_rxn_sub] = sub_coeff\n\n pros = new_rxn_form[1].split(' + ')\n for pro in pros:\n if '.0' in pro:\n pro_coeff = float(pro.split(' ')[0])\n pro_id = pro.split(' ')[-1]\n try:\n new_rxn_pro = new_mets[pro_id]\n except KeyError:\n new_rxn_pro = model.metabolites.get_by_id(pro_id)\n else:\n pro_coeff = 1.0\n try:\n new_rxn_pro = new_mets[pro]\n except KeyError:\n new_rxn_pro = model.metabolites.get_by_id(pro)\n new_rxn_mets[new_rxn_pro] = pro_coeff\n\n # creates new reactions with new COBRApy Reaction and Metabolite objects\n create_reaction(model, row['Reaction_ID'], row['Reaction_Name'], row['Subsystem'], new_rxn_mets,\n float(row['Lower_Bound']), float(row['Upper_Bound']), row['Gene_Reaction_Rule'])\n\n cobra.io.save_json_model(model, '../models/iMtb_H37Rv.json')\n\n run_test_suite('../models/iMtb_H37Rv.json', update=row['Reaction_ID'])\n\n bar()\n\n return", "def consolidate(max_rounds, int_fwm,master_index, index, filename = 'data_large'):\n\n\n layer_0 = '0/0'\n filepath = 'output{}/output{}/data/'.format(master_index, index)\n file_read = filepath + filename\n file_save = filepath + filename+'_conc'\n \n # Input data, small, no need to cons\n D = read_variables(file_read, '0/0')\n save_variables(file_save, 'input', **D)\n\n if max_rounds ==0:\n max_rounds +=1\n U_cons = np.zeros([4,max_rounds, 7*int_fwm.nt], dtype = np.complex128)\n # Reading of all the oscillating spectra and sending them to a 3D array\n unfortmated_string = '{}/{}/U'\n with h5py.File(file_read+'.hdf5', 'r') as f:\n for pop in range(1,5):\n for r in range(max_rounds):\n U_cons[pop - 1,r,:] = f.get(unfortmated_string.format(pop,r)).value\n save_variables(file_save, 'results', U = U_cons) \n os.system('mv '+file_save+'.hdf5 '+file_read+'.hdf5')\n return None", "def chunk(wb_run,sample_run,ei_guess,rebin,mapingfile,nchunk,**kwargs):\n global reducer,rm_zero,inst_name,van_mass,bleed_switch,rate,pixels\n print 'DGreduce run for ',inst_name,'run number ',sample_run\n try:\n n,r=lhs('both')\n wksp_out=r[0]\n except:\n if sample_run == 0:\n #deal with the current run being parsed as 0 rather than 00000\n sample_run='00000'\n wksp_out=inst_name+str(sample_run)+'.spe'\n if 
kwargs.has_key('sum') and kwargs.get('sum')==True:\n wksp_out=inst_name+str(sample_run[0])+'sum'+'.spe'\n \n start_time=time.time()\n \n if sample_run=='00000' and mtd.doesExist(inst_name+'00000.raw')==True:\n print 'Deleteing previous instance of temp data'\n DeleteWorkspace(Workspace=inst_name+'00000.raw')\n \n \n reducer.energy_bins = rebin\n \n mon_list1=reducer.ei_mon_spectra\n mon_list2=reducer.mon1_norm_spec\n mon_list1.append(mon_list2)\n #mon_list1.sort()\n print 'Monitors for this chunk are: ',mon_list1\n # monitors for merlin[69634,69638]\n \n if inst_name == 'MER':\n #number of spectrums per instrument and where the detectors start (i.e. 5 for mari but 1 for merlin)\n numspec=69632\n spectrum_start=1\n if inst_name == 'MAP':\n #number of spectrums per instrument and where the detectors start (i.e. 5 for mari but 1 for merlin)\n numspec=41472\n spectrum_start=1\n \n if kwargs.has_key('det_cal_file'):\n cal_file = kwargs.get('det_cal_file') \n else:\n print 'Setting detector calibration to detector block info from ', sample_run\n \n reducer.det_cal_file =None\n reducer.relocate_dets = False\n nums=range(spectrum_start,numspec,nchunk)\n output_wkspName=wksp_out\n for i in nums:\n print '=========================================================================='\n print 'start spectra for this chunk',i\n chunk=range(i,i+nchunk)\n endIndex=nchunk-1\n if i+nchunk > numspec:\n chunk=range(i,numspec+1)\n endIndex=len(chunk)-1\n print 'end spectra for this chunk ', i+endIndex\n \n speclist=mon_list1+chunk\n #print speclist\n LoadRaw(Filename=wb_run,OutputWorkspace=\"wb_wksp\",LoadLogFiles=\"0\",SpectrumList=speclist)\n \n LoadRaw(Filename=sample_run,OutputWorkspace=\"run_wksp\",LoadLogFiles=\"0\",SpectrumList=speclist)\n \n tmp=arb_units(\"wb_wksp\",\"run_wksp\",ei_guess,rebin,'none_for_this_run_type',one2one=True,bleed=False,**kwargs)\n \n \n DeleteWorkspace(Workspace=\"wb_wksp\")\n DeleteWorkspace(Workspace=\"run_wksp\")\n #DeleteWorkspace(\"_wksp.spe\")\n #DeleteWorkspace(\"_wksp.spe-white\")\n \n if i == spectrum_start:\n #crop the workspace to remove the monitors, the workpsace seems sorted on specnumber so this is ok for instruments where the monitors are at the end of the \n # spectrum list\n CropWorkspace(InputWorkspace=tmp,OutputWorkspace=wksp_out,StartWorkSpaceIndex=0,EndWorkSpaceIndex=endIndex)\n else:\n CropWorkspace(InputWorkspace=tmp,OutputWorkspace=tmp,StartWorkSpaceIndex=0,EndWorkSpaceIndex=endIndex)\n ConjoinWorkspaces(InputWorkspace1=wksp_out,InputWorkspace2=tmp,CheckOverlapping='0')\n print int(((float(i+endIndex))/float(numspec))*100),'% complete'\n print '===============================================================================' \n \n GroupDetectors(InputWorkspace=output_wkspName,OutputWorkspace=output_wkspName,MapFile=mapingfile)\n\n \n \n print 'Elapsed time =',time.time()-start_time, 's'\n return mtd[wksp_out]", "def main():\n # Verbosity: 1=Selection Results, >1 is various debugging information\n verbose = 0\n print \"build_all.py running with verbose=%s\"%(str(verbose))\n if verbose:\n print \"Fiducial Cut: \",fid_cut_hex,\"(apethum, z_min, z_max)\"\n print \"Max Drift Distance = %.4f us\"%(max_drift_time)\n\n tree = get_data_tree(list='All') # Golden All\n \n # We use the EXOFitting processed tree to get high-level physical quantities\n # like the anticorrelated energy, etc. 
\n #ptree_file = ROOT.TFile(preprocessed_tree)\n #ptree = ROOT.Get(\"dataTree\")\n #if verbose: print \"Indexing EXOFitting PreProcessed tree\"\n #ptree.BuildIndex(\"runNum\", \"eventNum\")\n #if verbose: print \" ...done\"\n\n cuts = \"\"\n\n #There must be at least 1 scintillation cluster:\n #cuts = \"@fScintClusters.size()>=1\"\n #cuts = \"(fScintClusters.GetCountsOnAPDPlane(0)+fScintClusters.GetCountsOnAPDPlane(1))>20000\"\n\n # The minimum scintinlation counts must be > 20000 and <70000\n # I observe that three peaks presumable alphas are at 38500, 42200, and 55000\n # So Rn222=5.4MeV, Po218=6MeV, Po214=7.7MeV\n # calibrate:: y=mx+b, m=6167, b=5198\n #cuts = \"fScintClusters.fRawEnergy>20000 && fScintClusters.fRawEnergy<70000\"\n #cuts += \"&& fScintClusters.fRawEnergy>22000 && fScintClusters.fRawEnergy<80000\"\n #cuts += \" && Sum$(fAPDs.fRawCounts) > 8000\"\n\n # Ignore Noise and Muon tagged events\n cuts +=\"fEventHeader.fTaggedAsNoise==0 && fEventHeader.fTaggedAsMuon==0\" \n\n # That's the last of the cuts, lets show the user what the cut looks like\n print \"Applying Cuts to data: \\n%s\"%cuts\n\n #Draw is the fastest method to apply cuts, in the end what we want is a reduced data list\n # to perform a more targeted analysis...\n tree.Draw(\">>+elist_alpha_canidates\",cuts,\"goff\")\n elist_alpha_canidates = ROOT.gDirectory.Get(\"elist_alpha_canidates\")\n print \"There are %d events passing the initial cuts\"%elist_alpha_canidates.GetN()\n\n #Now we have to look at events passing the cuts individually\n tf = ROOT.TFile(\"all.root\",\"RECREATE\")\n Rntree = tree.CloneTree(0)\n \n for i in range(elist_alpha_canidates.GetN()):\n # Print Progress\n if i%int(elist_alpha_canidates.GetN()/20) == 0:\n print \"%d of %d\"%(i,elist_alpha_canidates.GetN())\n\n #Grab the event data\n tree.GetEntry(elist_alpha_canidates.GetEntry(i))\n #ed = tree.EventBranch\n #if verbose>1: print_event_data(ed,verbose)\n\n #is_alphaish = check_alpha_like(ed,verbose)\n \n #is the event a fully reconstructed BiPo?\n #is_bipo = check_full_BiPo(ed,verbose)\n\n # Case1 (position matched Bi-Po)\n #is_case1 = check_case1(ed,verbose)\n #print \"BiPo=%s, Case1=%s\"%(is_bipo, is_case1) \n #raw_input('<hit any key to continue>')\n #if is_bipo or is_alphaish:\n # Write the EventData of events which pass any of our selection criteria\n # to ROOT file\n Rntree.Fill()\n\n Rntree.AutoSave()", "def main():\n print(time.time())\n data = '../datasets/between_phase/clean_df.csv'\n print(\"Process Beginning\")\n print(\"Reading Clean CSV\")\n clean_df = pd.read_csv(data, dtype={\"journey_pattern_id\": str})\n print(clean_df.shape)\n base_table = Base_Table(clean_df)\n print(\"Adding datetime\")\n base_table.add_datetime()\n print(\"Adding Day\")\n base_table.add_day()\n print(\"Adding Hour\")\n base_table.add_hour()\n print(\"Adding Time Bin\")\n base_table.add_time_bin()\n print(\"Adding Weekend Boolean\")\n base_table.add_weekend()\n print(\"Adding Distance\")\n base_table.add_distance_feature()\n print(\"Updating Stop Id\")\n base_table.add_nearest_stop_distance()\n print(\"Filtering Data\")\n base_table.remove_null_stops()\n print(\"Adding Travel Time\")\n base_table.add_travel_time()\n print(\"Adding Congestion\")\n base_table.congestion_feature()\n bs = base_table.get_df()\n bs.to_csv('../datasets/output_files/base_table.csv')\n return bs", "def main():\n print('Calculating gas compositions.')\n resf = 'results/cell_gas' # results folder\n if not os.path.isdir(resf):\n os.makedirs(resf)\n sizex = 0.03 # sample size\n sizey 
= 0.02 # sample size\n sizez = 0.02 # sample size\n volume = sizex**3 # sample volume\n volume = sizex * sizey * sizez # sample volume\n # polymer density\n rhop = INPUTS['polymer_density']\n # molecular weight\n mw_ba = INPUTS['molar_mass']\n # foam density\n rhof = INPUTS['foam_density']\n # cell size for Kelvin effect on saturated vapour pressure\n dcell = INPUTS['cell_size']\n # initial weight fraction of BA\n w_ba_ini = INPUTS['initial_weight_fraction']\n names = w_ba_ini.keys()\n if 'H2O' in w_ba_ini:\n if 'CO2' in w_ba_ini:\n print(\"WARNING: H2O and CO2 are both in initial_weight_fraction.\",\n \"We will sum these contributions.\")\n else:\n w_ba_ini['CO2'] = 0\n w_ba_ini['CO2'] += w_ba_ini['H2O'] * mw_ba['CO2'] / mw_ba['H2O']\n names.append('CO2')\n names.remove('H2O')\n temps = linspace(\n INPUTS['temperature']['min'],\n INPUTS['temperature']['max'],\n INPUTS['temperature']['points']\n )\n por = 1 - rhof / rhop # porosity\n m_foam = rhof * volume # foam sample weight\n m_pol = m_foam * (1 - sum(w_ba_ini.values())) # weight of polymer\n if ARGS['--verbose']:\n print('Foam weight {0:.3f} g'.format(m_foam * 1e3))\n args = [por, mw_ba, m_foam, m_pol, volume, dcell]\n for name in names:\n with open(os.path.join(resf, 'cell_gas_{0}.csv'.format(name)),\n 'w') as csvfile:\n fieldnames = ['temp', 'pres_ba', 'w_ba_g', 'w_ba_d', 'w_ba_c']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n for temp in temps:\n pres_ba, w_ba_g, w_ba_d, w_ba_c = initial_pressure(\n name, w_ba_ini[name], temp, args)\n writer.writerow(\n {'temp': temp, 'pres_ba': pres_ba, 'w_ba_g': w_ba_g,\n 'w_ba_d': w_ba_d, 'w_ba_c': w_ba_c})\n print('End.')", "def main():\n # \"\"\"Prepare neuromorphic MNIST image datasets for use in caffe\n # Each dataset will be generated with different number of unique spikes\n # \"\"\"\n # initial_size = 1e6 #best to make this big enough avoid expensive\n # re-allocation\n # test_dir = os.path.abspath('testFull')\n # train_dir = os.path.abspath('trainFull')\n\n # for num_spikes in range(150, 260, 10):\n # #test directory\n # image_dataset = generate_nmnist_dataset(initial_size, test_dir,\n # num_spikes, 0.75)\n # output_lmdb = 'testlmdb' + str(num_spikes)\n # database = save_to_lmdb(image_dataset, output_lmdb)\n # #database.process_all_data(show_lmdb_datum)\n\n # #train directory\n # image_dataset = generate_nmnist_dataset(initial_size, train_dir,\n # num_spikes, 0.75)\n # output_lmdb = 'trainlmdb' + str(num_spikes)\n # save_to_lmdb(image_dataset, output_lmdb)\n\n # TD = ev.read_dataset(os.path.abspath('trainReduced/0/00002.bin'))\n # best to make this big enough avoid expensive re-allocation\n initial_size = 6e5\n test_dir = os.path.abspath('testFull')\n train_dir = os.path.abspath('trainFull')\n\n # test directory\n image_dataset = generate_nmnist_continuous_dataset(initial_size, test_dir)\n save_to_lmdb(image_dataset, 'testlmdb_continuous', True)\n save_to_mat(image_dataset, 'MNIST_continuous_test.mat')\n # database.process_all_data(show_lmdb_datum)\n\n # train directory\n image_dataset = generate_nmnist_continuous_dataset(initial_size, train_dir)\n save_to_lmdb(image_dataset, 'trainlmdb_continuous', True)\n save_to_mat(image_dataset, 'MNIST_continuous_train.mat')\n\n # TD = ev.read_dataset(os.path.abspath('trainReduced/0/00002.bin'))", "def main():\n \n print('=== nyc taxi to airport - step 3 clean data')\n\n if os.path.exists(output_file):\n print(\"output file exists:\", output_file)\n print(\"skipping\")\n return\n\n df = load_data(input_file)\n df = 
clean_data(df)\n save_as_pickle_gz(df, output_file)\n \n print('done')", "def _worker():\n try:\n logger.info('Looping...')\n temp_list = []\n for file in ['data_unfcc.csv','data_ebal.csv']:\n temp_list.append(os.path.isfile(file))\n if not all(temp_list):\n print('Starting from scratch...')\n download_source()\n create_database()\n create_index()\n\n time_mod = datetime.strptime(time.ctime(os.stat('data_ebal.csv').st_mtime),'%a %b %d %H:%M:%S %Y')\n time_now = datetime.now()\n\n if (time_now - time_mod).seconds > 3600:\n download_source()\n get_updated_records('unfcc')\n get_updated_records('ebal')\n create_index()\n except Exception as e:\n logger.warning('Main Loop error')", "def main(workdir):\n dir = os.path.expanduser(workdir)\n \n #read the .dat file\n f = open('{}smi.dat'.format(dir))\n par = imp.load_source('par', '', f)\n \n #make a sdf file for visualization\n output = pybel.Outputfile(\"sdf\", dir + \"species.sdf\",overwrite=True)\n for name in par.smiles:\n smi = par.smiles[name]\n obmol = pybel.readstring(\"smi\",smi)\n output.write(obmol)\n output.close()\n \n #list with the jobs that need to be done\n jobs = []\n \n #iterate the input files\n for name in par.smiles:\n #name = input_file.replace('.inp','') #name of the calculation\n test_dir = dir + name #location where the calculations will be done\n if not os.path.exists(test_dir):\n os.mkdir(test_dir)\n \n #copy the input file to the working directory\n write_input_file(par,name,par.smiles[name],test_dir + '/input.inp')\n job = workdir + name + '/'\n jobs.append(job)\n \n run_threads(jobs, 'eric', max_running = 3)", "def GEEmacaGCMs(ptsFile,metric,timeStep,startYear,endYear,scenarios,buf,poly,models,\n username,folderOut, scalePix = 4000):\n \n # load required libraries\n import ee\n \n # Initialize the Earth Engine object, using the authentication credentials.\n ee.Initialize()\n\n ID_field = \"geeID\"\n\n #load pts or poly file\n pts1 = ee.FeatureCollection('users/' + username + '/' + str(ptsFile))\n\n time_d = {}\n time_d['month'] = 'projm'\n time_d['year'] = 'projy'\n \n for met in metric:\n\n for scenario in scenarios:\n\n for model in models:\n\n MACA = (ee.ImageCollection('IDAHO_EPSCOR/MACAv2_METDATA_MONTHLY')\n .select(met)\n .filterMetadata('model', 'equals', model)\n .filterMetadata('scenario', 'equals', scenario))\n\n metL = [met]\n \n years = list(range(startYear, endYear + 1))\n yearsEE = ee.List(years)\n \n if all([(timeStep == 'year'),any([(met == 'tasmin'),(met == 'tasmax'),\n (met == 'huss'),(met == 'rsds'),\n (met == 'was')])]):\n\n def map_m(i):\n i = ee.Number(i).int()\n image2 = (MACA\n .filter(ee.Filter.calendarRange(i, i, 'year'))\n .first())\n filtered = (MACA\n .filter(ee.Filter.calendarRange(i, i, 'year'))\n .mean()\n .copyProperties(image2,['system:time_start','system:time_end']))\n return filtered\n\n img_col = ee.ImageCollection(yearsEE.map(map_m).flatten())\n\n elif (timeStep == 'month'):\n \n img_col = MACA.filter(ee.Filter.calendarRange(startYear, endYear, 'year'))\n\n elif all([(timeStep == 'year'),(met == 'pr')]):\n\n def map_m(i):\n i = ee.Number(i).int()\n image2 = (MACA\n .filter(ee.Filter.calendarRange(i, i, 'year'))\n .first())\n filtered = (MACA\n .filter(ee.Filter.calendarRange(i, i, 'year'))\n .sum()\n .copyProperties(image2,['system:time_start','system:time_end']))\n return filtered\n\n img_col = ee.ImageCollection(yearsEE.map(map_m).flatten())\n\n #else:\n #print(\"incorrect time step specified\")\n \n if buf > 0:\n bufL = [buf]\n def bufferPoly(feature):\n return 
feature.buffer(bufL[0])\n\n ptsB = pts1.map(bufferPoly)\n def table_m(image):\n table = (image\n .select(metL[0])\n .reduceRegions(collection = ptsB.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = str(time_d[timeStep])+'_MACA_'+str(met)+'_'+scenario+'_'+model+'_'+str(years[0])+'_'+str(years[len(years)-1])+'_ptsB',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n \n #print ('buffered pts by:' + str(buf) + ' for MACA: ' + met + ' ' + scenario + ' ' + model)\n\n elif poly > 0:\n \n def table_m(image):\n table = (image\n .select(metL[0])\n .reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = str(time_d[timeStep])+'_MACA_'+str(met)+'_'+scenario+'_'+model+'_'+str(years[0])+'_'+str(years[len(years)-1])+'_poly1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n \n #print ('spatial mean in poly: no buffer for MACA: ' + met + ' ' + scenario + ' ' + model)\n\n else:\n def table_m(image):\n table = (image\n .select(metL[0])\n .reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = str(time_d[timeStep])+'_MACA_'+str(met)+'_'+scenario+'_'+model+'_'+str(years[0])+'_'+str(years[len(years)-1])+'_pts1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n #print('value at point: no buffer for MACA: ' + met + ' ' + scenario + ' ' + model)", "def _main_extract_CDS(args = None, stdout = None, stderr = None,\n gb_record_fmtdict = None,\n gb_cds_fmtdict = None) :\n if stdout is None :\n stdout = sys.stdout\n if stderr is None :\n stderr = sys.stderr\n if gb_record_fmtdict is None :\n gb_record_fmtdict = _GB_RECORD_FMTDICT\n if gb_cds_fmtdict is None :\n gb_cds_fmtdict = _GB_CDS_FMTDICT\n # Process arguments\n if args is None :\n parser = _makeParser_extract_CDS()\n args = parser.parse_args()\n args = _processArgsToLogic_extract_CDS(args, stdout, stderr,\n gb_record_fmtdict, gb_cds_fmtdict)\n # Go through the input files\n uniqueSeq = dict()\n i_file = 0\n for fi in args.genbank_records :\n i_file += 1\n if args.verbose :\n stderr.write(time.asctime() + \" - \" +\n \"Processing file \" + str(i_file) + \" : \" +\n os.path.basename(fi) + \" - \" +\n \"N unique seq : \" + str(len(uniqueSeq.keys())) + \"\\n\")\n record = SeqIO.parse(fi, \"genbank\")\n for r in record :\n if not args.actionFlags.get(\"DoCount\", False) :\n (summaryString, uniqueSeq, newSeq) = (\n _summarizeRecord(r, args.outfmt, args.hash, uniqueSeq))\n stdout.write(summaryString)\n else :\n count = len([x 
for x in r.features if x.type == \"CDS\"])\n stdout.write(r.annotations[\"gi\"] + \"\\t\" + str(count) + \"\\n\")\n # Write unique sequences\n if args.actionFlags.get(\"DoUniqueSequences\", False) :\n with open(args.unique, \"w\") as fo :\n for (k, v) in uniqueSeq.items() :\n fo.write(\">\" + k + \"\\n\")\n fo.write(v + \"\\n\")", "def main():\n print(\n \"\"\"\n\n ##########################################################\n # #\n # #\n # Compiling Colocalized Cyano Datasets #\n # #\n # #\n ##########################################################\n\n \n \"\"\"\n )\n cyanoFiles = glob.glob(f\"{COLOCALIZED_DIR}*.csv\")\n makedir(COMPILED_DIR)\n dfCompiled = pd.DataFrame({})\n for cyanoFile in cyanoFiles:\n print(f\"Compiling {cyanoFile}\")\n data = unify(cyanoFile)\n if len(dfCompiled ) < 1:\n dfCompiled = data\n else:\n dfCompiled = pd.concat([dfCompiled, data], ignore_index=True) \n dfCompiled.to_csv(f\"{COMPILED_DIR}compiled.csv\", index=False)", "def useThibautsData(log, output, bcgr=72.2, sigma=0.75, iterations=4, loc=1900, galaxies=1000,\n datadir='/Users/smn2/EUCLID/CTItesting/uniform/',\n thibautCDM03=False, beta=False, serial=1, parallel=1):\n files = g.glob(datadir + '*.fits')\n #pick randomly\n files = np.random.choice(files, galaxies, replace=False)\n\n #trap parameters: parallel\n if thibautCDM03:\n f1 = '/Users/smn2/EUCLID/vissim-python/data/cdm_thibaut_parallel.dat'\n f2 = '/Users/smn2/EUCLID/vissim-python/data/cdm_thibaut_serial.dat'\n params = ThibautsCDM03params()\n params.update(dict(parallelTrapfile=f1, serialTrapfile=f2, rdose=8.0e9, serial=serial, parallel=parallel))\n else:\n f1 = '/Users/smn2/EUCLID/vissim-python/data/cdm_euclid_parallel.dat'\n f2 = '/Users/smn2/EUCLID/vissim-python/data/cdm_euclid_serial.dat'\n params = MSSLCDM03params()\n params.update(dict(parallelTrapfile=f1, serialTrapfile=f2, rdose=8.0e9, serial=serial, parallel=parallel))\n if beta:\n params.update(dict(beta_p=0.6, beta_s=0.6))\n\n print f1, f2\n\n #store shapes\n eclean = []\n e1clean = []\n e2clean = []\n R2clean = []\n xclean = []\n yclean = []\n eCTI = []\n e1CTI = []\n e2CTI = []\n R2CTI = []\n xCTI = []\n yCTI = []\n eCTIfixed = []\n e1CTIfixed = []\n e2CTIfixed = []\n R2CTIfixed = []\n xCTIfixed = []\n yCTIfixed = []\n\n fh = open(output.replace('.pk', '.csv'), 'w')\n fh.write('#files: %s and %s\\n' % (f1, f2))\n for key in params:\n print key, params[key]\n fh.write('# %s = %s\\n' % (key, str(params[key])))\n fh.write('#file, delta_e, delta_e1, delta_e2, delta_R2, delta_x, delta_y\\n')\n for f in files:\n print 'Processing: ', f\n\n #load data\n nocti = pf.getdata(f)\n\n #scale to SNR about 10 (average galaxy, a single exposure)\n nocti /= np.sum(nocti)\n nocti *= 1500.\n\n #place it on canvas\n tmp = np.zeros((2066, 2048))\n ysize, xsize = nocti.shape\n ysize /= 2\n xsize /= 2\n tmp[loc-ysize:loc+ysize, loc-xsize:loc+xsize] = nocti.copy()\n\n #add background\n tmp += bcgr\n\n #run CDM03\n c = CTI.CDM03bidir(params, [])\n tmp = c.applyRadiationDamage(tmp.copy().transpose()).transpose()\n\n #remove background and make a cutout\n CTIdata = tmp[loc-ysize:loc+ysize, loc-xsize:loc+xsize]\n CTIdata -= bcgr\n CTIdata[CTIdata < 0.] 
= 0.\n\n #write files\n #fileIO.writeFITS(nocti, f.replace('.fits', 'noCTI.fits'), int=False)\n #fileIO.writeFITS(CTI, f.replace('.fits', 'CTI.fits'), int=False)\n\n #reset settings\n settings = dict(sigma=sigma, iterations=iterations)\n\n #calculate shapes\n sh = shape.shapeMeasurement(nocti.copy(), log, **settings)\n results = sh.measureRefinedEllipticity()\n\n eclean.append(results['ellipticity'])\n e1clean.append(results['e1'])\n e2clean.append(results['e2'])\n R2clean.append(results['R2'])\n xclean.append(results['centreX'])\n yclean.append(results['centreY'])\n\n #CTI, fitted centroid\n sh = shape.shapeMeasurement(CTIdata.copy(), log, **settings)\n results2 = sh.measureRefinedEllipticity()\n\n eCTI.append(results2['ellipticity'])\n e1CTI.append(results2['e1'])\n e2CTI.append(results2['e2'])\n R2CTI.append(results2['R2'])\n xCTI.append(results2['centreX'])\n yCTI.append(results2['centreY'])\n\n #fixed centroid\n settings['fixedPosition'] = True\n settings['fixedX'] = results['centreX']\n settings['fixedY'] = results['centreY']\n settings['iterations'] = 1\n sh = shape.shapeMeasurement(CTIdata.copy(), log, **settings)\n results3 = sh.measureRefinedEllipticity()\n\n eCTIfixed.append(results3['ellipticity'])\n e1CTIfixed.append(results3['e1'])\n e2CTIfixed.append(results3['e2'])\n R2CTIfixed.append(results3['R2'])\n xCTIfixed.append(results3['centreX'])\n yCTIfixed.append(results3['centreY'])\n\n text = '%s,%e,%e,%e,%e,%e,%e\\n' % (f, results['ellipticity'] - results2['ellipticity'],\n results['e1'] - results2['e1'], results['e2'] - results2['e2'],\n results['R2'] - results2['R2'],\n results['centreX'] - results2['centreX'],\n results['centreY'] - results2['centreY'])\n fh.write(text)\n print text\n\n fh.close()\n\n results = {'eclean': np.asarray(eclean),\n 'e1clean': np.asarray(e1clean),\n 'e2clean': np.asarray(e2clean),\n 'R2clean': np.asarray(R2clean),\n 'xclean': np.asarray(xclean),\n 'yclean': np.asarray(yclean),\n 'eCTI': np.asarray(eCTI),\n 'e1CTI': np.asarray(e1CTI),\n 'e2CTI': np.asarray(e2CTI),\n 'R2CTI': np.asarray(R2CTI),\n 'xCTI': np.asarray(xCTI),\n 'yCTI': np.asarray(yCTI),\n 'eCTIfixed': np.asarray(eCTIfixed),\n 'e1CTIfixed': np.asarray(e1CTIfixed),\n 'e2CTIfixed': np.asarray(e2CTIfixed),\n 'R2CTIfixed': np.asarray(R2CTIfixed),\n 'xCTIfixed': np.asarray(xCTIfixed),\n 'yCTIfixed': np.asarray(yCTIfixed)}\n\n #save to a file\n fileIO.cPickleDumpDictionary(results, output)\n\n return results", "def main():\n sql = MySQLdb.connect(host=\"commonswiki.labsdb\", db=\"commonswiki_p\", read_default_file=\"~/replica.my.cnf\", charset='utf8')\n cursor = sql.cursor()\n for country in countries:\n start_time = time.time()\n print \"---- Fetching files for: %s\" % country\n wlm = WlmContest(country, cursor)\n wlm.get_files()\n print \"---- fetched in %03.3f seconds\" % (time.time() - start_time)", "def main():\n parser = argparse.ArgumentParser(description=\n 'Clear BACKFILE entries for set of spectra')\n parser.add_argument('specroot', help='Directory stem for spectra')\n parser.add_argument('-v', '--verbose', action='store_true',\n help='verbose mode')\n\n args = parser.parse_args()\n specroot = args.specroot\n verbose = args.verbose\n if verbose:\n print '\\nFix spectra with stem: {}'.format(os.path.abspath(specroot))\n\n # Get number of spectra to update (count ungrouped spectrum files)\n pattern = os.path.basename(specroot) + r'_src[0-9]+\\.pi'\n files = [f for f in os.listdir(os.path.dirname(os.path.abspath(specroot)))\n if re.match(pattern, f)]\n print '{} files will be 
modified'.format(len(files)*2)\n\n # Set file header for each spectra (grouped and ungrouped)\n if verbose:\n print '\\nUpdating file headers...'\n for num in xrange(len(files)):\n set_bkg(num+1, specroot, verbose) # Numbering starts at 1\n\n if verbose:\n print '\\nDone!'", "def main() -> None:\n # What is each dataset's coverage of arXiv?\n arxiv = pd.read_gbq(f'select * from {DATASET}.dataset_arxiv_coverage', project_id='gcp-cset-projects')\n write_latest(arxiv, OUTPUT_DIR / 'arxiv_coverage.csv')\n\n for x in CITATION_PERCENTILES:\n for arxiv_only in [True, False]:\n table = f'country_share_{x - 1}th{\"_arxiv\" if arxiv_only else \"\"}'\n df = pd.read_gbq(f'select * from {DATASET}.{table}', project_id='gcp-cset-projects')\n write_latest(df, OUTPUT_DIR / f'{table}.csv')\n plot_country_shares(df, OUTPUT_DIR / f'{table}.png')\n country_share_min = pd.read_gbq(f'select * from {DATASET}.country_share_99th_arxiv_min',\n project_id='gcp-cset-projects')\n write_latest(country_share_min, OUTPUT_DIR / f'country_share_99th_arxiv_min.csv')\n plot_country_shares(country_share_min, OUTPUT_DIR / f'country_share_99th_arxiv_min.png')\n # Without any citation threshold for inclusion\n country_shares = pd.read_gbq(f'select * from {DATASET}.country_shares', project_id='gcp-cset-projects')\n write_latest(country_shares, OUTPUT_DIR / f'country_shares.csv')\n plot_country_shares(country_shares, OUTPUT_DIR / f'country_shares.png')\n # DS/MAG/WOS only\n for dataset in ['ds', 'mag', 'wos']:\n dataset_country_shares = pd.read_gbq(f'select * from {DATASET}.country_share_99th_{dataset}',\n project_id=PROJECT_ID)\n write_latest(dataset_country_shares, OUTPUT_DIR / f'country_shares_{dataset}.csv')\n plot_country_shares(dataset_country_shares, OUTPUT_DIR / f'country_shares_{dataset}.png')\n\n df = pd.read_gbq(f'select * from {DATASET}.mag_replication', project_id='gcp-cset-projects')\n df = df.query('country != \"Other\"')\n import plotly.express as px\n fig = px.line(df, x='Year', y='proportion', color='country', range_y=(0, .5))\n fig.show()\n\n # Summarize overlap between predictions by method\n overlap_counts = calculate_overlap('summary')\n write_latest(overlap_counts, OUTPUT_DIR / 'overlap_counts.csv')\n overlap_1pct_counts = calculate_overlap('summary_1pct', columns=[\n 'keyword_hit', 'elsevier_hit', 'subject_hit', 'arxiv_scibert_hit', 'arxiv_scibert_cl_hit',\n 'arxiv_scibert_cv_hit', 'arxiv_scibert_ro_hit'])\n write_latest(overlap_1pct_counts, OUTPUT_DIR / 'overlap_arxiv_99th_counts.csv')\n overlap_arxiv_1pct_min_counts = calculate_overlap('summary_arxiv_1pct_min')\n write_latest(overlap_arxiv_1pct_min_counts, OUTPUT_DIR / 'overlap_arxiv_99th_min_counts.csv')\n\n # Assess divergence between methods/models by subject\n # Keyword hits alone\n kw_only_subjects = pd.read_gbq(f'select wos_subject, ds_subject, mag_subject, count(*) as count '\n f'from {DATASET}.comparison '\n f'where keyword_hit is true and elsevier_hit is false and scibert_hit is false '\n f'group by 1, 2, 3 '\n f'order by 4 desc',\n project_id='gcp-cset-projects')\n write_latest(kw_only_subjects, OUTPUT_DIR / 'divergence_subjects_keywords.csv')\n # Elsevier alone\n elsevier_only_subjects = pd.read_gbq(f'select wos_subject, ds_subject, mag_subject, count(*) as count '\n f'from {DATASET}.comparison '\n f'where keyword_hit is false and elsevier_hit is true and scibert_hit is false '\n f'group by 1, 2, 3 '\n f'order by 4 desc',\n project_id='gcp-cset-projects')\n write_latest(elsevier_only_subjects, OUTPUT_DIR / 'divergence_subjects_elsevier.csv')\n # 
SciBERT hits alone\n scibert_only_subjects = pd.read_gbq(f'select wos_subject, ds_subject, mag_subject, count(*) as count '\n f'from {DATASET}.comparison '\n f'where keyword_hit is false and elsevier_hit is false and scibert_hit is true '\n f'group by 1, 2, 3 '\n f'order by 4 desc',\n project_id='gcp-cset-projects')\n write_latest(scibert_only_subjects, OUTPUT_DIR / 'divergence_subjects_scibert.csv')\n # SciBERT hits alone with arXiv coverage\n arxiv_scibert_only_subjects = pd.read_gbq(f'select wos_subject, ds_subject, mag_subject, count(*) as count '\n f'from {DATASET}.comparison '\n f'where keyword_hit is false and elsevier_hit is false and arxiv_scibert_hit is true '\n f'group by 1, 2, 3 '\n f'order by 4 desc',\n project_id='gcp-cset-projects')\n write_latest(arxiv_scibert_only_subjects, OUTPUT_DIR / 'divergence_subjects_arxiv_scibert.csv')\n\n mag_ai = calculate_overlap('mag_ai_fields_overlap', columns=['has_mag_id', 'mag_ai_hit', 'arxiv_scibert_hit'])\n mag_ai['label'] = mag_ai['label'].str.replace('Has_Mag_Id', 'MAG')\n mag_ai['label'] = mag_ai['label'].str.replace('Mag_Ai', 'MAG AI')\n mag_ai['label'] = mag_ai['label'].str.replace('Arxiv_Scibert', 'SciBERT')\n write_latest(mag_ai, OUTPUT_DIR / 'mag_ai_overlap.csv')\n\n # Ancillary table: summarize overlap across datasets\n dataset_overlap = calculate_overlap('dataset_overlap', columns=['in_wos', 'in_ds', 'in_mag'])\n dataset_overlap['label'] = dataset_overlap['label'].str.replace('In_', '').str.upper()\n write_latest(dataset_overlap, OUTPUT_DIR / 'dataset_overlap.csv')\n\n # Summarize overlap across datasets, by whether articles were predicted positive by SciBERT\n do_scibert = calculate_overlap('dataset_overlap_by_prediction',\n columns=['scibert_hit', 'in_wos', 'in_ds', 'in_mag'])\n # This requires some cleanup, because calculate_overlap wasn't written to do overlap + group-by\n do_scibert['label'] = do_scibert['label'].str.replace('In_', '').str.upper()\n do_scibert['label'] = do_scibert['label'].str.replace('SCIBERT . ', '').str.upper()\n do_scibert = do_scibert.query(\"label != 'SCIBERT'\")\n do_scibert = do_scibert.pivot_table(index=['label', 'in_ds', 'in_wos', 'in_mag'], columns='scibert_hit')\n do_scibert = do_scibert.sort_values(['in_wos', 'in_ds', 'in_mag'])\n # Recalculate percentages calculate_overlap did cell count / n, but we want column percentages for easy comparison\n # of overlap between positive and negative predictions\n for pred in [True, False]:\n do_scibert[('Pct', pred)] = do_scibert[('Count', pred)] / do_scibert[('Count', pred)].sum()\n write_latest(do_scibert, OUTPUT_DIR / 'dataset_overlap_by_prediction.csv', index=True)", "def main():\n # Path used in assembly and previously discovered min year value.\n split_in_dir_path = \"../../data/split\"\n avg_5_in_dir_path = \"../../data/averaged_5\"\n avg_25_in_dir_path = \"../../data/averaged_25\"\n avg_50_in_dir_path = \"../../data/averaged_50\"\n dates_mat_path = \"../../data/dates_matrix/dates_matrix.npy\"\n min_year = 1962\n data_out_dir_path = \"../../data/rnn_set/data\"\n labels_out_dir_path = \"../../data/rnn_set/labels\"\n assemble_set(\n split_in_dir_path, avg_5_in_dir_path, avg_25_in_dir_path,\n avg_50_in_dir_path, dates_mat_path, min_year,\n data_out_dir_path, labels_out_dir_path\n )", "def main():\n\n # Script arguments... 
\n \"\"\" If running as standalone, hardcode theWorkspace and inFile \"\"\"\n theWorkspace = arcpy.GetParameterAsText(0)\n if not theWorkspace:\n theWorkspace = r\"d:\\_dataTest\"\n arcpy.env.workspace = theWorkspace\n arcpy.env.overwriteOutput = True\t\n\n inFile = arcpy.GetParameterAsText(1)\n if not inFile:\n inFile = \"updateMultipleSourcePaths.csv\"\n inFile = r\"\\\\dfg.alaska.local\\gis\\Anchorage\\GISStaff\\___gisStaffConnections\\RepairBrokenSrcAug242015.csv\"\n\n outWorkspace = arcpy.GetParameterAsText(2)\n if not outWorkspace:\n outWorkspace = os.path.join(theWorkspace, \"_repaired\")\n '''if not os.path.isdir(outWorkspace): \n os.makedirs(outWorkspace)\n myMsgs(\"created new directory {0} \\n\".format(outWorkspace))'''\n\n # Create .txt Report of what it thinks was fixed, tagged with YYYYMMDD_HHMM\n outFile = \"FixedReport\"\n fileDateTime = curFileDateTime()\n currentDate = curDate()\n outfileTXT = os.path.join(theWorkspace, outFile) + fileDateTime + \".txt\" \n myMsgs (outFile)\n reportFile = open(outfileTXT, 'w')\n myMsgs( \"File {0} is open? {1}\".format(outfileTXT, str(not reportFile.closed)))\n outText = \"Report for what it THINKS it repaired in {0}, on {1} \\n \".format(theWorkspace, currentDate)\n outText += \" Includes coverages (pts, poly, arc, anno), shapes, and FGDB data.\" + '\\n'\n outText += \"-----------------------------------------------------\" + '\\n' \n reportFile.write(outText)\t\n\n mxd = None\n outMXDName = \"none\"\n updatePath = []\n cvrList = [r\"\\arc\", r\"\\polygon\", r\"\\region\", r\"\\point\", r\"\\tic\" ]\n lstExtDatatype = [[\".shp\", \"SHAPEFILE_WORKSPACE\" ], [\".sde\",\"SDE_WORKSPACE\"], \n [\".mdb\", \"ACCESS_WORKSPACE\" ], [\".gdb\", \"FILEGDB_WORKSPACE\"], \n [\"cover\", \"ARCINFO_WORKSPACE\"]]\t\n cntMXD = 0\n cntFixed = 0\n cntTotalFixed = 0\n\n # makes sure the .csv file exists\n if arcpy.Exists(inFile):\n myMsgs (\"->Using {0} to repair paths.\\n==============================\".format(inFile))\n # walks thru the workspace to create list of files \n for root, dirs, files in os.walk(theWorkspace): \t\t\n for fileName in files:\n if root == outWorkspace: # don't process mxd's in the target directory\n pass\n else:\n fullPath = os.path.join(root, fileName)\n basename, extension = os.path.splitext(fileName)\n # Only process .mxd files\n if extension == \".mxd\":\n myMsgs(\"\\nReviewing MXD: {0}\".format(fullPath))\n reportFile.write(\"\\nReviewing MXD: {0}\".format(fullPath))\n mxd = arcpy.mapping.MapDocument(fullPath)\n dfs = arcpy.mapping.ListDataFrames(mxd)\n cntMXD += 1\n cntFixed = 0\n basename, extension = os.path.splitext(fileName)\n # New output mxd name....\n outMXDName = os.path.join(outWorkspace, (str(basename) + \".mxd\")) #\"_fix.mxd\"))\n # create list of the tables since they are handle differently\n theTables = arcpy.mapping.ListTableViews(mxd)\n # Loops thru dataframes so adding and deleting Services will work.\n for df in dfs:\n # Loops thru layers, checks for broken links and tries to repair\n lyrList = arcpy.mapping.ListLayers(mxd, \"\", df)\n for lyr in lyrList:\n if lyr.isBroken:\n if not lyr.supports(\"DATASOURCE\") and not lyr.isServiceLayer:\n myMsgs(\" ->Skipping {0} not a Service layer, and does not support DATASOURCE\".format(lyr.name))\n pass #continue\n elif not lyr.supports(\"DATASOURCE\") and lyr.isServiceLayer:\n myMsgs(\" -Broken Service: {0}\".format(lyr.name))\n else:\n myMsgs(\" -Broken: {0}\".format(lyr.dataSource))\n #myMsgs(\"layer is Group {0} or ServiceLayer {1}\".format(lyr.isGroupLayer, 
lyr.isServiceLayer))\n if (lyr.isGroupLayer or (\"Events\" in lyr.name)) and (not lyr.isServiceLayer): # Groups and Event FC skipped\n myMsgs(\" ...skipping group or event: {0}\".format(lyr.name))\n reportFile.write(\"\\n *skipping group or event: {0} \\n\".format(lyr.name))\n pass #break\n elif lyr.isServiceLayer: # services might have to be handle differently\n if lyr.supports(\"SERVICEPROPERTIES\"):\n for spType, spName in lyr.serviceProperties.iteritems():\n myMsgs(\" Service Properties: {0}: {1}\".format(spType, spName ))\n if spType == \"URL\": \n dataSource = str(spName)\n lyrType = (\"service_{}\".format(lyr.name))\n break\n myMsgs(\" ->this ia a service....using add and remove layer\")\n updatePath = findUpdatePath(inFile, dataSource, lyrType.strip().lower())\n newDSPath, newDSName = os.path.split(updatePath[0])\n if (\"service\" in updatePath[3]) and (\"service\" in updatePath[1]):\n insertLayer = arcpy.mapping.Layer(updatePath[0])\n print(\"dataframe: {0}\".format(df))\n arcpy.mapping.InsertLayer(df, lyr, insertLayer, \"AFTER\")\n arcpy.mapping.RemoveLayer(df, lyr)\n reportFile.write(\"\\n ->sees this as service....{0} \\n\".format(dataSource))\n # will still look at deleted version after insert, not the new version..\n # isBroken will give false info even if fixed, so \n # don't use myMsgs(\"Still broken? {0}\".format(lyr.isBroken)) \n else:\n myMsgs(\" --> a service layer but no SERVICE PROPERTIES\")\n elif lyr.supports(\"DATASOURCE\") and lyr.supports(\"DATASETNAME\"): \n # not a group, event or what it thinks is a service\n updatePath = findUpdatePath(inFile, lyr.dataSource, \"\")\n newDSPath, newDSName = os.path.split(updatePath[0])\n sameType = updatePath[2] \n for cvr in cvrList: #checks to see if the source layer is a coverage...must handle different\n if cvr in lyr.dataSource:\n sourceIsCoverage = True\n break\n else:\n sourceIsCoverage = False\n # updatePath[1] is False if there wasn't a match\n # so \"not update[1]\" means no match was found, and moves to next layer\t\t\t\t\t\t\t\t\n if not updatePath[1]: # if no match was found\n myMsgs(\" !! 
no match to: {0} \".format(lyr.dataSource))\n updateStatus = \"no match, not changed\" # used for message only\n pass\n elif updatePath[1].strip().lower() == \"drive\":\n myMsgs(\" skipping drive-letter matches for now: {0}\".format(lyr.dataSource))\n updateStatus = \"can only find drive match...look into it)\"\n pass\n elif updatePath[1].strip().lower() == \"_review\":\n myMsgs(\" no new source assigned yet for: {0}\".format(lyr.dataSource))\n updateStatus = (\"review and update {0}\".format(inFile))\n pass\n else: #if lyr.supports(\"DATASOURCE\") and lyr.supports(\"DATASETNAME\"):\n updateStatus = str(updatePath[0]) # used for message only\n if lyr in theTables:\n #myMsgs(\" thinks its a table....using findAndReplsWorkspacePath\")\n myMsgs(\" *Moving {0}: {1} to new: {2}\".format(updatePath[3], lyr.dataSource, updatePath[0]))\n reportFile.write(\"\\n Moving {0}: {1} to new: {2} \\n\".format(updatePath[3], lyr.dataSource, updatePath[0]))\n lyr.findAndReplaceWorkspacePath(lyr.dataSource, updatePath, False) \n elif lyr.isRasterLayer:\n #myMsgs(\" thinks its a raster....using findAndReplsWorkspacePath\")\n myMsgs(\" *Moving {0}: {1} to new: {2}\".format(updatePath[3], lyr.dataSource, updatePath[0]))\n reportFile.write(\"\\n Moving {0}: {1} to new: {2} \\n\".format(updatePath[3], lyr.dataSource, updatePath[0]))\n newType = \"RASTER_WORKSPACE\"\n for extType in lstExtDatatype:\n if extType[0] in updatePath[0]:\n newType = extType[1] \n if extType[0] == '.gdb':\n newDSPath = newDSPath.split('.gdb', 1)[0] + '.gdb'\n #newType = extType[1]\n elif extType[0] == '.sde':\n newDSPath = newDSPath.split('.sde', 1)[0] + '.sde'\n break \n lyr.replaceDataSource(newDSPath, newType, newDSName, False)\n if not sameType:\n testOldTOC = updatePath[4].strip('\\\\')\n if lyr.name == testOldTOC:\n lyr.name = lyr.datasetName\n else:\n newType = updatePath[1] \n if sourceIsCoverage and sameType:\n newDSPath = os.path.split(newDSPath)[0]\n newType = \"ARCINFO_WORKSPACE\"\n for extType in lstExtDatatype:\n if extType[0] in updatePath[0]:\n newType = extType[1]\n if extType[0] == '.gdb':\n newDSPath = newDSPath.split('.gdb', 1)[0] + '.gdb'\n #newType = extType[1]\n elif extType[0] == '.sde':\n newDSPath = newDSPath.split('.sde', 1)[0] + '.sde'\n\n break\n print(\"line ~281 newType is: {0}\".format(newType))\n myMsgs(\" *Moving {0}: {1} to new: {2}\".format(updatePath[3], lyr.dataSource, updatePath[0]))\n reportFile.write(\"\\n Moving {0}: {1} to new: {2}\".format(updatePath[3], lyr.dataSource, updatePath[0]))\n lyr.replaceDataSource(newDSPath, newType, newDSName, False)\n #myMsgs(\" new datasource: {0}\".format(lyr.dataSource))\n myMsgs(\" **the new data source: {0}\".format(updateStatus))\n cntFixed += 1\n myMsgs(\" Still broken? {0}\".format(lyr.isBroken))\n else:\n myMsgs(\"not sure what it is, but can't process {0}\".format(lyr.name))\n \n else:\n myMsgs(\" -Not Broken: {0}\".format(str(lyr)))\n\n myMsgs(\" Number of links fixed processed: {0}\".format(cntFixed))\n myMsgs(\" -{0} Review complete.\".format(fullPath))\n reportFile.write(\" -Number of links fixed processed: {0} \\n\".format(cntFixed))\t\t\t\t\t\t\n reportFile.write(\" -{0} Review complete. 
\\n\\n\".format(fullPath))\n\n if cntFixed > 0:\n mxd.save()\n myMsgs(\"saved to {0}\".format(fullPath))\n reportFile.write(\"saved to {0}\".format(fullPath))\n cntTotalFixed += cntFixed\n cntFixed = 0\n \"\"\"if cntFixed > 0:\n\t\t\t\t\t\t\tmxd.saveACopy(outMXDName, '10.1')\n\t\t\t\t\t\t\tmyMsgs(\"saved to {0}\".format(outMXDName))\n\t\t\t\t\t\t\tcntFixed = 0\"\"\"\n '''if arcpy.Exists(outMXDName):\n outMXDName.()\n myMsgs(\"saved 1\")\n else:\n mxd.saveACopy(outMXDName, '10.1')\n myMsgs(\"saved 2\")'''\n del mxd\n cntFixed = 0\n else:\n myMsgs (\"ERROR: Required repair source list: [0] does not exit. \\n\".format(inFile))\n outText = (\"\\n\\n ==========================================\")\n outText += (\"\\n Number of MXD's processed: {0} \\n\".format(cntMXD))\n outText += (\" Total Number of links it fixed, all mxds: {0} \\n\".format(cntTotalFixed) )\n\n myMsgs(\" {0}\".format(outText))\n\n reportFile.write(outText)\n # close the .txt file, \n reportFile.close()\n myMsgs( \"File {0} is closed? {1}\".format(outfileTXT, str(reportFile.closed)))\t\n\n myMsgs('!!! Success !!! ')", "def main():\n file_one_path, file_two_path, output_path =\\\n get_command_line_arguments(\n ['/home/ehler002/project/groups/go/Data/Cluster_Data/Dataset.txt',\n '/home/ehler002/project/groups/go/Data/Cluster_Data/translated_genes.fpkm_table',\n '/home/ehler002/project/groups/go/Data/Cluster_Data/Full_fpkm_Table.txt'])\n pattern = 'CRO_T'\n for file_path in [file_one_path, file_two_path]:\n assert os.path.exists(file_path), 'File %s does not exist.' % file_path\n start_time = datetime.datetime.now()\n print('Started concatenation at %s' % start_time)\n file_contents, headers = get_file_contents(file_two_path)\n file_contents = sort_file_contents(file_contents)\n file_contents = remove_pattern(file_contents, pattern)\n concatenate_files(file_one_path, file_contents, headers, output_path)\n print('Finished concatenation in %s' % (datetime.datetime.now() - start_time))", "def main(raw_args=None):\r\n args = get_args(raw_args)\r\n dataset = DATASETS[args.dataset]\r\n dataset.set_work_dir(args.path)\r\n\r\n dataset_dir = os.path.join(dataset.get_data_path(), consts.RAW_DATA_DIR)\r\n input_file = os.path.join(dataset_dir, args.input)\r\n if args.output:\r\n out_name = args.output\r\n else:\r\n out_name = OUT_FILE % (args.chr, format_number(args.size))\r\n output_file = os.path.join(dataset_dir, out_name)\r\n\r\n print(\"Output will be in: %s\" % output_file)\r\n\r\n entries = set()\r\n out = open(output_file, 'w')\r\n input = open(input_file, 'r')\r\n\r\n is_first = True\r\n for line in input:\r\n if is_first:\r\n add_headers(line.strip(), out)\r\n is_first = False\r\n else:\r\n entries.add(line)\r\n input.close()\r\n print(\"Done reading\")\r\n\r\n # Keeps track of spacers that were already included.\r\n spacers = set()\r\n # Keeps track of the number of samples already obtained.\r\n sampled = 0\r\n # Keeps track of progress for printing.\r\n chunk = 1\r\n\r\n # While we have not sampled enough...\r\n while sampled < args.size:\r\n remaining = args.size - sampled\r\n # Sample more entries.\r\n new_entries = sample(entries, spacers, remaining, dataset)\r\n sampled += len(new_entries)\r\n for entry in new_entries:\r\n out.write(common.to_csv_line(*entry))\r\n\r\n # If we have passed a progress checkpoint - prints progress.\r\n if (args.size / PRINT_NUM) * chunk < sampled:\r\n print(\"Sampled: %d\" % sampled)\r\n chunk += 1\r\n\r\n print(\"Done\")\r\n print(STATS)", "def main():\n\n in_file = 
('/home/desi2/candidatesp9/asteroids_decals_dr2.fits')\n out_dir = os.path.join(os.environ.get('HOME'), 'asteroid_cutouts/')\n\n cand_info = fits_table(in_file)\n # Pre-select asteroids in the ra, dec box you know they exist.\n ramin = 107\n ramax = 130\n decmin = 16\n decmax = 30\n these = np.where((cand_info.ra0>ramin)*(cand_info.ra0<ramax)*\n (cand_info.dec0>decmin)*(cand_info.dec0<decmax))[0]\n #pdb.set_trace() # Runs Python Debugger on code up to this line. \n cand_info = cand_info[these]\n\n urls = []\n jpgfiles = []\n for ii in range(100):\n print('Working on candidate {}'.format(ii))\n ra = cand_info.ra0[ii]\n dec = cand_info.dec0[ii]\n \n jpgurl = 'http://legacysurvey.org/viewer/jpeg-cutout-decals-dr2?ra={:.6f}&dec={:.6f}&pixscale=0.262&size=200'.format(ra, dec)\n \n jpgfile = 'obj-{:03d}.jpg'.format(ii)\n jpgfile = os.path.join(out_dir, jpgfile)\n grab = 'wget --continue -O {:s} \"{:s}\"' .format(jpgfile, jpgurl)\n print(grab)\n os.system(grab)\n #pdb.set_trace() # Runs Python Debugger on code up to this line. \n if os.stat(jpgfile).st_size < 18000: # Remove partial or empty images\n # The cut on filesize takes care of most of the bad images but\n # leaves some behind. If the restriction is any larger,\n # it can remove some valid files.\n os.remove(jpgfile)\n else:\n print(jpgurl)\n jpgfiles.append(jpgfile)\n urls.append(jpgurl)\n # for HTML file. What should the URL be?\n #print('<html>')\n #print('<head> Planet Nine Candidates </head>')\n #print('<body>')\n #for thisurl, thisjpg in zip(urls, jpgfiles):\n # print('<div class=\"image\">')\n # print('<a href=\"{}\"><img src=\"{:s}\"></a>'.format(thisurl, thisjpg))\n # print('<div class=\"caption\"> Image of {:s} </div>' .format(thisjpg))\n # print('</div>')\n #print('</body></html>')", "def main():\n subjectlist = ['hel{}'.format(i) for i in range(1, 20) if i is not 9]\n logfile = setup_log(os.path.join(os.environ['hel'], 'logs',\n 'randomise_setup_fslmerge'))\n logfile.info('Setup for randomise.')\n logfile.info('Making a 4D data set by combining images')\n outdir = os.path.join(os.environ['hel'], 'graph_analyses',\n 'randomise_global_connectivity')\n for subclust_n in range(1, 4):\n outfilename = os.path.join(outdir,\n 'knnward_clst1_subclust{}_4Dfile'.format(\n subclust_n))\n mergefsl(logfile, make_file_list(subjectlist, subclust_n), outfilename)", "def run_main_test():\r\n\r\n print(\"\"\"\r\n +++++++++++++++++++++++++++++++++++++++++++\r\n +++ Performing Main LZJD Full File Test +++\r\n +++++++++++++++++++++++++++++++++++++++++++\r\n \"\"\")\r\n # iterate over the files in the directory\r\n for f in listdir(SRC):\r\n if isfile(join(SRC, f)):\r\n # prepare a dictionary with the digests ready to compare\r\n DIGESTS[f] = {'src': None, 'r2': None, 'ghidra': None}\r\n\r\n # calculate digest of src file\r\n DIGESTS[f]['src'] = digest(join(SRC, f))\r\n\r\n # name adjustment\r\n f2 = f.replace(\".c\", \".o\")\r\n\r\n # calculate digest of ghidra and r2 outputs\r\n DIGESTS[f]['ghidra'] = digest(join(GHIDRA_PATH, GHIDRA_NAME.format(f2)))\r\n DIGESTS[f]['r2'] = digest(join(R2DEC_PATH, R2DEC_NAME.format(f2)))\r\n\r\n # obtain the similarity from source\r\n SCORES[f] = {'ghidra': get_lzjd_sim(DIGESTS[f]['src'], DIGESTS[f]['ghidra']),\r\n 'r2': get_lzjd_sim(DIGESTS[f]['src'], DIGESTS[f]['r2']),\r\n 'x': get_lzjd_sim(DIGESTS[f]['ghidra'], DIGESTS[f]['r2'])}\r\n\r\n gidra_doms = 0\r\n for f in SCORES:\r\n print(\"{0:12}: Scores G:{1:20} R2:{2:20} X:{3:20} D:{4:20}\".format(f,\r\n SCORES[f]['ghidra'],\r\n SCORES[f]['r2'],\r\n 
SCORES[f]['x'],\r\n SCORES[f]['ghidra'] - SCORES[f]['r2']))\r\n if SCORES[f]['ghidra'] > SCORES[f]['r2']:\r\n gidra_doms += 1\r\n print(\"Ghidra Dominated on {} files\".format(gidra_doms))\r\n # This section of code prepares visualizations on the data for easy analysis\r\n plot_scatter(SCORES, title=\"LZJD Full File scores\")\r\n\r\n # obtian the scores as input data to the plots\r\n bxplt_data_gd = [score['ghidra'] for score in SCORES.values()]\r\n bxplt_data_r2 = [score['r2'] for score in SCORES.values()]\r\n\r\n # run pairwise t test\r\n print(\"Performing T-Test on LZJD Distance of files\")\r\n run_ttest(bxplt_data_gd, bxplt_data_r2)", "def collectInitialeccnStatistics_onefile(self, folder, databaseFilename, multiplicityFactor = 1.0, deformedNuclei = False):\n typeCollections = ((1, 'sn'), (2,'en'))\n for ecc_id, ecc_type_name in typeCollections:\n db = SqliteDB(path.join(folder, databaseFilename % ecc_type_name))\n # first write the ecc_id_lookup table, makes sure there is only one such table\n if db.createTableIfNotExists(\"ecc_id_lookup\", ((\"ecc_id\",\"integer\"), (\"ecc_type_name\",\"text\"))):\n db.insertIntoTable(\"ecc_id_lookup\", (ecc_id, ecc_type_name))\n\n # next create the eccentricities and collisionParameters table\n db.createTableIfNotExists(\"eccentricities\", ((\"event_id\",\"integer\"), (\"ecc_id\", \"integer\"), (\"n\",\"integer\"), (\"ecc_real\",\"real\"), (\"ecc_imag\",\"real\")))\n db.createTableIfNotExists(\"collisionParameters\", ((\"event_id\",\"integer\"), (\"Npart\", \"integer\"), (\"Ncoll\",\"integer\"), (\"b\",\"real\"), (\"total_entropy\",\"real\")))\n if(deformedNuclei):\n db.createTableIfNotExists(\"deformationParameters\", ((\"event_id\",\"integer\"), (\"cosTheta1\", \"real\"), (\"phi1\",\"real\"), (\"cosTheta2\",\"real\"), (\"phi2\",\"real\")))\n\n # the big loop\n data = loadtxt(path.join(folder, '%s_ecc_eccp_10.dat' %(ecc_type_name)))\n Npart = data[:, 36]\n Ncoll = data[:, 37]\n dSdy = data[:, 38]/multiplicityFactor #scale out the multiplicity factor used in superMC\n b = data[:, 39]\n for event_id in range(len(Npart)):\n db.insertIntoTable(\"collisionParameters\", (event_id, int(Npart[event_id]), int(Ncoll[event_id]), float(b[event_id]), float(dSdy[event_id])))\n if(deformedNuclei):\n cosTheta1 = data[:, 40]\n phi1 = data[:, 41]\n cosTheta2 = data[:, 42]\n phi2 = data[:, 43]\n for event_id in range(len(Npart)):\n db.insertIntoTable(\"deformationParameters\", (event_id, float(cosTheta1[event_id]), float(phi1[event_id]), float(cosTheta2[event_id]), float(phi2[event_id])))\n for iorder in range(1,10):\n eccReal = data[:, 4*iorder - 2]\n eccImag = data[:, 4*iorder - 1]\n for event_id in range(len(eccReal)):\n db.insertIntoTable(\"eccentricities\",(event_id, ecc_id, iorder, float(eccReal[event_id]), float(eccImag[event_id])))\n\n # close connection to commit changes\n db.closeConnection()", "def main(self, argv):\n\n np.random.seed(42)\n self.setup_logging()\n self.command_line(argv)\n start_time = time.time()\n\n logging.info(\"Starting Kaggle-CTMI Experiment\\n\")\n\n logging.info(\"Finding data and groundtruth...\")\n cohort = Cohort(self.shaip)\n train_cohort, test_cohort = cohort.split_cohort_train_test(0.3)\n logging.info(\"Found %d datasets\", cohort.size)\n\n if self.args.train:\n logging.info(\"Training on %d datasets...\", train_cohort.size)\n model = self.algorithm.train(train_cohort)\n Algorithm.save_model(model, self.shaip.models_dir + 'model')\n else:\n logging.info(\"Skipping training, model saved from earlier run\")\n model = 
self.algorithm.load_model(self.shaip.models_dir + 'model')\n\n if self.args.predict:\n logging.info(\"Prediction on %d datasets...\", test_cohort.size)\n test_predictions = self.algorithm.predict(model, test_cohort)\n else:\n logging.info(\"Skipping prediction, using predictions from earlier run\")\n # TODO: need to sort out caching of predictions\n test_predictions = None\n\n if self.args.evaluate:\n logging.info(\"Generating results to ShaipWorkspace/outputs/results/index.html...\")\n self.results.show_results(train_cohort, test_cohort,\n self.algorithm.history, test_predictions)\n\n logging.info(\"Kaggle-CTMI Experiment done in %4.1f seconds.\\n\", (time.time() - start_time))", "def main():\n lake_drivers = Dynamic_Lake_Drivers()\n #lake_drivers.prepare_orography_ICE5G_0k_uncorrected()\n #lake_drivers.prepare_orography_ICE5G_0k_corrected()\n #lake_drivers.prepare_orography_ICE6G_21k_corrected()\n #lake_drivers.prepare_river_directions_with_depressions_from_glac1D()\n #lake_drivers.evaluate_glac1D_ts1900_basins()\n #import time\n # start = time.time()\n #lake_drivers.evaluate_ICE6G_lgm_basins()\n # end = time.time()\n # print(end - start)\n #lake_drivers.prepare_basins_from_glac1D()\n #lake_drivers.extract_lake_volumes_from_glac1D_basins()\n #lake_drivers.connect_catchments_for_glac1D()\n lake_drivers.connect_catchments_for_transient_run()\n #lake_drivers.extract_volumes_for_transient_run()\n #lake_drivers.add_10min_rmouth_to_transient_data()\n #lake_drivers.expand_transient_data_catchments_to_include_rmouth()\n #lake_drivers.remove_no_data_values_from_upscaled_MERIT_correction_set()\n #lake_drivers.remove_disconnected_points_from_slm()", "def __init__(self, out_dir = 'output' ):\n\n self.data = {} # will contain the data for each different dataset \n self.datasets = '' # will contain the input datasets (original dictionary)\n self.datasets_keys = '' # will contain the input datasets names only (i.e. 
keys of the datasets dictionary)\n #self.datasets_all = ['era5_2_2'] # all possibly available datasets \n\n self.unique_dates = {} \n self.attributes = {} # will keep the original attributes from the CDM tables, read from the netCDF files \n self.id_string_length = 14 # fixed length for record_id and observation_id values \n self.out_dir = out_dir \n self.variable_types = {}\n self.observation_ids_merged = { 'igra2':b'3' , \n 'ncar':b'4', \n 'bufr':b'5', \n 'era5_1':b'1' , \n 'era5_2':b'2', \n 'era5_1759' :b'6' , \n 'era5_1761':b'7' , \n 'era5_3188' :b'8' } # values used to convert original record_id to the merged record_id, see method merge_all_data \n\n logging.info('*** Initialising the Merging procedure ***' ) \n #self.era5b_columns = [] # stores the columns of the era5fb \n self.standard_cdm = [ 'crs' , 'observed_variable', 'units' , 'z_coordinate_type' , 'station_type', 'station_configuration_codes'] \n self.slice_size = 3000\n self.index_offset = 0 # will be replaced when running \n self.hour_time_delta = 60 * 60 * 2 # decide up to which time shift records are considered identical \n \n \n self.only_std_plevels = False # set to True to store only standard pressure level data \n self.std_plevs = [1000, 2000, 3000, 5000, 7000, 10000, 15000, 20000, 25000, 30000, 40000, 50000, 70000, 85000, 92500, 100000]", "def main():\n\n # set up output directory and file\n output_file_folder = \"output/{}\".format(args.experiment_name)\n Path(output_file_folder).mkdir(parents=True, exist_ok=True)\n args.output_file_name = \"{}/{}.csv\".format(output_file_folder, args.model_name)\n args.checkpoint_name = \"{}/{}.pt\".format(output_file_folder, args.model_name + \"_best_model\")\n\n # read lcquad merged data\n if args.dataset_name == \"lcquad\":\n df_train = pd.read_csv(\"./data/lcquad/gold_db/train_gold.csv\")\n df_valid = pd.read_csv(\"./data/lcquad/gold_db/valid_gold.csv\")\n df_test = pd.read_csv(\"./data/lcquad/gold_db/lcquad_test_sorted.csv\")\n args.gold_file_name = \"lcquad/lcquad_gt_5000.csv\"\n # elif args.dataset_name == \"qald9\":\n # df_train = pd.read_csv(\"./data/qald-9/train_gold.csv\")\n # df_valid = pd.read_csv(\"./data/qald-9/valid_gold.csv\")\n # df_test = pd.read_csv(\"./data/qald-9/test_gold.csv\")\n # args.gold_file_name = \"qald/qald_data_gt.csv\"\n # elif args.dataset_name == \"webqsp\":\n # df_train = pd.read_csv(\"./data/webqsp/train_gold.csv\")\n # df_valid = pd.read_csv(\"./data/webqsp/valid_gold.csv\")\n # df_test = pd.read_csv(\"./data/webqsp/test_gold.csv\")\n # args.gold_file_name = \"webqsp/webqsp_data_gt.csv\"\n\n train_data = read_data_file(df_train, device, \"train\")\n valid_data = read_data_file(df_valid, device, \"valid\")\n test_data = read_data_file(df_test, device, \"test\")\n\n # train model and evaluate\n if args.model_name == \"pure\":\n model = PureNameLNN(args.alpha, -1, False)\n elif args.model_name == \"ctx\":\n model = None\n elif args.model_name == 'type':\n model = None\n elif args.model_name == \"pure_ctx\":\n model = None\n elif args.model_name == \"pure_type\":\n model = None\n elif args.model_name == \"ctx_type\":\n model = None\n\n model = model.to(device)\n print(\"model: \", args.model_name, args.alpha)\n\n # training\n train(model, train_data, valid_data, test_data, args.checkpoint_name, args.num_epoch, args.margin, args.learning_rate)", "def main():\n df_path = './DuReader_reformatted/DuReader_for_dbCombinedPara500-150-sample10000.csv'\n df = pd.read_csv(df_path, sep='\\t', index_col=0).dropna() # drop 2 nan question and 8 nan title\n epoch 
= 5 # about 6 hours\n total_time_list, back_end_time_list = get_time_avg(df['question'].tolist(), epoch)\n df['time_avg'] = total_time_list\n df['backend_time'] = back_end_time_list\n new_df_path = os.path.splitext(df_path)[0] + '-whole-epoch-' + str(epoch) + '.csv'\n df.to_csv(new_df_path, sep='\\t')\n print('file successfully saved to ', new_df_path)", "def workflow(save_dir):\n year = 2016\n month_series = range(1, 13)\n total_potential_biomass_multiplier = 48.8\n total_standing_biomass_multiplier = 45.25\n biomass_jitter = 3.\n diet_sufficiency_multiplier = 0.28\n diet_sufficiency_jitter = 0.01\n avg_animal_density = 0.0175\n animal_density_jitter = 0.005\n\n # twelve months of precipitation rasters covering the study area\n precip_basename_list = [\n 'chirps-v2.0.{}.{:02d}.tif'.format(year, month) for month in\n month_series]\n\n # reclassify 0 to NoData in CHIRPS rasters\n output_precip_dir = os.path.join(save_dir, 'precip')\n if not os.path.exists(output_precip_dir):\n os.makedirs(output_precip_dir)\n for bn in precip_basename_list:\n base_raster = os.path.join(PRECIP_DIR, bn)\n target_raster = os.path.join(output_precip_dir, bn)\n pygeoprocessing.raster_calculator(\n [(base_raster, 1)], zero_to_nodata, target_raster,\n gdal.GDT_Float32, _IC_NODATA)\n\n # generate outputs\n for month in month_series:\n precip_raster = os.path.join(\n output_precip_dir, 'chirps-v2.0.{}.{:02d}.tif'.format(year, month))\n\n total_potential_biomass_path = os.path.join(\n save_dir, 'potential_biomass_{}_{:02d}.tif'.format(year, month))\n pygeoprocessing.raster_calculator(\n [(precip_raster, 1)] + [(path, 'raw') for path in [\n total_potential_biomass_multiplier,\n biomass_jitter]],\n precip_to_correlated_output, total_potential_biomass_path,\n gdal.GDT_Float32, _IC_NODATA)\n\n total_standing_biomass_path = os.path.join(\n save_dir, 'standing_biomass_{}_{:02d}.tif'.format(year, month))\n pygeoprocessing.raster_calculator(\n [(precip_raster, 1)] + [(path, 'raw') for path in [\n total_standing_biomass_multiplier,\n biomass_jitter]],\n precip_to_correlated_output, total_standing_biomass_path,\n gdal.GDT_Float32, _IC_NODATA)\n\n diet_sufficiency_path = os.path.join(\n save_dir, 'diet_sufficiency_{}_{:02d}.tif'.format(year, month))\n pygeoprocessing.raster_calculator(\n [(precip_raster, 1)] + [(path, 'raw') for path in [\n diet_sufficiency_multiplier,\n diet_sufficiency_jitter]],\n precip_to_correlated_output, diet_sufficiency_path,\n gdal.GDT_Float32, _IC_NODATA)\n\n animal_density_path = os.path.join(\n save_dir, 'animal_density_{}_{:02d}.tif'.format(year, month))\n pygeoprocessing.raster_calculator(\n [(precip_raster, 1)] + [(path, 'raw') for path in [\n avg_animal_density,\n animal_density_jitter]],\n precip_to_animal_density, animal_density_path,\n gdal.GDT_Float32, _IC_NODATA)", "def main():\n\n while True:\n print(\"Let's explore some US bikeshare data!\")\n city, month, day = get_filters()\n df = load_data(city, month, day)\n # printing filter\n print(f\"Month: {month}, Day: {day}\")\n time_stats(df)\n station_stats(df)\n trip_duration_stats(df)\n user_stats(df)\n display_records(df)\n restart = prompts.yes_no_prompt(\"\\nWould you like to restart?\\n\").launch()\n if not restart:\n break\n system(\"clear\")", "def test_runner_full_loop_big(caplog, big_dataset):\n caplog.set_level(logging.INFO)\n\n session = big_dataset\n\n start_date = datetime.datetime(2020, 5, 17, 13, 0, 3)\n end_date = datetime.datetime(2020, 5, 17, 13, 0, 6)\n replay_rate = 1 \n\n db_connector_test = 
DataBaseConnector(session=session, \n table_name='timeseries_dataset', \n time_column='timestamp', \n start_date=start_date,\n end_date=end_date)\n\n debug_publisher = FilePublisher()\n\n runner = CentralRunner(db_connection=db_connector_test, \n output_system=debug_publisher, \n start_time=start_date, \n end_time=end_date,\n replay_rate=replay_rate )\n\n start = time.perf_counter()\n \n runner.run()\n\n end = time.perf_counter()\n\n code_time = end - start\n assert int(code_time) == 2\n\n ## Test results\n assert os.path.exists('test_tmp/17-05-2020_13-00-03')\n assert os.path.exists('test_tmp/17-05-2020_13-00-05')\n\n assert len(os.listdir('test_tmp/17-05-2020_13-00-03')) == 9\n assert len(os.listdir('test_tmp/17-05-2020_13-00-05')) == 2", "def write_merged_file(self):\n \n #out_name = os.getcwd() + '/FAST_INDEX_merged_' + [ x for x in self.datasets[ list(self.datasets_keys)[0]].split('/') if '.nc' in x ] [0] \n \n \"\"\" Loading the econding of variables created from the harvester script \"\"\"\n encodings = np.load('groups_encodings.npy' , allow_pickle = True ).item()\n \n if not os.path.isdir(self.out_dir):\n Path(self.out_dir).mkdir(parents=True, exist_ok=True)\n \n out_name = self.out_dir + '/' + self.station + '_CEUAS_merged_v0.nc' \n \n logging.info('Writing the observations_tables to the netCDF output via xarray to_netcdf() ')\n #obs_tab = self.MergedObs[ ['date_time' , 'latitude', 'longitude' , 'observation_value' , 'observed_variable' , 'source_id' , 'observation_id', 'z_coordinate' ] ] # including only some columns \n obs_tab = self.MergedObs # including only some columns \n obs_tab = self.add_cdm_missing_columns(obs_tab) \n \n \"\"\" \n # Old using xarray\n obs_tab = obs_tab.to_xarray() \n for v in obs_tab.variables:\n if v == \"index\" or v == \"hdrlen\" or 'string' in v:\n continue\n obs_tab[v].attrs['external_table'] = self.attributes['observations_table'][v]['external_table']\n obs_tab[v].attrs['description'] = self.attributes['observations_table'][v]['description']\n \"\"\"\n\n for k in obs_tab.columns:\n print('Writing the observation table using h5py new method for the variable: ' , k )\n df = obs_tab[ [k] ] # making a 1 column dataframe \n write_dict_h5(out_name, df, k, encodings['observations_table'], var_selection=[], mode='a', attrs={'date_time':('units','seconds since 1900-01-01 00:00:00')})\n \n #obs_tab.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='w' , group = 'observations_table') # writing the merged observations_table \n \n \n \n logging.info('Writing the header_table to the netCDF output via xarray ')\n head_tab = self.MergedHead.to_xarray()\n for v in head_tab.variables: \n if v == \"index\" or v == \"hdrlen\" or v == \"string80\":\n continue\n head_tab[v].attrs['external_table'] = self.attributes['header_table'][v]['external_table']\n head_tab[v].attrs['description'] = self.attributes['header_table'][v]['description']\n \n head_tab.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a' , group = 'header_table') # writing the merged observations_table \n \n \n \n logging.info('Writing the station_configuration and source_configurations tables to the netCDF output via xarray ') \n for k in self.data.keys():\n if k == 'cdm_tables':\n continue \n group_name = k + '_station_configuration'\n sc = self.data[k]['station_configuration'].to_xarray()\n sc.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a' , group = group_name )\n \n group_name = k + '_source_configuration'\n sc = 
self.data[k]['source_configuration'].to_xarray()\n sc.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a' , group = group_name )\n \n \"\"\" To be fixed ! \"\"\"\n #group_name = k + '_source_configuration'\n #sc = self.data[k]['source_configuration'][:1].to_xarray()\n #sc.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a' , group = group_name ) \n \n logging.info('Writing the merged record indices to the netCDF output ') \n di = self.MergedRecordIndex\n di.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a')\n \n logging.info('Writing the merged feedback to the netCDF output ') \n group_name = 'era5fb' \n di = self.MergedFeedback\n di = di.to_xarray()\n di.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a' , group = group_name )\n \n logging.info('Writing the standard cdm tables to the netCDF output ') \n for t in self.data['cdm_tables'].keys(): \n d = self.data['cdm_tables'][t]\n d.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a' , group = t )\n \n logging.info('*** Done writing the output netCDF file ')", "def main():\n\n #for row_start in np.arange(0, 128):\n # MLC_IDSAT_algorithm_rv1(14, 33, 16, 2, 'ULVT', 1.8, 2.0, 128, [row_start], [71, 55, 46, 35], [0.002, 0.01, 0.04, 0.2], 4, [], '', ['../Data/chip14/MLC_programming_Chip14_Col33_2msPULSE_VG1p8_VD2p0_VAsource_VBdrain_01', '../Data/chip14/MLC_programming_Chip14_Col33_10msPULSE_VG1p8_VD2p0_VAsource_VBdrain_02', '../Data/chip14/MLC_programming_Chip14_Col33_40msPULSE_VG1p8_VD2p0_VAsource_VBdrain_03', '../Data/chip14/MLC_programming_Chip14_Col33_200msPULSE_VG1p8_VD2p0_VAsource_VBdrain_04'], '../Plots/chip14/', 'VG1p8_VD2p0', '_cycle01020304_row'+str(row_start).zfill(3), Imin=12, Imax=136)\n\n Marcov_Chain_MLE(14, 33, 16, 2, 'ULVT', 1.8, 2.0, 128, range(0, 128), [71, 55, 46, 35], [0.002, 0.01, 0.04, 0.2], 4, ['../Data/chip14/MLC_programming_Chip14_Col33_2msPULSE_VG1p8_VD2p0_VAsource_VBdrain_01', '../Data/chip14/MLC_programming_Chip14_Col33_10msPULSE_VG1p8_VD2p0_VAsource_VBdrain_02', '../Data/chip14/MLC_programming_Chip14_Col33_40msPULSE_VG1p8_VD2p0_VAsource_VBdrain_03', '../Data/chip14/MLC_programming_Chip14_Col33_200msPULSE_VG1p8_VD2p0_VAsource_VBdrain_04'], '../Plots/chip14/', 'VG1p8_VD2p0', '', 160.0, [90.0, 70.0, 50.0, 30.0], 0.0, 0.5)\n plt.show()\n #MLC_IDSAT_algorithm_rv1(14, 33, 16, 2, 'ULVT', 1.8, 2.0, 128, range(0, 128), [71, 55, 46, 35], [0.002, 0.01, 0.04, 0.2], 4, [], '', ['../Data/chip14/MLC_programming_Chip14_Col33_2msPULSE_VG1p8_VD2p0_VAsource_VBdrain_01', '../Data/chip14/MLC_programming_Chip14_Col33_10msPULSE_VG1p8_VD2p0_VAsource_VBdrain_02', '../Data/chip14/MLC_programming_Chip14_Col33_40msPULSE_VG1p8_VD2p0_VAsource_VBdrain_03', '../Data/chip14/MLC_programming_Chip14_Col33_200msPULSE_VG1p8_VD2p0_VAsource_VBdrain_04'], '../Plots/chip14/', 'VG1p8_VD2p0', '_cycle01020304_all')\n\n #IDS_VGS(14, 33, 16, 2, 'ULVT', 128, ['../Data/chip14/Fresh_Chip14_Col33_Ids_Vgs_VAsource_VBdrain', '../Data/chip14/MLC_Chip14_Col33_2msPULSE_VG1p8_VD2p0_Ids_Vgs_VAsource_VBdrain_01', '../Data/chip14/MLC_Chip14_Col33_10msPULSE_VG1p8_VD2p0_Ids_Vgs_VAsource_VBdrain_02', '../Data/chip14/MLC_Chip14_Col33_40msPULSE_VG1p8_VD2p0_Ids_Vgs_VAsource_VBdrain_03', '../Data/chip14/MLC_Chip14_Col33_200msPULSE_VG1p8_VD2p0_Ids_Vgs_VAsource_VBdrain_04'], ['b', 'y', 'r', 'k', 'g'], '../Plots/chip14/', 'Fresh_vs_MLC01020304_VG1p8_VD2p0_IDS-VGS_VaS-VbD_', range(0, 128), 'Fresh vs MLC-1-2-3-4 (VG=1.8, VD=2.0)\\nMLC-{1, 2, 3, 4}: {2ms, 10ms, 40ms, 200ms} WL pulses, IDSAT threshold = {90, 70, 50, 
30}uA, forward' , 150, ['fresh', 'MLC-01', 'MLC-02', 'MLC-03', 'MLC-04']) \n #IDS_VGS(14, 33, 16, 2, 'ULVT', 128, ['../Data/chip14/Fresh_Chip14_Col33_Ids_Vgs_VAdrain_VBsource', '../Data/chip14/MLC_Chip14_Col33_2msPULSE_VG1p8_VD2p0_Ids_Vgs_VAdrain_VBsource_01', '../Data/chip14/MLC_Chip14_Col33_10msPULSE_VG1p8_VD2p0_Ids_Vgs_VAdrain_VBsource_02', '../Data/chip14/MLC_Chip14_Col33_40msPULSE_VG1p8_VD2p0_Ids_Vgs_VAdrain_VBsource_03', '../Data/chip14/MLC_Chip14_Col33_200msPULSE_VG1p8_VD2p0_Ids_Vgs_VAdrain_VBsource_04'], ['b', 'y', 'r', 'k', 'g'], '../Plots/chip14/', 'Fresh_vs_MLC01020304_VG1p8_VD2p0_IDS-VGS_VaD-VbS_', range(0, 128), 'Fresh vs MLC-1-2-3-4 (VG=1.8, VD=2.0)\\nMLC-{1, 2, 3, 4}: {2ms, 10ms, 40ms, 200ms} WL pulses, IDSAT threshold = {90, 70, 50, 30}uA, reversed', 150, ['fresh', 'MLC-01', 'MLC-02', 'MLC-03', 'MLC-04']) \n\n #hist_IDS_VGS(0, 14, 33, 16, 2, 'ULVT', 128, ['../Data/chip14/Fresh_Chip14_Col33_Ids_Vgs_VAdrain_VBsource', '../Data/chip14/MLC_Chip14_Col33_2msPULSE_VG1p8_VD2p0_Ids_Vgs_VAdrain_VBsource_01', '../Data/chip14/MLC_Chip14_Col33_10msPULSE_VG1p8_VD2p0_Ids_Vgs_VAdrain_VBsource_02', '../Data/chip14/MLC_Chip14_Col33_40msPULSE_VG1p8_VD2p0_Ids_Vgs_VAdrain_VBsource_03', '../Data/chip14/MLC_Chip14_Col33_200msPULSE_VG1p8_VD2p0_Ids_Vgs_VAdrain_VBsource_04'], ['b', 'y', 'r', 'k', 'g'], '../Plots/chip14/', 'Hist-IDSAT_MLC-rv1-01020304_reverse-read_', range(0, 128), 'MLC programming {2ms, 10ms, 40ms, 200ms} pulses, VGS=1.8, VDS=2.0 for level=1-2-3-4\\nhistogram of read-IDSAT (VGS=VDS=0.8V)', 0, 150, 0, 150, 1000)\n #\n #t_label = []\n #for t in np.arange(0, 0.002*(71) + 0.0001, 0.002):\n # t_label.append(str(t))\n #\n ##MLC_IDSAT_algorithm_rv1(14, 33, 16, 2, 'ULVT', 1.8, 2.0, 128, [21], [71], [0.002], 1, np.arange(0, 0.002*(71)+0.0001, 0.002), t_label, ['../Data/chip14/MLC_programming_Chip14_Col33_2msPULSE_VG1p8_VD2p0_VAsource_VBdrain_01'], '../Plots/chip14/', 'VG1p8_VD2p0', '_rv1_cycle01_row-21', Imin=82, Imax=142)\n\n #for row_start in np.arange(0, 128):\n # MLC_IDSAT_algorithm_rv1(14, 33, 16, 2, 'ULVT', 1.8, 2.0, 128, [row_start], [71], [0.002], 1, np.arange(0, 0.002*(71)+0.0001, 0.002), t_label, ['../Data/chip14/MLC_programming_Chip14_Col33_2msPULSE_VG1p8_VD2p0_VAsource_VBdrain_01'], '../Plots/chip14/', 'VG1p8_VD2p0', '_rv1_cycle01_row_'+str(row_start).zfill(3), Imin=80, Imax=142)\n\n #MLC_IDSAT_algorithm_rv1(14, 33, 16, 2, 'ULVT', 1.8, 2.0, 128, range(0, 128), [71], [0.002], 1, np.arange(0, 0.002*(71)+0.0001, 0.002), t_label, ['../Data/chip14/MLC_programming_Chip14_Col33_2msPULSE_VG1p8_VD2p0_VAsource_VBdrain_01'], '../Plots/chip14/', 'VG1p8_VD2p0', '_rv1_cycle01', Imin=80, Imax=142)\n\n #MLC_IDSAT_characterization(11, 30, 16, 2, 'ULVT', 1.8, 1.7, 32, range(0, 16) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p8_VD1p7', '_cycle01', 50, 160, 1)\n #MLC_IDSAT_characterization(11, 30, 16, 2, 'ULVT', 1.8, 2.0, 32, range(16, 32), [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p8_VD2p0', '_cycle01', 20, 160, 1)\n\n #MLC_IDSAT_characterization(11, 30, 16, 2, 'ULVT', 1.8, 1.7, 32, range(0, 16) , [40, 80], [0.01, 0.01], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], 
['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col30_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p8_VD1p7', '_cycle0102', 50, 160, 1)\n #MLC_IDSAT_characterization(11, 30, 16, 2, 'ULVT', 1.8, 2.0, 32, range(16, 32), [40, 80], [0.01, 0.01], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col30_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p8_VD2p0', '_cycle0102', 20, 160, 1)\n\n #MLC_IDSAT_characterization(11, 30, 16, 2, 'ULVT', 1.8, 1.7, 32, range(0, 16) , [40, 80, 240], [0.01, 0.01, 0.01], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col30_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col30_HCI_240x10ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p8_VD1p7', '_cycle010203', 50, 160, 1)\n #MLC_IDSAT_characterization(11, 30, 16, 2, 'ULVT', 1.8, 2.0, 32, range(16, 32), [40, 80, 240], [0.01, 0.01, 0.01], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col30_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col30_HCI_240x10ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p8_VD2p0', '_cycle010203', 20, 160, 1)\n\n #MLC_IDSAT_characterization(11, 30, 16, 2, 'ULVT', 1.8, 1.7, 32, range(0, 16) , [40, 80, 240, 180], [0.01, 0.01, 0.01, 0.04], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col30_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col30_HCI_240x10ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col30_HCI_180x40ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p8_VD1p7', '_cycle01020304', 40, 160, 1)\n #MLC_IDSAT_characterization(11, 30, 16, 2, 'ULVT', 1.8, 2.0, 32, range(16, 32), [40, 80, 240, 180], [0.01, 0.01, 0.01, 0.04], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col30_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col30_HCI_240x10ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col30_HCI_180x40ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p8_VD2p0', '_cycle01020304', 10, 160, 1)\n\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.5, 1.7, 128, range(0, 32) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p5_VD1p7', '_cycle01', 50, 
160, 1)\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.8, 1.7, 128, range(32, 64) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p8_VD1p7', '_cycle01', 50, 160, 1)\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.5, 2.0, 128, range(64, 96) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p5_VD2p0', '_cycle01', 20, 160, 1)\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.8, 2.0, 128, range(96, 128), [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p8_VD2p0', '_cycle01', 20, 160, 1)\n\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.5, 1.7, 128, range(0, 32) , [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col33_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p5_VD1p7', '_cycle0102', 50, 160, 1)\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.8, 1.7, 128, range(32, 64) , [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col33_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p8_VD1p7', '_cycle0102', 50, 160, 1)\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.5, 2.0, 128, range(64, 96) , [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col33_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p5_VD2p0', '_cycle0102', 20, 160, 1)\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.8, 2.0, 128, range(96, 128), [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col33_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p8_VD2p0', '_cycle0102', 20, 160, 1)\n\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.5, 1.7, 128, range(0, 32) , [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col33_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col33_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p5_VD1p7', '_cycle010203', 50, 160, 1)\n #MLC_IDSAT_characterization(11, 33, 16, 2, 
'ULVT', 1.8, 1.7, 128, range(32, 64) , [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col33_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col33_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p8_VD1p7', '_cycle010203', 50, 160, 1)\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.5, 2.0, 128, range(64, 96) , [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col33_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col33_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p5_VD2p0', '_cycle010203', 20, 160, 1)\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.8, 2.0, 128, range(96, 128), [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col33_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col33_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p8_VD2p0', '_cycle010203', 20, 160, 1)\n\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.5, 1.7, 128, range(0, 32) , [40, 20, 12, 36], [0.01, 0.04, 0.2, 0.2], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col33_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col33_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col33_HCI_36x200ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p5_VD1p7', '_cycle01020304', 40, 160, 1)\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.8, 1.7, 128, range(32, 64) , [40, 20, 12, 36], [0.01, 0.04, 0.2, 0.2], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col33_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col33_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col33_HCI_36x200ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p8_VD1p7', '_cycle01020304', 40, 160, 1)\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.5, 2.0, 128, range(64, 96) , [40, 20, 12, 36], [0.01, 0.04, 0.2, 0.2], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col33_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col33_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col33_HCI_36x200ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 
'VG1p5_VD2p0', '_cycle01020304', 10, 160, 1)\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.8, 2.0, 128, range(96, 128), [40, 20, 12, 36], [0.01, 0.04, 0.2, 0.2], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col33_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col33_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col33_HCI_36x200ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p8_VD2p0', '_cycle01020304', 10, 160, 1)\n\n #MLC_IDSAT_characterization(11, 18, 36, 2, 'ULVT', 1.8, 2.0, 32, range(0, 16) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p8_VD2p0', '_cycle01', 50, 125, 1)\n #MLC_IDSAT_characterization(11, 18, 36, 2, 'ULVT', 1.8, 2.4, 32, range(16, 32), [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p8_VD2p4', '_cycle01', 20, 125, 1)\n\n #MLC_IDSAT_characterization(11, 18, 36, 2, 'ULVT', 1.8, 2.0, 32, range(0, 16) , [40, 80], [0.01, 0.01], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col18_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p8_VD2p0', '_cycle0102', 50, 125, 1)\n #MLC_IDSAT_characterization(11, 18, 36, 2, 'ULVT', 1.8, 2.4, 32, range(16, 32), [40, 80], [0.01, 0.01], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col18_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p8_VD2p4', '_cycle0102', 20, 125, 1)\n\n #MLC_IDSAT_characterization(11, 18, 36, 2, 'ULVT', 1.8, 2.0, 32, range(0, 16) , [40, 80, 240], [0.01, 0.01, 0.01], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col18_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col18_HCI_240x10ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p8_VD2p0', '_cycle010203', 50, 125, 1)\n #MLC_IDSAT_characterization(11, 18, 36, 2, 'ULVT', 1.8, 2.4, 32, range(16, 32), [40, 80, 240], [0.01, 0.01, 0.01], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col18_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col18_HCI_240x10ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p8_VD2p4', '_cycle010203', 20, 125, 1)\n\n #MLC_IDSAT_characterization(11, 18, 36, 2, 'ULVT', 1.8, 2.0, 32, range(0, 16) , [40, 
80, 240, 180], [0.01, 0.01, 0.01, 0.04], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col18_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col18_HCI_240x10ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col18_HCI_180x40ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p8_VD2p0', '_cycle01020304', 40, 125, 1)\n #MLC_IDSAT_characterization(11, 18, 36, 2, 'ULVT', 1.8, 2.4, 32, range(16, 32), [40, 80, 240, 180], [0.01, 0.01, 0.01, 0.04], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col18_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col18_HCI_240x10ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col18_HCI_180x40ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p8_VD2p4', '_cycle01020304', 10, 125, 1)\n\n #MLC_IDSAT_characterization(11, 24, 20, 2, 'ULVT', 1.8, 1.8, 32, range(0, 16) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p8_VD1p8', '_cycle01', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 24, 20, 2, 'ULVT', 1.8, 2.2, 32, range(16, 32), [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p8_VD2p2', '_cycle01', 15, 150, 1)\n\n #MLC_IDSAT_characterization(11, 24, 20, 2, 'ULVT', 1.8, 1.8, 32, range(0, 16) , [40, 80], [0.01, 0.01], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col24_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p8_VD1p8', '_cycle0102', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 24, 20, 2, 'ULVT', 1.8, 2.2, 32, range(16, 32), [40, 80], [0.01, 0.01], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col24_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p8_VD2p2', '_cycle0102', 15, 150, 1)\n\n #MLC_IDSAT_characterization(11, 24, 20, 2, 'ULVT', 1.8, 1.8, 32, range(0, 16) , [40, 80, 240], [0.01, 0.01, 0.01], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col24_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col24_HCI_240x10ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p8_VD1p8', '_cycle010203', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 24, 20, 2, 'ULVT', 1.8, 2.2, 32, range(16, 32), [40, 80, 240], 
[0.01, 0.01, 0.01], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col24_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col24_HCI_240x10ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p8_VD2p2', '_cycle010203', 15, 150, 1)\n\n #MLC_IDSAT_characterization(11, 24, 20, 2, 'ULVT', 1.8, 1.8, 32, range(0, 16) , [40, 80, 240, 180], [0.01, 0.01, 0.01, 0.04], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col24_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col24_HCI_240x10ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col24_HCI_180x40ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p8_VD1p8', '_cycle01020304', 40, 150, 1)\n #MLC_IDSAT_characterization(11, 24, 20, 2, 'ULVT', 1.8, 2.2, 32, range(16, 32), [40, 80, 240, 180], [0.01, 0.01, 0.01, 0.04], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col24_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col24_HCI_240x10ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col24_HCI_180x40ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p8_VD2p2', '_cycle01020304', 5, 150, 1)\n\n\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.5, 1.8, 128, range(0, 32) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p5_VD1p8', '_cycle01', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.8, 1.8, 128, range(32, 64) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p8_VD1p8', '_cycle01', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.5, 2.2, 128, range(64, 96) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p5_VD2p2', '_cycle01', 15, 150, 1)\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.8, 2.2, 128, range(96, 128), [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p8_VD2p2', '_cycle01', 15, 150, 1)\n\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.5, 1.8, 128, range(0, 32) , [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], 
['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col27_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p5_VD1p8', '_cycle0102', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.8, 1.8, 128, range(32, 64) , [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col27_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p8_VD1p8', '_cycle0102', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.5, 2.2, 128, range(64, 96) , [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col27_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p5_VD2p2', '_cycle0102', 15, 150, 1)\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.8, 2.2, 128, range(96, 128), [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col27_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p8_VD2p2', '_cycle0102', 15, 150, 1)\n\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.5, 1.8, 128, range(0, 32) , [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col27_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col27_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p5_VD1p8', '_cycle010203', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.8, 1.8, 128, range(32, 64) , [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col27_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col27_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p8_VD1p8', '_cycle010203', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.5, 2.2, 128, range(64, 96) , [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col27_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col27_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p5_VD2p2', '_cycle010203', 15, 150, 1)\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.8, 2.2, 128, range(96, 128), [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', 
'../Data/chip11/Chip11_Col27_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col27_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p8_VD2p2', '_cycle010203', 15, 150, 1)\n\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.5, 1.8, 128, range(0, 32) , [40, 20, 12, 36], [0.01, 0.04, 0.2, 0.2], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col27_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col27_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col27_HCI_36x200ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p5_VD1p8', '_cycle01020304', 40, 150, 1)\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.8, 1.8, 128, range(32, 64) , [40, 20, 12, 36], [0.01, 0.04, 0.2, 0.2], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col27_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col27_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col27_HCI_36x200ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p8_VD1p8', '_cycle01020304', 20, 150, 1)\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.5, 2.2, 128, range(64, 96) , [40, 20, 12, 36], [0.01, 0.04, 0.2, 0.2], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col27_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col27_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col27_HCI_36x200ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p5_VD2p2', '_cycle01020304', 5, 150, 1)\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.8, 2.2, 128, range(96, 128), [40, 20, 12, 36], [0.01, 0.04, 0.2, 0.2], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col27_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col27_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col27_HCI_36x200ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p8_VD2p2', '_cycle01020304', 5, 150, 1)\n\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.5, 1.8, 128, range(0, 32) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p5_VD1p8', '_cycle01', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.8, 1.8, 128, range(32, 64) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], 
['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p8_VD1p8', '_cycle01', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.5, 2.2, 128, range(64, 96) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p5_VD2p2', '_cycle01', 15, 150, 1)\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.8, 2.2, 128, range(96, 128), [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p8_VD2p2', '_cycle01', 15, 150, 1)\n\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.5, 1.8, 128, range(0, 32) , [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col28_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p5_VD1p8', '_cycle0102', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.8, 1.8, 128, range(32, 64) , [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col28_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p8_VD1p8', '_cycle0102', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.5, 2.2, 128, range(64, 96) , [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col28_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p5_VD2p2', '_cycle0102', 15, 150, 1)\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.8, 2.2, 128, range(96, 128), [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col28_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p8_VD2p2', '_cycle0102', 15, 150, 1)\n\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.5, 1.8, 128, range(0, 32) , [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col28_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col28_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p5_VD1p8', '_cycle010203', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.8, 1.8, 128, range(32, 64) , [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], 
['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col28_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col28_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p8_VD1p8', '_cycle010203', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.5, 2.2, 128, range(64, 96) , [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col28_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col28_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p5_VD2p2', '_cycle010203', 15, 150, 1)\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.8, 2.2, 128, range(96, 128), [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col28_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col28_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p8_VD2p2', '_cycle010203', 15, 150, 1)\n\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.5, 1.8, 128, range(0, 32) , [40, 20, 12, 36], [0.01, 0.04, 0.2, 0.2], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col28_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col28_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col28_HCI_36x200ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p5_VD1p8', '_cycle01020304', 40, 150, 1)\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.8, 1.8, 128, range(32, 64) , [40, 20, 12, 36], [0.01, 0.04, 0.2, 0.2], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col28_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col28_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col28_HCI_36x200ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p8_VD1p8', '_cycle01020304', 20, 150, 1)\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.5, 2.2, 128, range(64, 96) , [40, 20, 12, 36], [0.01, 0.04, 0.2, 0.2], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col28_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col28_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col28_HCI_36x200ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p5_VD2p2', '_cycle01020304', 5, 150, 1)\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.8, 2.2, 128, range(96, 128), [40, 20, 12, 36], [0.01, 0.04, 0.2, 0.2], 4, [0, 0.4, 0.8, 1.6, 2.0, 
2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col28_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col28_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col28_HCI_36x200ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p8_VD2p2', '_cycle01020304', 5, 150, 1)\n\n\n # (L, Nfin, VT_flavor, Nrow, Imax)\n col_list = [(36, 1, 'ULVT', 32 , 60 ), (36, 1, 'LVT', 32 , 50 ), (36, 1, 'SVT', 32 , 45 ),\n (36, 1, 'ULVT', 128, 60 ), (36, 1, 'LVT', 128, 50 ), (36, 1, 'SVT', 128, 45 ),\n (20, 1, 'ULVT', 32 , 75 ), (20, 1, 'LVT', 32 , 60 ), (20, 1, 'SVT', 32 , 50 ),\n (20, 1, 'ULVT', 128, 75 ), (20, 1, 'LVT', 128, 60 ), (20, 1, 'SVT', 128, 50 ),\n (16, 1, 'ULVT', 32 , 80 ), (16, 1, 'LVT', 32 , 65 ), (16, 1, 'SVT', 32 , 60 ),\n (16, 1, 'ULVT', 128, 80 ), (16, 1, 'LVT', 128, 65 ), (16, 1, 'SVT', 128, 60 ),\n (36, 2, 'ULVT', 32 , 115), (36, 2, 'LVT', 32 , 95 ), (36, 2, 'SVT', 32 , 85 ),\n (36, 2, 'ULVT', 128, 115), (36, 2, 'LVT', 128, 95 ), (36, 2, 'SVT', 128, 85 ), \n (20, 2, 'ULVT', 32 , 135), (20, 2, 'LVT', 32 , 115), (20, 2, 'SVT', 32 , 100),\n (20, 2, 'ULVT', 128, 135), (20, 2, 'LVT', 128, 120), (20, 2, 'SVT', 128, 100),\n (16, 2, 'ULVT', 32 , 150), (16, 2, 'LVT', 32 , 125), (16, 2, 'SVT', 32 , 115),\n (16, 2, 'ULVT', 128, 150), (16, 2, 'LVT', 128, 125), (16, 2, 'SVT', 128, 115)]\n\n #MLC_IDSAT_algorithm_rv1(11, 21, 36, 2, 'ULVT', 1.8, 2.4, 128, range(0, 128), [135+20], [0.2], 1, np.arange(0, 0.01*16+0.0001, 0.01), '', ['../Data/chip11/MLC_programming_Chip11_Col21_2msPULSE_VG1p8_VD2p4_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p8_VD2p4', '_rv1_cycle01_EfficientPython')\n\n #MLC_IDSAT_algorithm_rv1(12, 21, 36, 2, 'ULVT', '0.9-1.2-1.5-1.8', 2.4, 128, range(0, 128), [59+16, 72+40, 80+31, 68+23], [0.2, 0.2, 0.2, 0.2], 4, [0, 15, 15.1, 37.5, 37.6, 59.8, 59.9, 78.1], ['0', '15', '', '37.4', '', '59.6', '', '77.8'], ['../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG0p9_VD2p4_VAsource_VBdrain_01', '../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG1p2_VD2p4_VAsource_VBdrain_02', '../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG1p5_VD2p4_VAsource_VBdrain_03', '../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG1p8_VD2p4_VAsource_VBdrain_04'], '../Plots/chip12/', 'VG-0p9-1p2-1p5-1p8_VD2p4', '_rv1_cycle01020304')\n\n t_ratio_lst = [(0, 0.17), (0.16, 0.34), (0.33, 0.505), (0.495, 0.67), (0.66, 0.84), (0.83, 1)]\n\n #t_label = []\n #for t in np.arange(0, 0.2*(59+16) + 0.0001, 0.2):\n # t_label.append(str(t))\n #MLC_IDSAT_algorithm_rv1(12, 21, 36, 2, 'ULVT', 0.9, 2.4, 128, range(0, 128), [59+16], [0.2], 1, np.arange(0, 0.2*(59+16)+0.0001, 0.2), t_label, ['../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG0p9_VD2p4_VAsource_VBdrain_01'], '../Plots/chip12/', 'VG0p9_VD2p4', '_rv1_cycle01')\n #for row_start in np.arange(0, 128, 8):\n # MLC_IDSAT_algorithm_rv1(12, 21, 36, 2, 'ULVT', 0.9, 2.4, 128, range(row_start, row_start+8), [59+16], [0.2], 1, np.arange(0, 0.2*(59+16)+0.0001, 0.2), t_label, ['../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG0p9_VD2p4_VAsource_VBdrain_01'], '../Plots/chip12/', 'VG0p9_VD2p4', '_rv1_cycle01_row'+str(row_start)+'_to_'+str(row_start+7))\n # segment=0\n # for t_ratio in t_ratio_lst:\n # MLC_IDSAT_algorithm_rv1(12, 21, 36, 2, 'ULVT', 0.9, 2.4, 128, range(row_start, row_start+8), [59+16], [0.2], 1, 
np.arange(0, 0.2*(59+16)+0.0001, 0.2), t_label, ['../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG0p9_VD2p4_VAsource_VBdrain_01'], '../Plots/chip12/', 'VG0p9_VD2p4', '_rv1_cycle01_row'+str(row_start)+'_to_'+str(row_start+7)+'_'+str(segment), [t_ratio[0]*0.2*(59+16), t_ratio[1]*0.2*(59+16)])\n # segment += 1\n\n #t_label = []\n #for t in np.arange(0, 0.2*(72+40) + 0.0001, 0.2):\n # t_label.append(str(0.2*(59+16) + t))\n #MLC_IDSAT_algorithm_rv1(12, 21, 36, 2, 'ULVT', 1.2, 2.4, 128, range(0, 128), [72+40], [0.2], 1, np.arange(0, 0.2*(72+40)+0.0001, 0.2), t_label, ['../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG1p2_VD2p4_VAsource_VBdrain_02'], '../Plots/chip12/', 'VG1p2_VD2p4', '_rv1_cycle02')\n #for row_start in np.arange(0, 128, 8):\n # MLC_IDSAT_algorithm_rv1(12, 21, 36, 2, 'ULVT', 1.2, 2.4, 128, range(row_start, row_start+8), [72+40], [0.2], 1, np.arange(0, 0.2*(72+40)+0.0001, 0.2), t_label, ['../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG1p2_VD2p4_VAsource_VBdrain_02'], '../Plots/chip12/', 'VG1p2_VD2p4', '_rv1_cycle02_row'+str(row_start)+'_to_'+str(row_start+7))\n # segment=0\n # for t_ratio in t_ratio_lst:\n # MLC_IDSAT_algorithm_rv1(12, 21, 36, 2, 'ULVT', 1.2, 2.4, 128, range(row_start, row_start+8), [72+40], [0.2], 1, np.arange(0, 0.2*(72+40)+0.0001, 0.2), t_label, ['../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG1p2_VD2p4_VAsource_VBdrain_02'], '../Plots/chip12/', 'VG1p2_VD2p4', '_rv1_cycle02_row'+str(row_start)+'_to_'+str(row_start+7)+'_'+str(segment), [t_ratio[0]*0.2*(72+40), t_ratio[1]*0.2*(72+40)])\n # segment += 1\n\n\n #t_label = []\n #for t in np.arange(0, 0.2*(80+31) + 0.0001, 0.2):\n # t_label.append(str(0.2*(59+16) + 0.2*(72+40) + t))\n ##MLC_IDSAT_algorithm_rv1(12, 21, 36, 2, 'ULVT', 1.5, 2.4, 128, range(0, 128), [80+31], [0.2], 1, np.arange(0, 0.2*(80+31)+0.0001, 0.2), t_label, ['../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG1p5_VD2p4_VAsource_VBdrain_03'], '../Plots/chip12/', 'VG1p5_VD2p4', '_rv1_cycle03')\n #for row_start in np.arange(0, 128, 8):\n # MLC_IDSAT_algorithm_rv1(12, 21, 36, 2, 'ULVT', 1.5, 2.4, 128, range(row_start, row_start+8), [80+31], [0.2], 1, np.arange(0, 0.2*(80+31)+0.0001, 0.2), t_label, ['../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG1p5_VD2p4_VAsource_VBdrain_03'], '../Plots/chip12/', 'VG1p5_VD2p4', '_rv1_cycle03_row'+str(row_start)+'_to_'+str(row_start+7))\n # segment=0\n # for t_ratio in t_ratio_lst:\n # MLC_IDSAT_algorithm_rv1(12, 21, 36, 2, 'ULVT', 1.5, 2.4, 128, range(row_start, row_start+8), [80+31], [0.2], 1, np.arange(0, 0.2*(80+31)+0.0001, 0.2), t_label, ['../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG1p5_VD2p4_VAsource_VBdrain_03'], '../Plots/chip12/', 'VG1p5_VD2p4', '_rv1_cycle03_row'+str(row_start)+'_to_'+str(row_start+7)+'_'+str(segment), [t_ratio[0]*0.2*(80+31), t_ratio[1]*0.2*(80+31)])\n # segment += 1\n\n\n #t_label = []\n #for t in np.arange(0, 0.2*(68+23) + 0.0001, 0.2):\n # t_label.append(str(0.2*(59+16) + 0.2*(72+40) + 0.2*(80+31) + t))\n #MLC_IDSAT_algorithm_rv1(12, 21, 36, 2, 'ULVT', 1.8, 2.4, 128, range(0, 128), [68+23], [0.2], 1, np.arange(0, 0.2*(68+23)+0.0001, 0.2), t_label, ['../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG1p8_VD2p4_VAsource_VBdrain_04'], '../Plots/chip12/', 'VG1p8_VD2p4', '_rv1_cycle04')\n #for row_start in np.arange(0, 128, 8):\n # MLC_IDSAT_algorithm_rv1(12, 21, 36, 2, 'ULVT', 1.8, 2.4, 128, range(row_start, row_start+8), [68+23], [0.2], 1, np.arange(0, 0.2*(68+23)+0.0001, 0.2), t_label, 
['../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG1p8_VD2p4_VAsource_VBdrain_04'], '../Plots/chip12/', 'VG1p8_VD2p4', '_rv1_cycle04_row'+str(row_start)+'_to_'+str(row_start+7))\n # segment=0\n # for t_ratio in t_ratio_lst:\n # MLC_IDSAT_algorithm_rv1(12, 21, 36, 2, 'ULVT', 1.8, 2.4, 128, range(row_start, row_start+8), [68+23], [0.2], 1, np.arange(0, 0.2*(68+23)+0.0001, 0.2), t_label, ['../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG1p8_VD2p4_VAsource_VBdrain_04'], '../Plots/chip12/', 'VG1p8_VD2p4', '_rv1_cycle04_row'+str(row_start)+'_to_'+str(row_start+7)+'_'+str(segment), [t_ratio[0]*0.2*(68+23), t_ratio[1]*0.2*(68+23)])\n # segment += 1\n\n\n #MLC_IDSAT_characterization(10, 18, 36, 2, 'ULVT', 1.8, 2.0, 32, range(0, 16) , [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip10/Chip10_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip10/Chip10_Col18_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip10/Chip10_Col18_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip10/', 'VG1p8_VD2p0', '_cycle010203', 38, 112)\n #MLC_IDSAT_characterization(10, 18, 36, 2, 'ULVT', 1.8, 2.4, 32, range(16, 32), [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip10/Chip10_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip10/Chip10_Col18_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip10/Chip10_Col18_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip10/', 'VG1p8_VD2p4', '_cycle010203', 16, 110)\n\n #MLC_IDSAT_characterization(10, 24, 20, 2, 'ULVT', 1.8, 1.8, 32, range(0, 16) , [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip10/Chip10_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip10/Chip10_Col24_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip10/Chip10_Col24_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip10/', 'VG1p8_VD1p8', '_cycle010203', 44, 133)\n #MLC_IDSAT_characterization(10, 24, 20, 2, 'ULVT', 1.8, 2.2, 32, range(16, 32), [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip10/Chip10_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip10/Chip10_Col24_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip10/Chip10_Col24_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip10/', 'VG1p8_VD2p2', '_cycle010203', 14, 133)\n\n #MLC_IDSAT_characterization(10, 30, 16, 2, 'ULVT', 1.8, 1.7, 32, range(0, 16) , [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip10/Chip10_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip10/Chip10_Col30_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip10/Chip10_Col30_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip10/', 'VG1p8_VD1p7', '_cycle010203', 50, 135)\n #MLC_IDSAT_characterization(10, 30, 16, 2, 'ULVT', 1.8, 2.0, 32, range(16, 32), [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', 
'0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip10/Chip10_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip10/Chip10_Col30_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip10/Chip10_Col30_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip10/', 'VG1p8_VD2p0', '_cycle010203', 20, 140)\n\n\n\n #MLC_IDSAT_characterization(10, 18, 36, 2, 'ULVT', 1.8, 2.0, 32, range(0, 16) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip10/Chip10_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip10/', 'VG1p8_VD2p0', '_cycle01', 38, 112)\n #MLC_IDSAT_characterization(10, 18, 36, 2, 'ULVT', 1.8, 2.4, 32, range(16, 32), [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip10/Chip10_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip10/', 'VG1p8_VD2p4', '_cycle01', 16, 110)\n\n #MLC_IDSAT_characterization(10, 24, 20, 2, 'ULVT', 1.8, 1.8, 32, range(0, 16) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip10/Chip10_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip10/', 'VG1p8_VD1p8', '_cycle01', 44, 133)\n #MLC_IDSAT_characterization(10, 24, 20, 2, 'ULVT', 1.8, 2.2, 32, range(16, 32), [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip10/Chip10_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip10/', 'VG1p8_VD2p2', '_cycle01', 14, 133)\n\n #MLC_IDSAT_characterization(10, 30, 16, 2, 'ULVT', 1.8, 1.7, 32, range(0, 16) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip10/Chip10_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip10/', 'VG1p8_VD1p7', '_cycle01', 50, 135)\n #MLC_IDSAT_characterization(10, 30, 16, 2, 'ULVT', 1.8, 2.0, 32, range(16, 32), [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip10/Chip10_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip10/', 'VG1p8_VD2p0', '_cycle01', 20, 140)\n\n\n\n #MLC_IDSAT_characterization(10, 18, 36, 2, 'ULVT', 1.8, 2.0, 32, range(0, 16) , [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip10/Chip10_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip10/Chip10_Col18_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip10/', 'VG1p8_VD2p0', '_cycle0102', 38, 112)\n #MLC_IDSAT_characterization(10, 18, 36, 2, 'ULVT', 1.8, 2.4, 32, range(16, 32), [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip10/Chip10_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip10/Chip10_Col18_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip10/', 'VG1p8_VD2p4', '_cycle0102', 16, 110)\n # \n 
#MLC_IDSAT_characterization(10, 24, 20, 2, 'ULVT', 1.8, 1.8, 32, range(0, 16) , [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip10/Chip10_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip10/Chip10_Col24_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip10/', 'VG1p8_VD1p8', '_cycle0102', 44, 133)\n #MLC_IDSAT_characterization(10, 24, 20, 2, 'ULVT', 1.8, 2.2, 32, range(16, 32), [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip10/Chip10_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip10/Chip10_Col24_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip10/', 'VG1p8_VD2p2', '_cycle0102', 14, 133)\n # \n #MLC_IDSAT_characterization(10, 30, 16, 2, 'ULVT', 1.8, 1.7, 32, range(0, 16) , [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip10/Chip10_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip10/Chip10_Col30_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip10/', 'VG1p8_VD1p7', '_cycle0102', 50, 135)\n #MLC_IDSAT_characterization(10, 30, 16, 2, 'ULVT', 1.8, 2.0, 32, range(16, 32), [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip10/Chip10_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip10/Chip10_Col30_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip10/', 'VG1p8_VD2p0', '_cycle0102', 20, 140)", "def main():\n\n\t# eesAmplitudes = range(200,321,10)\n\teesAmplitudes = [\"%\"+\"%.2f_0_0\"%(i) for i in np.arange(0,1.01,.05)]\n\t# eesFrequencies = range(10,1001,20)\n\teesFrequencies = np.logspace(1,3,50)\n\t# nrnStructureFile = \"fsSFrFfMnArtMod.txt\"\n\t# nrnStructureFile = \"fsSFrFfMnArtModHuman.txt\"\n\tnrnStructureFile = \"fsMnArtModHuman.txt\"\n\t# name = \"FreqAmpModHuman_0367S\"\n\tname = \"FreqAmpModHuman_ArtmodHuman_10msBurst\"\n\n\tnSim = len(eesFrequencies)*len(eesAmplitudes)\n\tcount=0.\n\tpercLastPrint=0.\n\tprintPeriod = 0.05\n\t# simTime = 250\n\tsimTime = 15\n\tspecies = \"human\"\n\n\tfor eesAmplitude in eesAmplitudes:\n\t\tfor eesFrequency in eesFrequencies:\n\t\t\tfilName = name+\"_amp_\"+str(eesAmplitude)+\"_freq_\"+str(eesFrequency)\n\t\t\tresultFile = gt.find(\"*\"+filName+\".p\",pathToResults)\n\t\t\tif not resultFile:\n\t\t\t\treturnCode = None\n\t\t\t\twhile not returnCode==0:\n\t\t\t\t\tprogram = ['python','scripts/computeAfferentsEfferentsModulation.py',\n\t\t\t\t\t\tstr(eesFrequency),str(eesAmplitude),species,nrnStructureFile,name,\"--simTime\",str(simTime)]\n\t\t\t\t\tprint \" \".join(program)\n\t\t\t\t\tforwardSimulation = subprocess.Popen(program, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t\t\t\t\treturnCode = None\n\t\t\t\t\twhile returnCode is None:\n\t\t\t\t\t\tmessage = forwardSimulation.stdout.readline().rstrip(\"\\n\").split()\n\t\t\t\t\t\tif message != None:print \"\\t\\t\"+\" \".join(message)+\"\\t\\t\"\n\t\t\t\t\t\treturnCode = forwardSimulation.poll()\n\t\t\t\t\tif returnCode != 0: print \"\\t\\t\\t\\t Error n: \",forwardSimulation.poll(),\" resetting simulation...\"\n\t\t\tcount+=1\n\t\t\tif 
count/nSim-percLastPrint>=printPeriod:\n\t\t\t\tpercLastPrint=count/nSim\n\t\t\t\tprint str(round(count/nSim*100))+\"% of simulations performed...\"\n\tplot_stats(eesAmplitudes,eesFrequencies,simTime,name)", "def main():\n\n logger.info('Process initiated - Building dataset')\n\n if os.path.isfile(train_path) and os.path.isfile(test_path):\n logger.info('Loading pickled data')\n return pd.read_pickle(train_path), pd.read_pickle(test_path)\n\n logger.info('Reading COSMIC Cancer Gene Census')\n gene_census = cancer_gene_census()\n gene_census.extend(civic_cancer_genes())\n\n gene_census = set(gene_census)\n\n training_data = pd.DataFrame()\n testing_data = pd.DataFrame()\n\n for cancer_type in cancer_types:\n data_file_name = cancer_type + \".meth.by_mean.data.txt\"\n data_file_location = os.path.join(data_location, data_file_name)\n\n logger.info('Reading Methylation data for {}'.format(cancer_type))\n\n methyl_data = pd.read_csv(data_file_location, delimiter='\\t', skiprows=[1], index_col=0)\n\n logger.info(\n 'Number of Genes: {0} | Number of Patients: {1}'.format(methyl_data.shape[0], methyl_data.shape[1]))\n logger.info('Preprocessing Methylation data')\n\n methyl_data = genes_feature_selection(methyl_data, gene_census)\n\n logger.info('Number of Genes after processing: {0}\\n'.format(methyl_data.shape[0]))\n\n methyl_data = add_classification_label(methyl_data)\n methyl_data = methyl_data.transpose()\n\n normal_cases = methyl_data[methyl_data['Tumor'] == 0]\n logger.info(normal_cases.shape)\n train_normal_cases = normal_cases.sample(frac=0.7, random_state=200)\n logger.info(train_normal_cases.shape)\n test_normal_cases = normal_cases.drop(train_normal_cases.index)\n logger.info(train_normal_cases.shape)\n\n tumor_cases = methyl_data[methyl_data['Tumor'] != 0]\n logger.info(tumor_cases.shape)\n train_tumor_cases = tumor_cases.sample(frac=0.7, random_state=200)\n logger.info(train_tumor_cases.shape)\n\n test_tumor_cases = tumor_cases.drop(train_tumor_cases.index)\n logger.info(test_tumor_cases.shape)\n\n training_data = training_data.append(train_normal_cases)\n training_data = training_data.append(train_tumor_cases)\n\n testing_data = testing_data.append(test_normal_cases)\n testing_data = testing_data.append(test_tumor_cases)\n\n training_data = training_data.sample(frac=1)\n testing_data = testing_data.sample(frac=1)\n\n logger.info('Pickling training and testing data')\n training_data.to_pickle(train_path)\n testing_data.to_pickle(test_path)\n\n logger.info('Processing completed!')\n visualize_data(training_data)\n\n return training_data, testing_data", "def run(self):\n\n # Setup hdf5 file and datasets\n self.vw_f = h5py.File(self.name,'w')\n self.vw,self.vwts = [],[]\n for i in range(self.n_cams):\n x,y = self.resolution[i]\n vw = self.vw_f.create_dataset('mov{}'.format(i), (self.hdf_resize, y, x), maxshape=(None, y, x), dtype='uint8', compression='lzf') \n vwts = self.vw_f.create_dataset('ts{}'.format(i), (self.hdf_resize,2), maxshape=(None,2), dtype=np.float64, compression='lzf')\n self.vw.append(vw)\n self.vwts.append(vwts)\n \n # Counters and buffers\n _sav_idx = [0]*self.n_cams # index within hdf5 dataset\n _buf_idx = [0]*self.n_cams # index of in-memory buffer that is periodicially dumped to hdf5 dataset\n _saving_buf,_saving_ts_buf = [],[]\n for i in range(self.n_cams):\n x,y = self.resolution[i]\n sb = np.empty((self.buffer_size,y,x), dtype=np.uint8)\n stb = np.empty((self.buffer_size,2), dtype=np.float64)\n _saving_buf.append(sb)\n _saving_ts_buf.append(stb)\n\n 
cams_running = [True for i in range(self.n_cams)]\n # Main loop\n while any(cams_running):\n # For all datasets: if there's not enough room to dump another buffer's worth into dataset, extend it\n # Then read new frames, and save/query as desired\n for di in range(self.n_cams):\n if not cams_running[di]:\n continue\n \n if self.vw[di].shape[0]-_sav_idx[di] <= self.buffer_size:\n assert self.vw[di].shape[0] == self.vwts[di].shape[0], 'Frame and timestamp dataset lengths are mismatched.'\n self.vw[di].resize((self.vw[di].shape[0]+self.hdf_resize, self.vw[di].shape[1], self.vw[di].shape[2]))\n self.vwts[di].resize((self.vwts[di].shape[0]+self.hdf_resize,self.vwts[di].shape[1]))\n \n # Get new frames from buffer, breaking out if empty and kill flag has been raised\n ts=temp=bsave=None\n try:\n ts,temp,bsave = self.frame_buffer[di].get(block=False)\n except Queue.Empty:\n if self.kill_flag.value:\n cams_running[di] = False\n continue\n\n if self.kill_flag.value==True:\n logging.info('Final flush for camera {}: {} frames remain.'.format(di, self.frame_buffer[di].qsize()))\n \n if di==self.query_idx and self.query_flag.value:\n self.query_queue[:] = temp.copy()\n self.query_queue_ts.value = ts[1]\n self.query_flag.value = False\n \n if bsave: # flag that this frame was added to queue during a saving period\n\n # add new data to in-memory buffer\n x,y = self.resolution[di]\n _saving_buf[di][_buf_idx[di]] = temp.reshape([y,x])\n _saving_ts_buf[di][_buf_idx[di]] = ts\n _buf_idx[di] += 1\n # if necessary, flush out buffer to hdf dataset\n if (self.flushing.value and _buf_idx[di]>=self.min_flush) or _buf_idx[di] >= self.buffer_size:\n if _buf_idx[di] >= self.buffer_size:\n logging.warning('Dumping camera b/c reached max buffer (buffer={}, current idx={})'.format(self.buffer_size, _buf_idx[di]))\n self.vw[di][_sav_idx[di]:_sav_idx[di]+_buf_idx[di],:,:] = _saving_buf[di][:_buf_idx[di]]\n self.vwts[di][_sav_idx[di]:_sav_idx[di]+_buf_idx[di],:] = _saving_ts_buf[di][:_buf_idx[di]]\n _sav_idx[di] += _buf_idx[di]\n _buf_idx[di] = 0\n\n # final flush:\n for di in range(self.n_cams):\n self.vw[di][_sav_idx[di]:_sav_idx[di]+_buf_idx[di],:,:] = _saving_buf[di][:_buf_idx[di]]\n self.vwts[di][_sav_idx[di]:_sav_idx[di]+_buf_idx[di]] = _saving_ts_buf[di][:_buf_idx[di]]\n _sav_idx[di] += _buf_idx[di]\n # cut off all unused allocated space \n self.vw[di].resize([_sav_idx[di],self.vw[di].shape[1],self.vw[di].shape[2]])\n self.vwts[di].resize([_sav_idx[di],2])\n\n self.vw_f.close()\n self.saving_complete.value = True", "def preprocess(directory): \n\tprint (\"Preprocessing data from \" + directory + \" =>\")\n\t\n\tsmiles = []\n\tfiles = os.listdir(directory)\n\n\t# Read the data from the files \n\tprint (\"Reading data...\" )\n\twith click.progressbar(list(range(len(files)))) as bar:\n\t\tfor i in bar:\n\t\t\tf = open(directory + \"/\" + files[i], 'rb')\n\t\t\tsmiles += get_smiles_from_file(f)\n\t\t\tf.close()\n\n\t# print (smiles)\n\n\tt0 = time()\n\t# Convert the SMILES strings to MACCS fingerprints\n\tprint (\"Converting SMILES to MACCS fingerprints...\")\n\twith concurrent.futures.ProcessPoolExecutor() as executor:\n\t\twith click.progressbar(smiles) as bar:\n\t\t\t# Convert all SMILES to MACCS Keys\n\t\t\tprint (\"\")\n\t\t\tmaccs = []\n\t\t\t# Create a thread pool with size = no_of_cores\n\t\t\tpool = multiprocessing.Pool(multiprocessing.cpu_count())\n\t\t\tmaccs = pool.map(convert_to_fingerprint, bar)\n\t\t\tprint (\"\")\n\n\tprint (\"\")\n\tt1 = time() - t0\n\tprint (\"Time taken =\", t1, \"s\")\n\n\t# Remove 
all NoneType objects from maccs\n\tmaccs = remove_NoneTypes(maccs) \n\t\n\t# Check different statistics for maccs\n\tret_check = check_maccs(maccs)\n\n\t# Save data\n\tprint (\"Saving files =>\")\n\tprint (\"\")\n\tsave_data(maccs, 0.999)\n\t\n\t# Get statistics for current number of train\n\t# and test instances\n\tperform_filecheck()", "def inout_creator(df = pd.DataFrame(), features='datosrahm.csv'):\r\n df = df\r\n \r\n start=time.time()\r\n \r\n datos=pd.read_csv(features)\r\n datos=datos.fillna(-1)\r\n\r\n dicc=dict(datos[['Symbol','Z']].values)\r\n\r\n dicc['D']=1\r\n dicc['Bk']=97\r\n dicc['Cf']=98\r\n dicc['Es']=99\r\n dicc['Fm']=100\r\n dicc['Md']=101\r\n dicc['No']=102\r\n dicc['Lr']=103\r\n \r\n max_sitios = max(df['sitios'].values)\r\n \r\n X=np.zeros((len(df),max_sitios,104))\r\n\r\n mult=np.zeros((len(df),max_sitios))\r\n wyckmul=np.load('support/WyckoffSG_dict.npy').item()['wyckmul']\r\n \r\n todelete = list()\r\n \r\n for row in range(len(df)):\r\n item=df['WyckOcc'][row]\r\n sitios=list(item.values()) \r\n sitocc=np.zeros((len(sitios),104)) \r\n spacegroup = str(df['sgnum'][row]).zfill(3)\r\n \r\n try:\r\n \r\n s=[int(wyckmul[spacegroup][i]) for j in [list(item.keys()) for item in \\\r\n sitios] for i in j]\r\n \r\n except:\r\n print(row)\r\n print('There exists an error concerning with the space group of CIF ', df['cif'][row],'\\n')\r\n print('Please check in www.crystallography.net to provide the correct space group number of that CIF',\r\n '\\n','\\n')\r\n spacegroup=input('Give me the correct spacegroup:'+'\\n'+'\\n')\r\n s=[int(wyckmul[spacegroup][i]) for j in [list(item.keys()) for item in \\\r\n list(df['WyckOcc'][row].values())] for i in j]\r\n \r\n occs=[]\r\n for i in range(len(sitios)):\r\n\r\n for j in list(sitios[i].values()):\r\n \r\n ocupacion=np.array(list(j.values()))\r\n llaves=[llave.replace('+','').replace('-','').replace('1',\r\n '').replace('2','').replace('3','').replace('4',\r\n '') for llave in np.array(list(j.keys()))]\r\n llaves=[llave.replace('.','') for llave in llaves]\r\n llaves=[llave.replace('5','').replace('6','').replace('7',\r\n '').replace('8','').replace('9','').replace('0',\r\n '') for llave in llaves]\r\n vector=np.zeros((1,104))\r\n occs=[sum(ocupacion)]+occs\r\n \r\n try:\r\n \r\n idx=[dicc[k] for k in llaves]\r\n \r\n except:\r\n print('The compound with the cif ', df['cif'][row], ' will be deleted')\r\n print('The database will be updated')\r\n todelete += [row]\r\n \r\n for k in idx:\r\n vector[0][k-1] = ocupacion[idx.index(k)]\r\n \r\n sitocc[i]=vector\r\n \r\n while sitocc.shape[0] != max_sitios:\r\n sitocc=np.concatenate((np.zeros((1,104)),sitocc))\r\n s=[0]+s\r\n \r\n X[row,:,:]=sitocc\r\n mult[row]=s\r\n \r\n features=datos.iloc[:,2:].values\r\n x=X[:,:,:96]\r\n \r\n fracsum = np.expand_dims(np.sum(x,axis=2), axis=2)\r\n \r\n x=np.dot(x,features) \r\n \r\n x = np.delete(x, todelete,axis=0)\r\n df = df.drop(df.index[todelete]).reset_index(drop=True)\r\n \r\n print('inout_creator lasted ',round(time.time()-start,2),' s') \r\n return x, fracsum, df", "def process_A_2019_ADMET_DMPK():\n\n fname = f\"{DATA_PATH}/A.2019.ADMET_DMPK.csv\"\n fout_1 = f\"{PROCESSED_PATH}/A.2019.ADMET_DMPK.SSF.smi\"\n fout_2 = f\"{PROCESSED_PATH}/A.2019.ADMET_DMPK.CS.smi\"\n logging.info(f\"Processing {fname}\")\n\n try:\n import pubchempy as pcp\n except ModuleNotFoundError as e:\n print(e)\n return\n\n with open(fname, 'r') as fin, open(fout_1, 'w') as fout1, open(fout_2, 'w') as fout2:\n fin.readline()\n for line in fin:\n if 
line.startswith(\"\\\"\"):\n pairs = line.rstrip().split(\"\\\"\")\n name = pairs[1]\n pairs = pairs[2].split(',')\n logS0_SFF = pairs[0]\n logS0_CS = pairs[2]\n else:\n pairs = line.rstrip().split(',')\n name = pairs[0]\n logS0_SFF = pairs[1]\n logS0_CS = pairs[3]\n\n name = name.replace('\\\"', '')\n results = pcp.get_compounds(name, 'name')\n if len(results) > 0:\n isomeric_smiles = results[0].isomeric_smiles\n canon_smiles = canonicalize_smiles(isomeric_smiles)\n fout1.write(\"{},{}\\n\".format(canon_smiles, logS0_SFF))\n fout2.write(\"{},{}\\n\".format(canon_smiles, logS0_CS))", "def main():\n parser = argparse.ArgumentParser(description=\"Process the results of an experiment.\")\n parser.add_argument(\"experiment\")\n arguments = parser.parse_args()\n path = f\"experiments/{arguments.experiment}\"\n if not os.path.exists(path):\n raise SystemExit(f\"Path {path} does not exists.\")\n\n # For efficiency, one should generate the results from the parts without merging them.\n files = [file for file in os.listdir(path) if os.path.isfile(os.path.join(path, file))]\n frames = []\n for file in files:\n device, experiment, _ = file.split(\".\")\n frame = pandas.read_csv(\n os.path.join(path, file),\n index_col=\"variable\",\n usecols=[\"variable\", \"group_index\", \"value_i\"], dtype={\"value_i\": \"Int64\"}\n )\n frame[\"board\"] = device\n frame[\"experiment\"] = experiment\n frames.append(frame)\n dataframe = pandas.concat(frames)\n frames = None\n\n current_grouping = dataframe.groupby([\"group_index\", \"variable\"])\n \n data = current_grouping.agg([\n numpy.median,\n _percentile_factory(95),\n numpy.mean,\n numpy.std,\n \"count\"\n ])\n\n print(data)\n \n data = data.droplevel([0], axis=1)\n data = data.unstack()\n data.columns = data.columns.map('_'.join)\n data.to_csv(f\"{arguments.experiment}.csv\")", "def main(args):\n hist_info = list()\n hist_info.append(\n (\n \"CutMll/BDT_NONRES_SF_FINE\",\n [\n -1,\n -0.906,\n -0.81,\n -0.704,\n -0.59,\n -0.435,\n -0.242,\n -0.075,\n 0.07,\n 0.196,\n 0.305,\n 0.4,\n 0.482,\n 0.554,\n 0.619,\n 0.678,\n 0.73,\n 0.775,\n 0.813,\n 0.847,\n 0.877,\n 0.903,\n 0.926,\n 0.946,\n 1,\n ],\n )\n )\n hist_info.append(\n (\n \"CutMll/BDT_NONRES_DF_FINE\",\n [\n -1.0,\n -0.932,\n -0.82,\n -0.635,\n -0.343,\n -0.119,\n 0.074,\n 0.224,\n 0.333,\n 0.414,\n 0.484,\n 0.546,\n 0.602,\n 0.657,\n 0.704,\n 0.744,\n 0.78,\n 0.824,\n 0.86,\n 0.891,\n 0.918,\n 0.939,\n 0.953,\n 0.975,\n 1,\n ],\n )\n )\n\n logging.info(\"Loading sample folder, this may take a while ...\")\n samples = TQSampleFolder.loadSampleFolder(args.input_file + \":\" + args.sample_folder)\n\n logging.info(\"Starting to create rebinned and remapped histograms\")\n for path, binning in hist_info:\n rebin_bdt_output(samples, path, binning)\n\n if args.output_file:\n new_file_name = args.output_file\n else:\n new_file_name = args.input_file.split(\".root\")[0] + \"_rebin.root\"\n if samples.writeToFile(new_file_name, True, 2, False):\n logging.info(\"Writing modified sample folder to %s was successful\", new_file_name)", "def stage6(self):\r\n dbutils = DBUtils()\r\n fits_images_list = glob.glob('/GARUDATA/IMAGING24/CYCLE24/*/FITS_IMAGE/*PBCOR*.FITS')\r\n # fits_images_list = ['/GARUDATA/IMAGING17/CYCLE17/4575/17_024_04NOV09/FITS_IMAGE/A3376-W.GMRT325.SP2B.PBCOR.FITS']\r\n # fits_images_list = ['/GARUDATA/IMAGING17/CYCLE17/4572/17_024_03NOV09/FITS_IMAGE/A3376-E.GMRT325.SP2B.PBCOR.FITS']\r\n counter = 1\r\n for fits_file in fits_images_list:\r\n counter += 1\r\n # fits_file = 
'/GARUDATA/IMAGING19/CYCLE19/5164/19_085_27DEC10/FITS_IMAGE/1445+099.GMRT325.SP2B.PBCOR.FITS'\r\n\r\n fits_dir = os.path.dirname(fits_file)\r\n\r\n fits_table = fits.open(fits_file)\r\n fits_header = fits_table[0].header\r\n\r\n data_keys = {}\r\n\r\n object = os.path.basename(fits_file).split('.')[0]\r\n # object = \"A3376_E\"\r\n\r\n # summary_file = glob.glob(fits_dir + '/spam_A3376-E*.summary')\r\n summary_file = glob.glob(fits_dir + '/spam_' + object + '*.summary')\r\n rms = \"NA\"\r\n for each_summary in summary_file:\r\n if 'DONE' in open(each_summary).read():\r\n # print each_summary\r\n lines = open(each_summary).readlines()\r\n rms = lines[-1].split(' ')[-5]\r\n # print rms\r\n else:\r\n # print \"Needs to be deleted\"\r\n if rms == \"NA\":\r\n log_file = each_summary.replace('summary', 'log')\r\n lines = open(log_file).readlines()\r\n rms = lines[-2].split(' ')[0]\r\n if rms == \"NA\":\r\n rms = 2.11\r\n\r\n print(fits_file)\r\n\r\n if \"CYCLE24\" in fits_file:\r\n dir_path = os.path.dirname(os.path.dirname(fits_file))\r\n observation_no = glob.glob(dir_path+\"/*.obslog\")[0].split('/')[-1].split('.')[0]\r\n print(observation_no)\r\n else:\r\n observation_no = fits_file.split('/')[4]\r\n\r\n columnKeys = {\r\n \"project_id\"\r\n }\r\n\r\n if observation_no == 'MIXCYCLE':\r\n mix_path = fits_file.split('/')[4]+'/'+fits_file.split('/')[5]\r\n mix_sql = \"select observation_no from projectobsno where file_path like '%\"+mix_path+\"%'\"\r\n mix_cycle_data = dbutils.select_gadpu_query(mix_sql)\r\n observation_no = mix_cycle_data[0][0]\r\n\r\n whereKeys = {\r\n \"observation_no\": observation_no\r\n }\r\n print(columnKeys, whereKeys)\r\n project_id = dbutils.select_from_table(\"projectobsno\", columnKeys, whereKeys, 0)\r\n print(project_id)\r\n if project_id:\r\n columnKeys = {\r\n \"das_scangroup_id\",\r\n \"ltacomb_file\"\r\n }\r\n whereKeys = {\r\n \"project_id\": project_id,\r\n }\r\n result = dbutils.select_from_table(\"ltadetails\", columnKeys, whereKeys, 0)\r\n\r\n print(result)\r\n print(result[1])\r\n\r\n sql = \"select ant_mask, band_mask, calcode, chan_width, corr_version, g.observation_no, \" \\\r\n \"date_obs, ddec, dec_2000, dec_date, dra, lsr_vel1, lsr_vel2, lta_time, \" \\\r\n \"net_sign1, net_sign2, net_sign3, net_sign4, num_chans, num_pols, onsrc_time, \" \\\r\n \"proj_code, qual, ra_2000, ra_date, rest_freq1, rest_freq2, sky_freq1, \" \\\r\n \"sky_freq2, source, sta_time from das.scangroup g inner join \" \\\r\n \"das.scans s on s.scangroup_id = g.scangroup_id \" \\\r\n \"where s.scangroup_id = \" + str(result[1]) + \" AND source like '\" + object + \"'\"\r\n scangroup_data = dbutils.select_scangroup_query(sql)\r\n\r\n # print(scangroup_data)\r\n\r\n if scangroup_data:\r\n data_keys = {\r\n \"ANTMASK\": scangroup_data[0],\r\n \"BANDMASK\": scangroup_data[1],\r\n \"CALCODE\": scangroup_data[2],\r\n \"CHANWIDT\": scangroup_data[3],\r\n \"CORRVERS\": scangroup_data[4],\r\n \"OBSNUM\": scangroup_data[5],\r\n \"DATEOBS\": str(scangroup_data[6]),\r\n \"DDEC\": scangroup_data[7],\r\n \"DEC2000\": scangroup_data[8],\r\n \"DECDATE\": scangroup_data[9],\r\n \"DRA\": scangroup_data[10],\r\n \"LSRVEL1\": scangroup_data[11],\r\n \"LSRVEL2\": scangroup_data[12],\r\n \"LTATIME\": scangroup_data[13],\r\n \"NETSIGN1\": scangroup_data[14],\r\n \"NETSIGN2\": scangroup_data[15],\r\n \"NETSIGN3\": scangroup_data[16],\r\n \"NETSIGN4\": scangroup_data[17],\r\n \"NUMCHANS\": scangroup_data[18],\r\n \"NUMPOLS\": scangroup_data[19],\r\n \"ONSRCTIM\": scangroup_data[20],\r\n \"PROJCODE\": 
scangroup_data[21],\r\n \"QUAL\": scangroup_data[22],\r\n \"RA2000\": scangroup_data[23],\r\n \"RADATE\": scangroup_data[24],\r\n \"RESTFRE1\": scangroup_data[25],\r\n \"RESTFRE2\": scangroup_data[26],\r\n \"SKYFREQ1\": scangroup_data[27],\r\n \"SKYFREQ2\": scangroup_data[28],\r\n \"STATIME\": scangroup_data[30],\r\n \"RMS\": float(rms)\r\n }\r\n\r\n # print(data_keys)\r\n filename = fits_file\r\n hdulist = fits.open(filename, mode='update')\r\n header = hdulist[0].header\r\n\r\n try:\r\n histroy = str(fits_header[\"HISTORY\"]).strip().split(' ')\r\n nh = [x for x in histroy if x]\r\n data_keys[\"BMAJ\"] = float(nh[3])\r\n data_keys[\"BMIN\"] = float(nh[5])\r\n data_keys[\"BPA\"] = float(nh[7])\r\n print(histroy)\r\n try:\r\n del header['HISTORY']\r\n except Exception as exh:\r\n print(exh)\r\n except Exception as ex:\r\n print(ex)\r\n try:\r\n if fits_header[\"BMAJ\"]:\r\n data_keys[\"BMAJ\"] = float(fits_header[\"BMAJ\"])\r\n data_keys[\"BMIN\"] = float(fits_header[\"BMIN \"])\r\n data_keys[\"BPA\"] = float(fits_header[\"BPA\"])\r\n except Exception as ex:\r\n print(ex)\r\n\r\n pbcor_file = os.path.basename(fits_file).split('.')[0]\r\n spam_log = glob.glob(os.path.dirname(fits_file) + \"/spam_\" + pbcor_file + \"*.log\")\r\n spam_log.sort()\r\n spam_log = spam_log[0]\r\n reading_spam_log = open(spam_log).readlines()\r\n bmaj_bmin = []\r\n if len(reading_spam_log) > 0:\r\n for each_line in reading_spam_log:\r\n if \"BMAJ\" in each_line:\r\n bmaj_bmin.append(each_line)\r\n bmaj_bmin_data = bmaj_bmin[0].replace(' ',' ').replace(\" \",\" \").replace(\"= \",\"=\").split((\r\n ' '))\r\n print(bmaj_bmin_data)\r\n for each_key in bmaj_bmin_data:\r\n if \"BMAJ\" in each_key:\r\n data_keys[\"BMAJ\"] = float(each_key.split('=')[1])\r\n if \"BMIN\" in each_key:\r\n data_keys[\"BMIN\"] = float(each_key.split('=')[1])\r\n if \"BPA\" in each_key:\r\n data_keys[\"BPA\"] = float(each_key.split('/')[0].split('=')[1])\r\n print( data_keys[\"BMAJ\"], data_keys[\"BMIN\"], data_keys[\"BPA\"])\r\n try:\r\n for key, value in data_keys.iteritems():\r\n print key, value\r\n header.set(key, value)\r\n hdulist.flush()\r\n except Exception as ex:\r\n print(ex)", "def reduce():\n\n # find obs log\n logname_lst = [fname for fname in os.listdir(os.curdir)\n if fname[-7:]=='.obslog']\n if len(logname_lst)==0:\n print('No observation log found')\n exit()\n elif len(logname_lst)>1:\n print('Multiple observation log found:')\n for logname in sorted(logname_lst):\n print(' '+logname)\n else:\n pass\n\n # read obs log\n logtable = read_obslog(logname_lst[0])\n\n # load config files\n config = configparser.ConfigParser(\n inline_comment_prefixes = (';','#'),\n interpolation = configparser.ExtendedInterpolation(),\n )\n # find local config file\n for fname in os.listdir(os.curdir):\n if fname[-4:]=='.cfg':\n config.read(fname)\n print('Load Congfile File: {}'.format(fname))\n break\n\n # extract keywords from config file\n section = config['data']\n rawpath = section.get('rawpath')\n statime_key = section.get('statime_key')\n exptime_key = section.get('exptime_key')\n section = config['reduce']\n midpath = section.get('midpath')\n odspath = section.get('odspath')\n figpath = section.get('figpath')\n mode = section.get('mode')\n fig_format = section.get('fig_format')\n oned_suffix = section.get('oned_suffix')\n\n # create folders if not exist\n if not os.path.exists(figpath): os.mkdir(figpath)\n if not os.path.exists(odspath): os.mkdir(odspath)\n if not os.path.exists(midpath): os.mkdir(midpath)\n\n nccd = 3\n\n 
########################## load file selection #############################\n sel_lst = {}\n filesel_filename = 'file_selection.txt'\n if os.path.exists(filesel_filename):\n sel_file = open(filesel_filename)\n for row in sel_file:\n row = row.strip()\n if len(row)==0 or row[0] in '#':\n continue\n g = row.split(':')\n key, value = g[0].strip(), g[1].strip()\n if len(value)>0:\n sel_lst[key] = value\n sel_file.close()\n\n ################################ parse bias ################################\n bias_file = config['reduce.bias'].get('bias_file')\n\n if mode=='debug' and os.path.exists(bias_file):\n has_bias = True\n # load bias data from existing file\n hdu_lst = fits.open(bias_file)\n # pack bias image\n bias = [hdu_lst[iccd+1].data for iccd in range(nccd)]\n hdu_lst.close()\n message = 'Load bias data from file: {}'.format(bias_file)\n logger.info(message)\n print(message)\n else:\n # read each individual CCD\n bias_data_lst = [[] for iccd in range(nccd)]\n\n # initialize printing infomation\n pinfo1 = FormattedInfo(all_columns, ['frameid', 'fileid', 'object',\n 'exptime', 'nsat_1', 'q95_1', 'nsat_2', 'q95_2',\n 'nsat_3', 'q95_3'])\n\n for logitem in logtable:\n if logitem['object'].strip().lower()=='bias':\n fname = logitem['fileid']+'.fits'\n filename = os.path.join(rawpath, fname)\n hdu_lst = fits.open(filename)\n data_lst, mask_lst = parse_3ccd_images(hdu_lst)\n hdu_lst.close()\n\n # print info\n if len(bias_data_lst[0]) == 0:\n print('* Combine Bias Images: {}'.format(bias_file))\n print(' '*2 + pinfo1.get_separator())\n print(' '*2 + pinfo1.get_title())\n print(' '*2 + pinfo1.get_separator())\n string = pinfo1.get_format().format(logitem)\n print(' '*2 + print_wrapper(string, logitem))\n\n for iccd in range(nccd):\n bias_data_lst[iccd].append(data_lst[iccd])\n\n n_bias = len(bias_data_lst[0]) # get number of bias images\n has_bias = n_bias > 0\n\n if has_bias:\n # there is bias frames\n print(' '*2 + pinfo1.get_separator())\n\n bias = []\n # the final HDU list\n bias_hdu_lst = fits.HDUList([fits.PrimaryHDU()])\n\n # scan for each ccd\n for iccd in range(nccd):\n ### 3 CCDs loop begins here ###\n bias_data_lst[iccd] = np.array(bias_data_lst[iccd])\n\n section = config['reduce.bias']\n sub_bias = combine_images(bias_data_lst[iccd],\n mode = 'mean',\n upper_clip = section.getfloat('cosmic_clip'),\n maxiter = section.getint('maxiter'),\n mask = (None, 'max')[n_bias>=3],\n )\n\n message = '\\033[{2}mCombined bias for CCD {0}: Mean = {1:6.2f}\\033[0m'.format(\n iccd+1, sub_bias.mean(), (34, 32, 31)[iccd])\n\n print(message)\n\n head = fits.Header()\n head['HIERARCH GAMSE BIAS NFILE'] = n_bias\n\n ############## bias smooth ##################\n section = config['reduce.bias']\n if section.getboolean('smooth'):\n # bias needs to be smoothed\n smooth_method = section.get('smooth_method')\n\n h, w = sub_bias.shape\n if smooth_method in ['gauss', 'gaussian']:\n # perform 2D gaussian smoothing\n smooth_sigma = section.getint('smooth_sigma')\n smooth_mode = section.get('smooth_mode')\n \n bias_smooth = gaussian_filter(sub_bias,\n sigma=smooth_sigma, mode=smooth_mode)\n\n # write information to FITS header\n head['HIERARCH GAMSE BIAS SMOOTH'] = True\n head['HIERARCH GAMSE BIAS SMOOTH METHOD'] = 'GAUSSIAN'\n head['HIERARCH GAMSE BIAS SMOOTH SIGMA'] = smooth_sigma\n head['HIERARCH GAMSE BIAS SMOOTH MODE'] = smooth_mode\n else:\n print('Unknown smooth method: ', smooth_method)\n pass\n\n sub_bias = bias_smooth\n else:\n # bias not smoothed\n head['HIERARCH GAMSE BIAS SMOOTH'] = False\n\n 
bias.append(sub_bias)\n bias_hdu_lst.append(fits.ImageHDU(data=sub_bias, header=head))\n ### 3 CCDs loop ends here ##\n\n # write bias into file\n bias_hdu_lst.writeto(bias_file, overwrite=True)\n\n else:\n # no bias found\n pass\n\n ########################## find flat groups #########################\n flat_file = config['reduce.flat'].get('flat_file')\n\n flatdata_lst = []\n # a list of 3 combined flat images. [Image1, Image2, Image3]\n # bias has been corrected already. but not rotated yet.\n flatmask_lst = []\n # a list of 3 flat masks\n\n if mode=='debug' and os.path.exists(flat_file):\n # read flat data from existing file\n hdu_lst = fits.open(flat_file)\n for iccd in range(nccd):\n flatdata_lst.append(hdu_lst[iccd*2+1].data)\n flatmask_lst.append(hdu_lst[iccd*2+2].data)\n flatdata = hdu_lst[nccd*2+1].data.T\n flatmask = hdu_lst[nccd*2+2].data.T\n hdu_lst.close()\n message = 'Loaded flat data from file: {}'.format(flat_file)\n print(message)\n\n # alias of flat data and mask\n flatdata1 = flatdata_lst[0].T\n flatmask1 = flatmask_lst[0].T\n flatdata2 = flatdata_lst[1].T\n flatmask2 = flatmask_lst[1].T\n flatdata3 = flatdata_lst[2].T\n flatmask3 = flatmask_lst[2].T\n\n else:\n print('*'*10 + 'Parsing Flat Fieldings' + '*'*10)\n # print the flat list\n pinfo_flat = FormattedInfo(all_columns, ['frameid', 'fileid', 'object',\n 'exptime', 'nsat_1', 'q95_1', 'nsat_2', 'q95_2', 'nsat_3', 'q95_3'])\n print(' '*2 + pinfo_flat.get_separator())\n print(' '*2 + pinfo_flat.get_title())\n print(' '*2 + pinfo_flat.get_separator())\n for logitem in logtable:\n if len(logitem['object'])>=8 and logitem['object'][0:8]=='flatlamp':\n string = pinfo_flat.get_format().format(logitem)\n print(' '*2 + print_wrapper(string, logitem))\n print(' '*2 + pinfo_flat.get_separator())\n\n\n flat_group_lst = {}\n for iccd in range(nccd):\n\n key = 'flat CCD%d'%(iccd+1)\n sel_string = sel_lst[key] if key in sel_lst else ''\n prompt = '\\033[{1}mSelect flats for CCD {0} [{2}]: \\033[0m'.format(\n iccd+1, (34, 32, 31)[iccd], sel_string)\n\n # read selected files from terminal\n while(True):\n input_string = input(prompt)\n if len(input_string.strip())==0:\n # nothing input\n if key in sel_lst:\n # nothing input but already in selection list\n flat_group_lst[iccd] = parse_num_seq(sel_lst[key])\n break\n else:\n # repeat prompt\n continue\n else:\n # something input\n frameid_lst = parse_num_seq(input_string)\n # pack\n flat_group_lst[iccd] = frameid_lst\n # put input string into selection list\n sel_lst[key] = input_string.strip()\n break\n\n # now combine flat images\n\n flat_hdu_lst = [fits.PrimaryHDU()]\n # flat_hdu_lst is the final HDU list to be saved as fits\n\n for iccd in range(nccd):\n frameid_lst = flat_group_lst[iccd]\n\n # now combine flats for this CCD\n flat_data_lst = []\n # flat_data_lst is a list of flat images to be combined.\n # flat_data_lst = [Image1, Image2, Image3, Image4, ... 
...]\n\n #scan the logtable\n # log loop inside the CCD loop because flats for different CCDs are\n # in different files\n for logitem in logtable:\n if logitem['frameid'] in frameid_lst:\n filename = os.path.join(rawpath, logitem['fileid']+'.fits')\n hdu_lst = fits.open(filename)\n data_lst, mask_lst = parse_3ccd_images(hdu_lst)\n hdu_lst.close()\n\n # correct bias and pack into flat_data_lst\n if has_bias:\n flat_data_lst.append(data_lst[iccd]-bias[iccd])\n else:\n flat_data_lst.append(data_lst[iccd])\n\n # initialize flat mask\n if len(flat_data_lst) == 1:\n flatmask = mask_lst[iccd]\n flatmask = flatmask | mask_lst[iccd]\n\n n_flat = len(flat_data_lst)\n\n if n_flat == 0:\n continue\n elif n_flat == 1:\n flatdata = flat_data_lst[0]\n else:\n flat_data_lst = np.array(flat_data_lst)\n flatdata = combine_images(flat_data_lst,\n mode = 'mean',\n upper_clip = 10,\n maxiter = 5,\n mask = (None, 'max')[n_flat>=3],\n )\n #print('\\033[{1}mCombined flat data for CCD {0}: \\033[0m'.format(\n # iccd+1, (34, 32, 31)[iccd]))\n flatdata_lst.append(flatdata)\n flatmask_lst.append(flatmask)\n\n # pack the combined flat data into flat_hdu_lst\n head = fits.Header()\n head['HIERARCH GAMSE FLAT CCD{} NFILE'.format(iccd+1)] = n_flat\n flat_hdu_lst.append(fits.ImageHDU(flatdata, head))\n flat_hdu_lst.append(fits.ImageHDU(flatmask))\n # CCD loop ends here\n\n # alias of flat data and mask\n flatdata1 = flatdata_lst[0].T\n flatmask1 = flatmask_lst[0].T\n flatdata2 = flatdata_lst[1].T\n flatmask2 = flatmask_lst[1].T\n flatdata3 = flatdata_lst[2].T\n flatmask3 = flatmask_lst[2].T\n\n # mosaic flat data\n flatdata, flatmask = mosaic_3_images(\n data_lst = (flatdata1, flatdata2, flatdata3),\n mask_lst = (flatmask1, flatmask2, flatmask3),\n )\n\n flat_hdu_lst.append(fits.ImageHDU(flatdata.T))\n flat_hdu_lst.append(fits.ImageHDU(flatmask.T))\n # write flat data to file\n flat_hdu_lst = fits.HDUList(flat_hdu_lst)\n flat_hdu_lst.writeto(flat_file, overwrite=True)\n print('Flat data writed to {}'.format(flat_file))\n\n ######################### find & trace orders ##########################\n\n # simple debackground for all 3 CCDs\n xnodes = np.arange(0, flatdata1.shape[1], 200)\n flatdbkg1 = simple_debackground(flatdata1, flatmask1, xnodes, smooth=20,\n deg=3, maxiter=10)\n\n xnodes = np.arange(0, flatdata2.shape[1], 200)\n flatdbkg2 = simple_debackground(flatdata2, flatmask2, xnodes, smooth=20,\n deg=3, maxiter=10)\n\n xnodes = np.arange(0, flatdata3.shape[1], 200)\n flatdbkg3 = simple_debackground(flatdata3, flatmask3, xnodes, smooth=20,\n deg=3, maxiter=10)\n\n allimage, allmask = mosaic_3_images(\n data_lst = (flatdbkg1, flatdbkg2, flatdbkg3),\n mask_lst = (flatmask1, flatmask2, flatmask3),\n )\n\n tracefig = TraceFigure()\n\n section = config['reduce.trace']\n aperset = find_apertures(allimage, allmask,\n scan_step = section.getint('scan_step'),\n minimum = section.getfloat('minimum'),\n separation = section.get('separation'),\n align_deg = section.getint('align_deg'),\n filling = section.getfloat('filling'),\n degree = section.getint('degree'),\n display = section.getboolean('display'),\n fig = tracefig,\n )\n # decorate trace fig and save to file\n tracefig.adjust_positions()\n tracefig.suptitle('Trace for all 3 CCDs', fontsize=15)\n figfile = os.path.join(figpath, 'trace.png')\n tracefig.savefig(figfile)\n\n trcfile = os.path.join(midpath, 'trace.trc')\n aperset.save_txt(trcfile)\n\n regfile = os.path.join(midpath, 'trace.reg')\n aperset.save_reg(regfile, transpose=True)\n\n # save mosaiced flat image\n 
trace_hdu_lst = fits.HDUList(\n [fits.PrimaryHDU(allimage.T),\n fits.ImageHDU(allmask.T),\n ])\n trace_hdu_lst.writeto(config['reduce.trace'].get('file'), overwrite=True)\n\n ######################### Extract flat spectrum ############################\n\n spectra1d = extract_aperset(flatdata, flatmask,\n apertureset = aperset,\n lower_limit = 6,\n upper_limit = 6,\n )\n\n flatmap = get_slit_flat(flatdata, flatmask,\n apertureset = aperset,\n spectra1d = spectra1d,\n lower_limit = 6,\n upper_limit = 6,\n deg = 7,\n q_threshold = 20**2,\n figfile = 'spec_%02d.png',\n )\n fits.writeto('flat_resp.fits', flatmap, overwrite=True)", "def main():\n input_file_path = sys.argv[1]\n output_file_path = sys.argv[2]\n gps_df = create_df(input_file_path) # creates a data frame\n gps_df = clean_data(gps_df) # cleans the data\n print('Cleaning done')\n write_to_kml(gps_df, output_file_path) # writes to kml file", "def datamerge_run(filenames, outdir, roc_cols):\n \n tbldict = collect2dict(filenames, outdir)\n tbldict = cogtest_manipulation(tbldict, roc_cols)\n \n #count number of tps\n tbldict['cogtests'] = count_instances(tbldict['cogtests'], 'codeb', 'NP_NoTps')\n tbldict['aseg_change'] = count_instances(tbldict['aseg_change'], 'codea', 'MRI_NoTps')\n tbldict['pibparams'] = count_instances(tbldict['pibparams'], 'codea', 'PIB_NoTps')\n \n new_tbldict = {}\n for key, tbl in tbldict.iteritems():\n tpcol = [s for s in tbl.columns if ('_Tp' in s)]\n if tpcol:\n tpcol = tpcol[0]\n tblflat, tblflatnm = flatten(tbl, tpcol, key, [1, '1'])\n new_tbldict[tblflatnm] = tblflat\n tbldict.update(new_tbldict)\n \n #make sure each table contains SubjID and BAC# fields\n for key, tbl in tbldict.iteritems():\n tbl = addcodes(tbl, tbldict['codetranslator'])\n tbldict[key] = tbl\n \n #merge tables\n tblstojoin = ['cogtests_flat','pibparams_flat','aseg_change_flat','fdg_metaroi_flat','subjinfo']\n joincol = ['codea','codeb']\n subjtbl = mergelots(tbldict, tblstojoin, joincol)\n \n #merge tables\n tblstojoin = ['cogtests','subjinfo','pibparams_flat','aseg_change_flat','fdg_metaroi_flat']\n joincol = ['codea','codeb']\n NPtbl = mergelots(tbldict, tblstojoin, joincol)\n \n cf.save_xls_and_pkl(subjtbl, 'subjtbl', outdir)\n cf.save_xls_and_pkl(NPtbl, 'NPtbl', outdir)\n \n return tbldict, NPtbl, subjtbl", "def main(argv):\n # -- load our run database and make it global --\n global crysDB\n with open(\"crysDB.json\") as f:\n crysDB = json.load(f)\n\n # -- parse args --\n par = argparse.ArgumentParser(description=\"coherent crystal characterization suite\")\n arg = par.add_argument\n arg(\"-c\", \"--crys\", type=str, help=\"set crystal S/N\")\n arg(\"-p\", \"--proc\", type=str, help=\"process a crystal\")\n arg(\"-t\", \"--temp\", type=str, help='start temperature data taking')\n arg(\"-pt\", \"--printtemp\", type=str, help='print current temperature')\n arg(\"-a\", \"--all\", action=\"store_true\", help=\"process all crystals in the DB\")\n arg(\"-o\", \"--over\", action=\"store_true\", help=\"overwrite existing files\")\n arg(\"-z\", \"--zip\", action=\"store_true\", help='run gzip on raw files (on cenpa-rocks)')\n arg(\"-s\", \"--sync\", action=\"store_true\", help='sync DAQ with cenpa-rocks')\n args = vars(par.parse_args())\n\n # -- set parameters --\n crys_sn, overwrite = None, False\n\n if args[\"crys\"]:\n crys_sn = args[\"crys\"]\n\n if args[\"over\"]:\n overwrite = args[\"over\"]\n\n # -- run analysis --\n if args[\"proc\"]:\n sn = args[\"proc\"]\n process_crystal(sn, overwrite)\n\n if args[\"all\"]:\n all_sns = [k for k in 
crysDB if \"SN\" in k]\n for sn in all_sns:\n process_crystal(sn, overwrite)\n\n if args[\"sync\"]:\n sync_data()\n\n if args[\"zip\"]:\n # clean_gzip()\n zip_data(overwrite)\n\n if args[\"temp\"]:\n \"\"\"\n Run number should be the first run number entry (for 600V) in the ELOG.\n \"\"\"\n run_num = args[\"temp\"]\n measure_temp(run_num)\n\n if args[\"printtemp\"]:\n print_temp()", "def main():\n\n parser = ArgumentParser()\n parser.add_argument('--config', '-c', type=str, required=True, help='Path to config file')\n parser.add_argument('--snapshot_path', '-s', type=str, required=True, default='', help='Path to model snapshot')\n parser.add_argument('--output_dir', '-o', type=str, required=True, default='', help='Path to output directory')\n args = parser.parse_args()\n\n assert exists(args.config)\n assert exists(args.snapshot_path + '.index')\n\n if not exists(args.output_dir):\n makedirs(args.output_dir)\n\n task_monitor = get_monitor(args.config, snapshot_path=args.snapshot_path)\n\n converted_snapshot_path = join(args.output_dir, CKPT_FILE_NAME)\n task_monitor.eliminate_train_ops(converted_snapshot_path)\n\n converted_model_path = '{}-{}'.format(converted_snapshot_path,\n int(basename(args.snapshot_path).split('-')[-1]))\n task_monitor.save_model_graph(converted_model_path, args.output_dir)\n\n task_monitor.freeze_model_graph(converted_model_path,\n join(args.output_dir, PB_FILE_NAME),\n join(args.output_dir, FROZEN_FILE_NAME))", "def main() -> None:\n\n task_results = {}\n for task in (Task.SINGLE_SEQUENCE, Task.MULTI_SEQUENCE):\n task_results[task] = []\n for category in CO3D_CATEGORIES[: (20 if task == Task.SINGLE_SEQUENCE else 10)]:\n for single_sequence_id in (\n (0, 1) if task == Task.SINGLE_SEQUENCE else (None,)\n ):\n category_result = evaluate_dbir_for_category(\n category, task=task, single_sequence_id=single_sequence_id\n )\n print(\"\")\n print(\n f\"Results for task={task}; category={category};\"\n + (\n f\" sequence={single_sequence_id}:\"\n if single_sequence_id is not None\n else \":\"\n )\n )\n pretty_print_nvs_metrics(category_result)\n print(\"\")\n\n task_results[task].append(category_result)\n _print_aggregate_results(task, task_results)\n\n for task in task_results:\n _print_aggregate_results(task, task_results)", "def go():\n u_input = UserInput()\n\n # Locates important folders\n input_folder = u_input.get_input_folder()\n working_folder = u_input.get_working_folder()\n output_folder = u_input.get_output_folder()\n\n # Remaining information of the configuration file\n sequence_type = u_input.get_sequence_type()\n protein_type = u_input.get_protein_type()\n check_settings(sequence_type, protein_type)\n accession_ncbi_list = u_input.get_genome_accessions()\n user_email = u_input.get_user_email()\n distance_function = u_input.get_distance_function()\n e_value = u_input.get_e_value()\n cutoff = u_input.get_cutoff()\n replicates = u_input.get_replicates()\n blast_word_size = u_input.get_blast_word_size()\n\n # Output files configuration\n majority_or_support_tree = u_input.get_phylogenetic_tree_type()\n original_newick_tree = u_input.get_original_newick_tree()\n original_distance_matrix = u_input.get_original_distance_matrix()\n bootstrap_distance_matrix = u_input.get_bootstrap_distance_matrix()\n\n # Deletes old content from files\n delete_folder_content(working_folder)\n # delete_folder_content(output_folder)\n\n # Downloads NCBI files\n access_ncbi(accession_ncbi_list, user_email, input_folder)\n\n # Preprocessing phase\n n_files = 0\n error_list = []\n 
preprocess_phase = Preprocess()\n for file in os.listdir(\"../\" + input_folder): # Navigates into the input_folder\n n_files += 1\n error_list = preprocess_phase.preprocessing_phase(file, input_folder, sequence_type, protein_type, working_folder)\n\n # Displays a list of error detected in the preprocessing code\n display_error_messages(error_list)\n\n if len(error_list) < n_files - 1:\n alignment = Blast()\n # Builds a database\n distance_dictionary, coverage_vector_dictionary = alignment.make_blast_database(\n sequence_type, working_folder, e_value, blast_word_size)\n print(\"Sequence alignment has been done\")\n\n # Calculates distances and generates a phylogenetic tree in newick format\n phylogeny_tree = Phylogeny()\n print(\"Creating phylogenetic trees\")\n newick_tree = phylogeny_tree.get_newick_tree(coverage_vector_dictionary, distance_dictionary, distance_function,\n replicates, working_folder, output_folder,\n original_distance_matrix, bootstrap_distance_matrix,\n original_newick_tree)\n\n # Read and concatenates trees from files\n tree_list = phylogeny_tree.get_tree_list(working_folder)\n\n # Generates a consensus trees with or without support\n if majority_or_support_tree in [\"Support\", \"support\"]:\n phylogeny_tree.get_support_tree(newick_tree, tree_list, output_folder)\n elif majority_or_support_tree in [\"Majority\", \"majority\"]:\n phylogeny_tree.majority_consensus_tree(output_folder, tree_list, cutoff)\n else:\n if majority_or_support_tree in [\"Both\", \"both\"]:\n phylogeny_tree.get_support_tree(newick_tree, tree_list, output_folder)\n phylogeny_tree.majority_consensus_tree(output_folder, tree_list, cutoff)\n else:\n print(\"No majority tree consensus or support tree will be calculated\")\n else:\n print('\\n', \"At least two correct sequences to compare are needed. 
Please, check the error list to solve the \"\n \"detected problems and the content of the '\" + input_folder + \"' folder.\")", "def run(self):\n\n # If the specified outdir doesn't exist, make it.\n if os.path.exists(self.outdir) == False:\n os.mkdir(self.outdir)\n\n # Get occurrence data.\n self.get_gbif_occs()", "def main(input_path, output_path):\n logger.info('making final data set from raw data')\n\n index_path = 'data/raw/trec07p/full/index'\n index = getIndexMap(index_path, f'{input_path}/trec07p/data/')\n interim_path = 'data/interim'\n df = pd.DataFrame(columns=columns)\n\n count = 0\n if not path.exists(interim_path):\n logger.info(f'converting external txt files to trec07.csv in {interim_path}')\n mkdir(interim_path)\n for email in listdir(f'{input_path}/trec07p/data'):\n addEmailToDf(f'{input_path}/trec07p/data/{email}', index, df)\n count += 1\n if count % 1000 == 0:\n logger.info(f'conversion done for {count}/75000 files')\n df.to_csv(f'{interim_path}/trec07.csv', index=False)", "def NAME():\n\n # Location of data\n base_dir = \"(Location)\" #Location of align tif --> Should be the location of the experiment's align tiff folder, ex: \"C/desktop/work/image_processing/YYYYMMDD/align_tiffs\"\n resolution = {'res_xy_nm': 100, 'res_z_nm': 70} #Resolution of a pixel (do not alter)\n thresh = 0.9 #What qualifies for final probability map (do not alter)\n number_of_datasets = 20 #Number of wells in the experiemnts, \"20\" is an example where there are 16 samples and 4 controls\n\n #Rb Antibody\n conjugate_fn_str = 'GAD2' #String segment to search in a filename\n #conjugate_fn_str should be the term used in the name of the control align tiff for a well (usually \"PSD\", \"GAD2\", or \"SYNAPSIN\")\n target_fn_str = 'L106'\n #Ms Antibody project name, no parent or subclone number needed\n #target_fn_str should be the project number, for instance if this was testing L109 samples, this would be \"L109\"\n #Takes base directory string and gives you an array of all the files within\n filenames = aa.getListOfFolders(base_dir) #Do not change\n conjugate_filenames = [] #Do not change\n target_filenames = [] #Do not change\n query_list = [] #Do not change\n folder_names = [] #Do not change\n\n for n in range(1, 17):\n #Use if dataset missing\n #This is where you put in the rangee of wells used as your test samples\n #Since we have 16 samples that are test samples for L106, the range is equal to 1 through n+1, or 1 through 17\n #If your test samples do not begin at well 1, then adjust the beginning of the range accordingly (3 through 17 if the first test sample is in well 3) \n #continue\n\n print('Well: ', str(n)) #Do not change\n folder_names.append('Test-' + str(n)) # Collate 'dataset' names for excel sheet #Do not change\n conjugate_str = str(n) + '-' + conjugate_fn_str #creates filename to search for #Creates n-conjugatename #Do not change\n target_str = str(n) + '-' + target_fn_str #Do not change\n\n # Search for file associated with the specific dataset number\n indices = [i for i, s in enumerate(filenames) if conjugate_str == s[0:len(conjugate_str)]] #Do not change\n conjugate_name = filenames[indices[0]] #Do not change\n print(conjugate_name) #Do not change\n indices = [i for i, s in enumerate(filenames) if target_str == s[0:len(target_str)]] #Do not change\n target_name = filenames[indices[0]] #Do not change\n print(target_name) #Do not change\n \n conjugate_filenames.append(conjugate_name) #Do not change\n target_filenames.append(target_name) #Do not change\n\n # Create query\n #\n 
query = {'preIF': [conjugate_name], 'preIF_z': [2],\n 'postIF': [target_name], 'postIF_z': [1],\n 'punctumSize': 2}\n #preIF = items that are presynaptic targets go here, because GAD2, our conjugate, is presynaptic I put the conjugate_name in this box\n #preIF_z = how many tiffs a puncta must be in to be registered, conjugate sample number is 2 so 2 goes in this box\n #postIF = items that are postsynaptic targets go here, L106 is postsynaptic so I put target_name here\n #postIF_z = how many tiffs a puncta must be in to be registered, target sample number is 1 (for now unless changed later) \n #punctumSize = size of punctum the algorithm is looking for, do not change unless directed to\n\n \"\"\"Example of a presynaptic target and presynaptic conjugate\n query = {'preIF': [target_name,conjugate_name], 'preIF_z': [1,2],\n 'postIF': [], 'postIF_z': [],\n 'punctumSize': 2}\"\"\"\n\n \"\"\"Example of a postsynaptic target and presynaptic conjugate\n query = {'preIF': [conjugate_name], 'preIF_z': [2],\n 'postIF': [target_name], 'postIF_z': [1],\n 'punctumSize': 2}\"\"\"\n\n \"\"\"Example of a postsynaptic target and postsynaptic conjugate\n query = {'preIF': [], 'preIF_z': [],\n 'postIF': [target_name,conjugate_name], 'postIF_z': [1,2],\n 'punctumSize': 2}\"\"\"\n\n \"\"\"Example of a presynaptic target and postsynaptic conjugate\n query = {'preIF': [target_name], 'preIF_z': [1],\n 'postIF': [conjugate_name], 'postIF_z': [2],\n 'punctumSize': 2}\"\"\"\n\n\n query_list.append(query)\n\n\n #The following n samples are controls - you can add as many of these as you want by copying the block of code and pasting it after the last one\n #The notes in the following block of code apply to all of the controls\n n = 17 #well number of control sample\n folder_names.append('Control' + str(n)) # Collate 'dataset' names for excel sheet #Do not change\n reference_fn_str = 'GAD2' #String segment to search in a filename #refernce_fn_str is the project number/name of RB control\n target_fn_str = 'L106' #target_fn_str is the project number of the Ms control you are using\n conjugate_name, target_name = aa.findFilenames(reference_fn_str, target_fn_str, filenames, n) #Do not alter\n conjugate_filenames.append(conjugate_name) #Do not alter\n target_filenames.append(target_name) #Do not alter\n query = {'preIF': [conjugate_name], 'preIF_z': [2], 'postIF': [target_name], 'postIF_z': [1], 'punctumSize': 2} #Se the examples and explanations above about \"query\"\n query_list.append(query) #Do not change\n\n n = 18\n folder_names.append('Control' + str(n)) # Collate 'dataset' names for excel sheet\n reference_fn_str = 'GAD2' #String segment to search in a filename\n target_fn_str = 'SP2'\n conjugate_name, target_name = aa.findFilenames(reference_fn_str, target_fn_str, filenames, n)\n conjugate_filenames.append(conjugate_name)\n target_filenames.append(target_name)\n query = {'preIF': [target_name,conjugate_name], 'preIF_z': [1,2], 'postIF': [], 'postIF_z': [], 'punctumSize': 2}\n query_list.append(query)\n\n n = 19\n folder_names.append('Control' + str(n)) # Collate 'dataset' names for excel sheet\n reference_fn_str = 'NP-RB' #String segment to search in a filename\n target_fn_str = 'NP-MS'\n conjugate_name, target_name = aa.findFilenames(reference_fn_str, target_fn_str, filenames, n)\n conjugate_filenames.append(conjugate_name)\n target_filenames.append(target_name)\n query = {'preIF': [], 'preIF_z': [], 'postIF': [target_name,conjugate_name], 'postIF_z': [1,2], 'punctumSize': 2}\n query_list.append(query)\n\n n = 20\n 
folder_names.append('Control' + str(n)) # Collate 'dataset' names for excel sheet\n reference_fn_str = 'NPNS-RB' #String segment to search in a filename\n target_fn_str = 'NPNS-MS'\n conjugate_name, target_name = aa.findFilenames(reference_fn_str, target_fn_str, filenames, n)\n conjugate_filenames.append(conjugate_name)\n target_filenames.append(target_name)\n query = {'preIF': [], 'preIF_z': [], 'postIF': [target_name,conjugate_name], 'postIF_z': [1,2], 'punctumSize': 2}\n query_list.append(query)\n\n\n \n measure_list = aa.calculate_measure_lists(query_list, None, base_dir,\n thresh, resolution, target_filenames) # Run all the queries\n\n df = aa.create_df(measure_list, folder_names, target_filenames, conjugate_filenames) #Do not change\n print(df) #Do not change\n\n return df #Do not change", "def main():\n sleep(3)\n diceresult = randint(1, 6)\n if diceresult == 6:\n raise Exception('The \"data retrieval\" flaked out. :(')\n else:\n share_dir = os.environ['CYLC_WORKFLOW_SHARE_DIR']\n cycle_point = os.environ['CYLC_TASK_CYCLE_POINT']\n fp = Path(f'{share_dir}/{cycle_point}.dat')\n fp.write_text(f'diceresult = {diceresult}')", "def AllindividualRuns():\n #800 nm\n RunData(getFiles(mintime=(15, 40, 07), maxtime=(15, 45, 14), folder='data/29Jul/'), out='I800nm')\n RunData(getFiles(mintime=(15, 12, 20), maxtime=(15, 24, 16), folder='data/31Jul/'), out='I800nm5k')\n RunData(getFiles(mintime=(15, 28, 40), maxtime=(15, 39, 21), folder='data/31Jul/'), out='I800nm10k')\n RunData(getFiles(mintime=(15, 43, 24), maxtime=(15, 51, 47), folder='data/31Jul/'), out='I800nm20k')\n RunData(getFiles(mintime=(15, 56, 11), maxtime=(16, 02, 58), folder='data/31Jul/'), out='I800nm30k')\n RunData(getFiles(mintime=(16, 12, 39), maxtime=(16, 18, 25), folder='data/31Jul/'), out='I800nm38k')\n RunData(getFiles(mintime=(16, 21, 52), maxtime=(16, 26, 16), folder='data/31Jul/'), out='I800nm50k')\n RunData(getFiles(mintime=(16, 32, 02), maxtime=(16, 35, 23), folder='data/31Jul/'), out='I800nm54k')\n #700 nm\n RunData(getFiles(mintime=(17, 20, 17), maxtime=(17, 33, 17), folder='data/30Jul/'), out='I700nm5k')\n RunData(getFiles(mintime=(17, 37, 35), maxtime=(17, 46, 51), folder='data/30Jul/'), out='I700nm9k')\n RunData(getFiles(mintime=(17, 48, 35), maxtime=(17, 56, 03), folder='data/30Jul/'), out='I700nm52k')\n RunData(getFiles(mintime=(17, 58, 18), maxtime=(17, 59, 31), folder='data/30Jul/'), out='I700nm32k')\n #600 nm\n RunData(getFiles(mintime=(15, 22, 00), maxtime=(15, 36, 32), folder='data/30Jul/'), out='I600nm5k')\n RunData(getFiles(mintime=(15, 39, 58), maxtime=(15, 47, 58), folder='data/30Jul/'), out='I600nm54k')\n RunData(getFiles(mintime=(15, 52, 07), maxtime=(16, 06, 32), folder='data/30Jul/'), out='I600nm10k')\n #890 nm\n RunData(getFiles(mintime=(13, 37, 37), maxtime=(13, 50, 58), folder='data/01Aug/'), out='I890nm5k')\n RunData(getFiles(mintime=(14, 00, 58), maxtime=(14, 11, 54), folder='data/01Aug/'), out='I890nm10k')\n RunData(getFiles(mintime=(14, 17, 57), maxtime=(14, 25, 49), folder='data/01Aug/'), out='I890nm30k')\n RunData(getFiles(mintime=(14, 30, 03), maxtime=(14, 34, 37), folder='data/01Aug/'), out='I890nm50k')", "def main():\n\n\n\n skulls_folder = os.listdir(RAW_IMAGE_DIRECTORY)\n\n # fetch and sort the .mnc and .tag files\n mnc_files = [f for f in skulls_folder if 'mnc' in f]\n tag_files = [f for f in skulls_folder if 'tag' in f]\n mnc_names = [i.split('.mnc')[0] for i in mnc_files]\n \n mnc_files.sort()\n tag_files.sort()\n mnc_names.sort()\n\n # Process and package ndarrays as tuples inside 
npy file\n package_to_npy(RAW_IMAGE_DIRECTORY, mnc_files, tag_files, mnc_names)\n \n print('\\n' * 5)\n\n # Push the npy files to GCP Cloud Storage\n upload_to_gcp(PROCESSED_IMAGE_DIRECTORY, GCP_PROJECT_NAME, GCP_BUCKET_NAME)", "def main(config_path):\n logging.info(\"Loading raw data files. This may take a few minutes.\")\n config = yaml.load(config_path, yaml.SafeLoader)\n min_year = config[\"min_year\"]\n max_year = config[\"max_year\"]\n\n # The Eviction Lab data came in two files (2014-2016 and 2017-2018) with different formats, so\n # we need to read them in separately and merge them later.\n eviction_df = load_evictions_data(\n config[\"eviction_data_path\"],\n \"EXECUTED_DATE\",\n min_year,\n max_year,\n create_geoid = True\n )\n census_df = load_census_data(config[\"acs_data_path\"])\n\n # Generate the time series data and save output\n logging.info(\"Generating time series data (monthly counts of housing loss events).\")\n\n # Calculating eviction totals separately because number of evictions does not always equal number of rows in the df\n evictions_monthly = aggregate_evictions_using_rate_estimates(\n eviction_df,\n config[\"path_to_eviction_filing_rates\"],\n estimate_var = \"filings\",\n time_group = \"month\"\n )\n evictions_monthly = evictions_monthly[[\"eviction-filings\", \"month\"]].groupby(\"month\").sum().reset_index()\n\n\n # Creating timeseries df and merging on separate eviction counts\n timeseries_df = generate_time_series_df(eviction_df)\n timeseries_df = timeseries_df.merge(evictions_monthly, how=\"outer\")\n timeseries_df.drop_duplicates().to_csv(\n config[\"timeseries_output_csv_path\"], index=False\n )\n logging.info(\n \"Output timeseries CSV saved to %s.\" % config[\"timeseries_output_csv_path\"]\n )\n\n # Process evictions data--get totals/rates across the analysis period & totals/rates\n # by year\n eviction_df = aggregate_evictions_using_rate_estimates(\n eviction_df,\n config[\"path_to_eviction_filing_rates\"],\n estimate_var = \"filings\",\n time_group = \"year\"\n )\n eviction_years_df = create_year_cols_from_df(\n eviction_df,\n [\"evictions\", \"eviction-filings\", \"eviction-rate\"], # \"eviction-filing-rate\"\n {\"evictions\": \"total-evictions\"},\n \"year\",\n \"GEOID\",\n )\n\n eviction_totals = get_totals_across_years(eviction_df, \"GEOID\", \"evictions\").rename(\n columns={\"sum\": \"total-evictions\", \"mean\": \"avg-evictions\"}\n )\n eviction_filing_totals = get_totals_across_years(\n eviction_df, \"GEOID\", \"eviction-filings\",\n ).rename(columns={\"sum\": \"total-eviction-filings\", \"mean\": \"avg-eviction-filings\"})\n\n\n # Join evictions, mortgage, tax, and ACS data together into a single dataframe\n merged = (\n census_df\n .merge(eviction_totals, on=\"GEOID\", how=\"left\")\n .merge(eviction_df[[\"GEOID\"]].dropna().drop_duplicates(),on=\"GEOID\",how=\"left\",)\n .merge(eviction_years_df, on=\"GEOID\", how=\"left\")\n )\n\n merged[\"overall-city-eviction-rate\"] = ((np.sum(merged[\"avg-evictions\"][np.isfinite(merged[\"avg-evictions\"])]))/sum(merged[\"total-renter-occupied-households\"]))*100\n\n merged[\"avg-eviction-rate\"] = (\n merged[\"avg-evictions\"] / merged[\"total-renter-occupied-households\"]\n ) * 100\n\n merged[\"ratio-to-mean-eviction-rate\"] = (\n merged[\"avg-eviction-rate\"] / merged[\"overall-city-eviction-rate\"]\n )\n\n # Add geographic identifier columns\n merged = merged.rename(columns={\"GEOID\": \"census_tract_GEOID\"})\n merged[\"county_GEOID\"] = merged[\"census_tract_GEOID\"].apply(lambda x: x[:5])\n 
merged.loc[merged[\"county_GEOID\"] == \"36005\", \"county\"] = \"Bronx\"\n merged.loc[merged[\"county_GEOID\"] == \"36047\", \"county\"] = \"Brooklyn\"\n merged.loc[merged[\"county_GEOID\"] == \"36061\", \"county\"] = \"Manhattan\"\n merged.loc[merged[\"county_GEOID\"] == \"36081\", \"county\"] = \"Queens\"\n merged.loc[merged[\"county_GEOID\"] == \"36085\", \"county\"] = \"Staten Island\"\n merged[\"state\"] = \"New York\"\n\n\n # Write main output file to CSV\n merged.drop_duplicates().to_csv(config[\"output_csv_path\"], index=False)\n logging.info(\"Output CSV saved to %s.\" % config[\"output_csv_path\"])", "def main():\n parser = ArgumentParser(description=\"write to a file\")\n\n parser.add_argument(\"-i\",\"--input\", type=setup.is_valid_h5_file, required=True, nargs='+',\n help=\"path(s) of HDF5 master file(s)\")\n\n parser.add_argument(\"-b\",\"--beamcenter\", nargs=2, required=True,\n help=\"beam center in X and Y (two arguments)\")\n\n parser.add_argument(\"-r\",\"--oscillation\", type=float, default=1,\n help=\"oscillation angle per well, default = 1\")\n\n parser.add_argument(\"-d\",\"--distance\", type=float, default=100,\n help=\"detector distance in mm\")\n\n parser.add_argument(\"-w\",\"--wavelength\", type=float, default=1.216,\n help=\"Wavelength in Angstrom, default is 1.216\")\n\n parser.add_argument(\"-f\",\"--framesperdegree\", type=int, default=5,\n help=\"Number of frames per degree, default is 5\")\n\n parser.add_argument(\"-t\",\"--totalframes\", type=int, default=0,\n help=\"Total number of frames to be processed, default all\")\n\n parser.add_argument(\"--output\", default=os.getcwd(),\n help=\"Use this option to change output directory, default pwd\")\n\n parser.add_argument(\"-sg\",\"--spacegroup\", type=int, default=0,\n help=\"Space group\")\n\n parser.add_argument(\"-u\",\"--unitcell\", type=str, default=\"50 50 50 90 90 90\",\n help=\"unit cell\")\n\n argslist = parser.parse_args()\n for masterfile in argslist.input:\n master1= Master(argslist,masterfile)\n master1.printDataWells()", "def FigA7(case):\n \n #set the parameter, arrays\n \n n_array=np.array([1,2,3])\n\n #set the result arrays\n if case==0:\n class_number=5\n elif case==1:\n class_number=6\n fate=np.zeros([class_number])#number of evolutionary fate\n fate_matrix=np.zeros([np.size(n_array),np.size(fate)])\n \n time=np.linspace(0,100000, 1000000)\n loop=10**6\n \"\"\"\n 0 Co and/or Ch cannot survive in mono-culture\n 1 Co cannot invade\n 2 Only equilibrium of exclusion is stable\n 3 Only equilibrium of coexistence is stable\n 4 Two equilibria are UNstable\n 5 two Equilibrium are stable (which may occur only when sCO vs rCh)\n \"\"\"\n for tri in range(np.size(n_array)):\n counter=0\n n=n_array[tri]\n print(str(\"Hill coefficient is %d\" %(n)))\n fate=np.zeros([class_number])#number of evolutionary fate should be reset\n if case==0 or case==1:\n fname=str('parameter-sweep-MC-n%d-case%d' %(n, case))\n else:\n print(\"Error in case\")\n return 1\n \n for i in range(loop):\n if(i+1)%10000==0:\n print(i+1)\n Ks,cd,T0, alpha,=np.random.uniform(0,1,4)\n Kr,cr=np.random.uniform([Ks,0],[1,1],2)#Kr>Ks and cr.cd\n #check whether r is positive or not\n if case==0:\n r1=rmax*(1-cr-cd)#rCO\n r2=rmax#sCH\n W0Co=r1-dmax*T0**n/(T0**n+Kr**n)-alpha#initial growth of Cooperator\n W0Ch=r2-dmax*T0**n/(T0**n+Ks**n)-alpha#initial growth of Cheater\n elif case==1:\n r1=rmax*(1-cd)#sCo\n r2=rmax*(1-cr)#rCh\n W0Co=r1-dmax*T0**n/(T0**n+Ks**n)-alpha\n W0Ch=r2-dmax*T0**n/(T0**n+Kr**n)-alpha\n stab_e=0#initialize the falgs 
of stability\n stab_c=0\n if W0Co<0 or W0Ch<0:\n fate[0]+=1\n res=0\n else:\n #succeed in mono-culture \n init=np.array([T0,10**(-6)])\n if case==0: \n solCo=odeint(DyCoop, init, time, args=(T0, r1, Kr, alpha, n))\n Ts=solCo[-1,0]\n #x1s=solCo[-1,1]\n solCh=odeint(DyCheat, init, time, args=(T0, r2, Ks, alpha, n))\n x2s=solCh[-1,1]\n else:\n solCo=odeint(DyCoop, init, time, args=(T0, r1, Ks, alpha, n))\n Ts=solCo[-1,0]\n #x1s=solCo[-1,1]\n solCh=odeint(DyCheat, init, time, args=(T0, r2, Kr, alpha, n))\n x2s=solCh[-1,1]\n \n #Evolutionary dynamics \n if case==0:\n K=Kr\n else:\n K=Ks\n if r1*(1-x2s)-dmax*T0**n/(T0**n+K**n)<alpha:\n #Co cannot invade\n fate[1]+=1\n res=1\n else:\n #Co can invade\n #calculate Tdagger Td and check whether coexist or exclude\n if case==0:\n #rCo vs sCh\n #in this case, at most one equilbrium is stable\n tau=Quad(case,alpha,cr+cd,0,Kr, Ks, n)\n Td=tau**(1/n)\n if Td<Ts:\n #Co exclude Ch\n fate[2]+=1\n res=2\n else:\n x1d=alpha*Kd*(T0-Td)/(fmax*Td-alpha*(T0-Td))\n x2d=1-x1d-(dmax*Td**n/(Td**n+K**n)+alpha)/r1\n #check the stability condition\n stab=Stab_cond(alpha, T0, Td,x1d,x2d, r1,r2,n, K)\n if stab==0:\n #stable coexistence\n fate[3]+=1\n res=3\n else:\n #unstable coexistence nor exclusion\n fate[4]+=1\n res=4\n print(Td, x1d, x2d)\n else:\n #sCo vs rCh\n # in this case two equilibria can be stable at the same time\n [tau_p,tau_m]=Quad(case,alpha,cd,cr,Ks, Kr, n)\n if tau_m>Ts**n or tau_p<Ts**n:\n # cexclusion is stable\n stab_e=1\n # stability in coexistence \n if tau_p<0:\n stab_c=0\n else:\n Td=tau_p**(1/n)\n x1d=alpha*Kd*(T0-Td)/(fmax*Td-alpha*(T0-Td))\n x2d=1-x1d-(dmax*Td**n/(Td**n+K**n)+alpha)/r1\n #check the stability condition\n stab=Stab_cond(alpha, T0, Td,x1d,x2d, r1,r2,n, K)\n if stab==0:\n #stable coexistence\n stab_c=1\n #classify\n if stab_e==1 and stab_c==1:\n # two stable equilbria\n fate[5]+=1\n res=5\n elif stab_e==1 and stab_c==0:\n #only stable cexclusion\n fate[2]+=1\n res=2\n elif stab_e==0 and stab_c==1:\n #stable coexistence\n fate[3]+=1\n res=3\n else:\n #both unstable\n fate[4]+=1\n res=4\n \n #save the results\n if counter==0:\n result=np.array([[Ks, Kr, cr, cd, alpha, T0,res]])\n #save the result with parameter values\n \n else:\n #add array of results\n R=np.array([[Ks, Kr, cr, cd, alpha, T0,res]])\n result=np.concatenate((result, R), axis=0)\n counter+=1\n \n #save csv file and graph\n np.savetxt(fname+'.csv',result, delimiter=',', header='Ks, Kr, cr, cd, alpha, T0, class', fmt='%.6f') \n print(fate)\n fate_matrix[tri,:]=fate \n if case==0: \n np.savetxt('parameter_sweep_MC_total_case0.csv',fate_matrix, delimiter=',', header='cl0,l1,cl2,cl3,cl4', fmt='%d')\n else:\n np.savetxt('parameter_sweep_MC_total_case1.csv',fate_matrix, delimiter=',', header='cl0,l1,cl2,cl3,cl4,cl5', fmt='%d')\n Plot(case)", "def main(): \n for info_hash_record in info_hashs:\n get_and_save_bt_info(info_hash_record)", "def execute(cf):\n\n ##Ports and parameters\n train_set = cf.get_input(\"train_set\") #training set. Typically even_file\n test_set = cf.get_input(\"test_set\") #test set. 
Typically odd_file\n WM1 = cf.get_input(\"WM1\")\n WM2 = cf.get_input(\"WM2\")\n WM3 = cf.get_input(\"WM3\")\n WM4 = cf.get_input(\"WM4\")\n WM5 = cf.get_input(\"WM5\")\n WM6 = cf.get_input(\"WM6\")\n WM7 = cf.get_input(\"WM7\")\n WM8 = cf.get_input(\"WM8\")\n WM9 = cf.get_input(\"WM9\")\n WM10 = cf.get_input(\"WM10\")\n WM11 = cf.get_input(\"WM11\")\n WM12 = cf.get_input(\"WM12\")\n WM13 = cf.get_input(\"WM13\")\n WM14 = cf.get_input(\"WM14\")\n WM15 = cf.get_input(\"WM15\")\n WM16 = cf.get_input(\"WM16\")\n WM17 = cf.get_input(\"WM17\")\n WM18 = cf.get_input(\"WM18\")\n WM19 = cf.get_input(\"WM19\")\n WM20 = cf.get_input(\"WM20\")\n WMdir = cf.get_input(\"WMdir\")\n WMdir2 = cf.get_input(\"WMdir2\")\n basefreqs = cf.get_input(\"BaseFrequencies\")\n ufemodel_path = cf.get_input(\"UFEmodel\")\n\n bestWM = cf.get_output(\"BestWM\")\n log_file = cf.get_output(\"log_file\")\n interm = cf.get_output(\"intermediate\")\n\n genome = cf.get_parameter('genome', 'string')\n motevo_path = cf.get_parameter('motevo_path', 'string')\n aligned = cf.get_parameter(\"aligned\", \"boolean\")\n\n os.mkdir(interm)\n\n\n\n # Read stuff in\n WMs = [i for i in[WM1, WM2, WM3, WM4, WM5, WM6, WM7, WM8, WM9, WM10, WM11, WM12, WM13, WM14, WM15, WM16, WM17, WM18, WM19, WM20] if i]\n\n if WMdir:\n WMs += [os.path.join(WMdir, wm) for wm in os.listdir(WMdir)]\n\n if WMdir2:\n WMs += [os.path.join(WMdir2, wm) for wm in os.listdir(WMdir2)]\n\n f = open(basefreqs)\n ATfreq = float(f.readline().strip().split()[1])\n GCfreq = float(f.readline().strip().split()[1])\n f.close()\n\n\n # Compute stuff: optimal priors and then likelihood of test set\n optpriors = []\n logliks = []\n\n for i, WM in enumerate(WMs):\n\n wmlen = len(open(WM).readlines())-4\n\n # 1. Fit prior on training set with EM\n tag = 'fitP_%i' %(i+1)\n params, sites, priors, loglikfile = giveMotevoParamFile(genome, wmlen, interm, tag, aligned, ufemodel_path, ATfreq, GCfreq, emprior=1, bgorder=0, bgprior=0.99)\n r = runMotevo(motevo_path, train_set, params, WM, interm, tag)\n if r != 0:\n print 'motevo failed ', tag\n sys.exit(1)\n\n # prior file:\n # WM_name final_prior nr_of_sites density\n # /import/bc2/home/nimwegen/GROUP/hseq_pipeline/severin/Anduril/Pipeline/PipeLineSource/TESTRUN/NRF1_Z2/OUTPUT/NRF1_FgBg-runmotevoPG2_1/Logo 0.016554 635.008 0.251863\n # background 0.983446 37724.8 0.748137\n # UFEwm 0 0 0\n\n optprior = float(open(priors).readlines()[1].split()[1])\n bgprior=(1-optprior)\n print bgprior\n\n # 2. 
Compute log-likelihood on test set with optimal prior from training set and without EM\n tag = 'compLL_%i' %(i+1)\n params, sites, priors, loglikfile = giveMotevoParamFile(genome, wmlen, interm, tag, aligned, ufemodel_path, ATfreq, GCfreq, emprior=0, bgorder=0, bgprior=bgprior)\n runMotevo(motevo_path, train_set, params, WM, interm, tag)\n\n a = loadtxt(loglikfile, usecols=[1])\n ll = sum(a)\n\n logliks.append(ll)\n optpriors.append(optprior)\n\n print logliks\n\n\n\n #replace name in WM file with bestWM\n lines = open(WMs[argmax(logliks)]).readlines()\n lines[1] = 'NA BestWM\\n'\n bwm = open(bestWM, 'w')\n bwm.write(''.join(lines))\n\n\n l = open(log_file, 'w')\n\n l.write('WM_name\\tWM_path\\tlog_likelihood\\topt_prior\\n')\n\n names = ['WM_%i\\t%s\\t%.4f\\t%s' %(i+1, WMs[i], logliks[i], optpriors[i]) for i in arange(len(WMs))]\n\n l.write('\\n'.join(names))\n l.close()\n\n\n return 0", "def main():\n\n # Experiment Start\n start_time = datetime.now()\n logger.info(\n '################ Bergson Team Experiment Start #################')\n logger.info(\n f'Starting Bergson Astro Pi team experiment at {start_time.strftime(\"%Y-%m-%d %H:%M:%S\")}')\n\n '''\n # Load simple Conv2D AI Model\n logger.info(\"Loading AI Convolutional Model\")\n conv2D_model = load_model(\"Conv2D_TF114\")\n '''\n\n # Load TFLite Model\n logger.info(\"Loading TFLite Mobilenetv2 Model\")\n mobilenetv2_interpreter = load_tflite_model(\"./Mobilenetv2_TF114.tflite\")\n\n # Create Log File\n logger.info(f'Creating Log file at {str(data_file)}')\n with open(data_file, 'w') as f:\n writer = csv.writer(f)\n header = (\"Date/time\", \"Location\", \"Picture Name\", \"Predicted NO2\")\n writer.writerow(header)\n\n # Start Loop over 3 hours\n\n now_time = datetime.now()\n i = 0\n # run a loop for 2 minutes\n while (now_time < start_time + timedelta(minutes=175)):\n\n # Take Earth Picture\n timestamp = datetime.now().strftime(\"%Y-%m-%d_%H:%M:%S\")\n pic_name = f'bergson_img_{timestamp}.jpg'\n capture(rpi_cam, str(dir_path/pic_name))\n logger.info(f'Experiment Pipeline {i} on picture {pic_name}')\n\n # NDVI Preprocessing\n ndvi_image = get_ndvi(str(dir_path/pic_name))\n ndvi_image = np.expand_dims(ndvi_image, axis=2)\n\n # RGB Prepprocessing for expected shape by Mobilenetv2 - comment below line when using simple Conv2D model\n ndvi_rgb_image = get_ndvi_rgb(ndvi_image)\n\n '''\n # Do Inference with simple Conv2D AI Model\n prediction = make_inference(ndvi_image,conv2D_model)\n '''\n \n # Do Inference with TFLite Model\n ndvi_rgb_image = ndvi_rgb_image.astype('float32')\n prediction = make_tflite_inference(\n ndvi_rgb_image, mobilenetv2_interpreter)\n\n # Get Decoded Inference results\n decoded_prediction = decode_prediction(prediction)\n\n # Write Prediction as CSV to disk\n logger.info(\n f'Logging NO2 prediction \\\"{decoded_prediction}\\\" for {pic_name}')\n exif_data = get_img_exif(pic_name, iss, decoded_prediction)\n row = (exif_data['Date/Time'], exif_data['Location'],\n pic_name, exif_data['NO2'])\n with open(data_file, mode='a') as f:\n writer = csv.writer(f)\n writer.writerow(row)\n\n # update the current time\n now_time = datetime.now()\n i = i+1\n\n # End Loop over 3 hours\n\n # Experiment End\n end_time = datetime.now()\n logger.info(\n f'Finishing Bergson Astro Pi team experiment at {end_time.strftime(\"%Y-%m-%d %H:%M:%S\")}')\n experiment_time = end_time - start_time\n logger.info(f'Bergson Astro Pi team experiment run time {experiment_time}')\n logger.info('################ Bergson Team Experiment End 
#################')", "def GenotypeGVCFs():\n #creates sbatch files to merge batches of batch_size genomics vcf\n cwd = os.getcwd()\n sbatch_files = []\n if not os.path.isdir(os.path.join(cwd, \"01_CombineGVCFs\")):\n sys.exit(\"Directory 01_CombineGVCFs does not exits exists, something went wrong here.\")\n if os.path.isdir(os.path.join(cwd, \"02_GenotypeGVCFs\")):\n print \"WARNING: 02_GenotypeGVCFs already present, assuming this step has been completed with success.\"\n return sbatch_files\n else:\n #create the folder structure\n os.mkdir(os.path.join(cwd, \"02_GenotypeGVCFs\"))\n os.mkdir(os.path.join(cwd, \"02_GenotypeGVCFs\", \"sbatch\"))\n os.mkdir(os.path.join(cwd, \"02_GenotypeGVCFs\", \"std_err\"))\n os.mkdir(os.path.join(cwd, \"02_GenotypeGVCFs\", \"std_out\"))\n os.mkdir(os.path.join(cwd, \"02_GenotypeGVCFs\", \"VCF\"))\n #Build the sbatch files for the join calling step\n working_dir = os.path.join(cwd, \"02_GenotypeGVCFs\")\n #now retrive the VCF stored in 01_CombineGVCFs/VCF/\n combined_gvcfs_to_process = []\n if len(CONFIG[\"intervals_list\"]) == 0:\n #no intervals, I have one file for each batch\n combined_gvcf_files = []\n for current_batch in range(1, CONFIG[\"batch_number\"] +1):\n # for each batch create the vcf file that need to be created by combine step\n combined_gvcf_name = \"{}_batch{}.g.vcf.gz\".format(CONFIG[\"output_header\"], current_batch)\n combined_gvcf_full_path = os.path.join(cwd, \"01_CombineGVCFs\", \"VCF\", combined_gvcf_name)\n combined_gvcf_files.append(combined_gvcf_full_path)\n combined_gvcfs_to_process.append(combined_gvcf_files)\n else:\n for interval in CONFIG[\"intervals_list\"]:\n interval_name = os.path.basename(interval).split(\".\")[0]\n combined_gvcf_files = []\n for current_batch in range(1, CONFIG[\"batch_number\"] +1):\n # for each batch create the vcf file that need to be created by combine step\n combined_gvcf_name = \"{}_batch{}_{}.g.vcf.gz\".format(CONFIG[\"output_header\"], current_batch, interval_name)\n combined_gvcf_full_path = os.path.join(cwd, \"01_CombineGVCFs\", \"VCF\", combined_gvcf_name)\n combined_gvcf_files.append(combined_gvcf_full_path)\n #now ceate a list with interval file and all gvcf to be combines\n interval_plus_gvcfs = [interval ,combined_gvcf_files]\n combined_gvcfs_to_process.append(interval_plus_gvcfs)\n for interval_plus_gvcfs in combined_gvcfs_to_process:\n interval = interval_plus_gvcfs[0]\n combined_gvcf_files = interval_plus_gvcfs[1]\n sbatch_file = build_GenotypeGVCFs_sbatch(working_dir, combined_gvcf_files, CONFIG[\"scratch\"], interval)\n sbatch_files.append(sbatch_file)\n return sbatch_files", "def preprocess():\n #get a list of all sentinel-image filenames\n s2files = [f for f in listdir(s2path) if endswith(join(s2path, f),\".tif\")==True]\n #read in a csv-file with information about the cluster\n csvpath = os.path.abspath(os.path.join(os.path.abspath(__file__),\"../../dataResearch/Data_with_Pooled.csv\"))\n df = pd.read_csv(csvpath)\n #get the min and max values per band \n minmaxlist = minmax()\n timelist = []\n print(\"STEP 2/2\")\n print(\"CREATING TFRECORDS\")\n for i in s2files:\n start = time.time()\n s2file = s2path + \"/\" + i\n #Get Features out of the Dataframe\n #get the name of the label (equals the SurveyID in the data)\n labelname = i.replace(\".tif\",\"\")\n #get the index of the entry to get the information out of the dataframe\n index = df.ID[df.ID == labelname].index\n wealthpooled = float(df['wealthpooled'].loc[index].max().replace(\",\",\".\"))\n wealthpooled5country = 
float(df['wealthpooled5country'].loc[index].max().replace(\",\",\".\"))\n country = bytes(df['country'].loc[index].max(), 'utf-8')\n urbanrural = bytes(df['URBAN_RURA'].loc[index].max(), 'utf-8')\n csvlat = float(df['LATNUM'].loc[index].max().replace(\",\",\".\"))\n csvlon = float(df['LONGNUM'].loc[index].max().replace(\",\",\".\"))\n year = int(df['year'].loc[index].max())\n wealth = float(df['wealth'].loc[index].max().replace(\",\",\".\"))\n #Get all Bands out of the GEOTIFF File\n s2raster = gdal.Open(s2file)\n bandlist = []\n for n in range(s2raster.RasterCount):\n f = n+1\n if n not in [13,14,15]:\n s2band = s2raster.GetRasterBand(f)\n s2band = s2band.ReadAsArray()\n s2band = np.resize(s2band,(1050,1050)).flatten()\n min = minmaxlist[n][0]\n max = minmaxlist[n][1]\n s2band = (s2band-min)/(max-min)\n bandlist.append(s2band.flatten())\n #get the Nightlight Band out of the GEOTIFF File\n nlfile = nlpath + \"/\" + i\n nlraster = gdal.Open(nlfile)\n nlband = nlraster.GetRasterBand(1)\n nlband = nlband.ReadAsArray()\n nlband = np.resize(nlband,(1050,1050)).flatten()\n min = minmaxlist[13][0]\n max = minmaxlist[13][1]\n nlband = (nlband-min)/(max-min)\n bandlist.append(nlband)\n #create a TFRecords-File with the TFRecordWriter\n with tf.io.TFRecordWriter(exportpath + '/' + labelname + '.tfrec') as writer:\n example = serialize_example(B1=bandlist[0],\n B2=bandlist[1],\n B3=bandlist[2],\n B4=bandlist[3],\n B5=bandlist[4],\n B6=bandlist[5],\n B7=bandlist[6],\n B8=bandlist[7],\n B8A=bandlist[8],\n B9=bandlist[9],\n B10=bandlist[10],\n B11=bandlist[11],\n B12=bandlist[12],\n NL=bandlist[13],\n wealth=wealth,\n wealthpooled=wealthpooled,\n wealthpooled5country=wealthpooled5country,\n country=country,\n urbanrural=urbanrural,\n lon_coord=csvlon,\n lat_coord=csvlat,\n year=year)\n writer.write(example)\n end = time.time()\n timelist.append(end-start)\n print(\"Done!\",str(s2files.index(i)+1) + \"/\" + str(len(s2files)),\"Est. 
time left:\",time.strftime('%d:%H:%M:%S',time.gmtime(int(sum(timelist)/len(timelist)*(len(s2files)-s2files.index(i))))))", "def testC_ACDCTest(self):\n workload = self.createTestWorkload()\n dcs = DataCollectionService(url=self.testInit.couchUrl, database=self.testInit.couchDbName)\n\n testFileA = File(lfn=makeUUID(), size=1024, events=1024, locations=\"T1_US_FNAL_Disk\")\n testFileA.addRun(Run(1, 1, 2))\n testFileA.create()\n testFileB = File(lfn=makeUUID(), size=1024, events=1024, locations=\"T1_US_FNAL_Disk\")\n testFileB.addRun(Run(1, 3))\n testFileB.create()\n testJobA = getJob(workload)\n testJobA.addFile(testFileA)\n testJobA.addFile(testFileB)\n\n testFileC = File(lfn=makeUUID(), size=1024, events=1024, locations=\"T1_US_FNAL_Disk\")\n testFileC.addRun(Run(1, 4, 6))\n testFileC.create()\n testJobB = getJob(workload)\n testJobB.addFile(testFileC)\n\n testFileD = File(lfn=makeUUID(), size=1024, events=1024, locations=\"T1_US_FNAL_Disk\")\n testFileD.addRun(Run(1, 7))\n testFileD.create()\n testJobC = getJob(workload)\n testJobC.addFile(testFileD)\n\n testFileE = File(lfn=makeUUID(), size=1024, events=1024, locations=\"T1_US_FNAL_Disk\")\n testFileE.addRun(Run(1, 11, 12))\n testFileE.create()\n testJobD = getJob(workload)\n testJobD.addFile(testFileE)\n\n testFileF = File(lfn=makeUUID(), size=1024, events=1024, locations=\"T1_US_FNAL_Disk\")\n testFileF.addRun(Run(2, 5, 6, 7))\n testFileF.create()\n testJobE = getJob(workload)\n testJobE.addFile(testFileF)\n\n testFileG = File(lfn=makeUUID(), size=1024, events=1024, locations=\"T1_US_FNAL_Disk\")\n testFileG.addRun(Run(2, 10, 11, 12))\n testFileG.create()\n testJobF = getJob(workload)\n testJobF.addFile(testFileG)\n\n testFileH = File(lfn=makeUUID(), size=1024, events=1024, locations=\"T1_US_FNAL_Disk\")\n testFileH.addRun(Run(2, 15))\n testFileH.create()\n testJobG = getJob(workload)\n testJobG.addFile(testFileH)\n\n testFileI = File(lfn=makeUUID(), size=1024, events=1024, locations=\"T1_US_FNAL_Disk\")\n testFileI.addRun(Run(3, 20))\n testFileI.create()\n testJobH = getJob(workload)\n testJobH.addFile(testFileI)\n\n testFileJ = File(lfn=makeUUID(), size=1024, events=1024, locations=\"T1_US_FNAL_Disk\")\n testFileJ.addRun(Run(1, 9))\n testFileJ.create()\n testJobI = getJob(workload)\n testJobI.addFile(testFileJ)\n\n # dcs.failedJobs([testJobA, testJobB, testJobC, testJobD, testJobE,\n # testJobF, testJobG, testJobH, testJobI])\n\n dcs.failedJobs([testJobA, testJobD, testJobH])\n\n baseName = makeUUID()\n\n testFileset = Fileset(name=baseName)\n testFileset.create()\n testFileset.addFile(testFileA)\n testFileset.addFile(testFileB)\n testFileset.addFile(testFileC)\n testFileset.addFile(testFileD)\n testFileset.addFile(testFileE)\n testFileset.addFile(testFileF)\n testFileset.addFile(testFileG)\n testFileset.addFile(testFileH)\n testFileset.addFile(testFileI)\n testFileset.addFile(testFileJ)\n testFileset.commit()\n\n testSubscription = Subscription(fileset=testFileset,\n workflow=self.testWorkflow,\n split_algo=\"LumiBased\",\n type=\"Processing\")\n testSubscription.create()\n\n splitter = SplitterFactory()\n jobFactory = splitter(package=\"WMCore.WMBS\",\n subscription=testSubscription)\n\n jobGroups = jobFactory(lumis_per_job=100,\n halt_job_on_file_boundaries=False,\n splitOnRun=True,\n collectionName=workload.name(),\n filesetName=workload.getTask(\"reco\").getPathName(),\n owner=\"evansde77\",\n group=\"DMWM\",\n couchURL=self.testInit.couchUrl,\n couchDB=self.testInit.couchDbName,\n performance=self.performanceParams)\n\n 
self.assertEqual(jobGroups[0].jobs[0]['mask'].getRunAndLumis(), {1: [[1, 2], [3, 3], [11, 12]]})\n self.assertEqual(jobGroups[0].jobs[1]['mask'].getRunAndLumis(), {3: [[20, 20]]})\n\n return", "def main():\n\n house_path = '../../Data/wijk1_huizen.csv'\n battery_path = '../../Data/wijk1_batterijen.txt'\n\n houses, batteries = read_data(house_path, battery_path)\n\n smart_wijk = SmartGrid(51,51)\n smart_wijk.add_house_dictionaries(houses)\n smart_wijk.add_battery_dictionaries(batteries)\n\n for element in houses:\n smart_wijk.create_house(element['position'], element['output'])\n for element in batteries:\n smart_wijk.create_battery(element['position'], element['capacity'])\n\n solution_reader(smart_wijk, '../../Results/best_brabo_solution.csv')", "def main():\n stats = []\n start = timer()\n\n for file_name in get_dataset():\n\n # load image and ground truth detection mask\n img = cv2.imread(settings.PATH + file_name)\n ground_truth_mask = cv2.imread(settings.PATH_GT_MASKS + file_name)\n\n # Find list of barcode regions (rotated rectangle) within image\n barcode_regions, debug_img = find_barcodes(img)\n barcode_regions_mask = np.zeros(img.shape, np.uint8)\n barcode_images = None\n result = []\n\n # Decode barcode regions\n for barcode_region in barcode_regions:\n\n # Decode barcode image\n barcode_img = barcode_region.extract_from(img)\n barcode_mask = barcode_region.get_mask(img)\n debug_img = barcode_region.draw(debug_img)\n\n # Combine masks from multiple detected regions\n barcode_regions_mask += barcode_mask\n\n # Decode barcode\n decoded = pyzbar.decode(barcode_img)\n\n # Keep result for logging\n data = \", \".join([d.data.decode(\"utf-8\") for d in decoded])\n result.append({\"data\": data, \"region\": barcode_region.json()})\n\n if settings.SHOW_IMAGE:\n barcode_images = img_concat(barcode_images, barcode_img)\n\n # Jaccard_accuracy = intersection over union of the two binary masks\n jaccard_accuracy = 0\n if ground_truth_mask is not None:\n r = barcode_regions_mask.max(axis=-1).astype(bool)\n u = ground_truth_mask.max(axis=-1).astype(bool)\n jaccard_accuracy = float((r & u).sum()) / (r | u).sum()\n stats.append(jaccard_accuracy)\n\n # Log result\n logger.info(\n \"Image processed\",\n file_name=file_name,\n jaccard_accuracy=jaccard_accuracy,\n success=jaccard_accuracy > 0.5,\n result=result,\n )\n\n # In debug mode show visualization of detection algorithm\n if settings.SHOW_IMAGE:\n\n # Add alpha channel\n debug_img = cv2.cvtColor(debug_img, cv2.COLOR_BGR2BGRA)\n if barcode_images is not None:\n barcode_images = cv2.cvtColor(barcode_images, cv2.COLOR_BGR2BGRA)\n\n # Overlay error mask\n # Pixel-wise difference between ground truth and detected barcodes\n if ground_truth_mask is not None:\n error_img = np.zeros(debug_img.shape, np.uint8)\n error_img[r & u] = np.array([0, 0, 0, 0], dtype=np.uint8)\n error_img[np.logical_xor(r, u)] = np.array(\n [0, 0, 255, 1], dtype=np.uint8\n )\n debug_img = cv2.addWeighted(debug_img, 1, error_img, 0.5, 0)\n\n # Append barcode pictures to the right\n debug_img = img_concat(debug_img, barcode_images, axis=1)\n\n # Show visualization\n cv2.namedWindow(\"img\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"img\", debug_img)\n cv2.waitKey(0)\n\n # Calculate final stats\n end = timer()\n accuracy = np.array(stats).mean()\n successes = np.where(np.array(stats) > 0.5)[0]\n logger.info(\n \"Final stats\",\n accuracy=accuracy,\n detection_rate=float(len(successes)) / len(stats),\n fps=len(stats) / (end - start),\n )", "def main():\n\n pathfolder = 
\"/home/vanessa/DATA_SEEG/PKL_FILE/\"\n filename = \"/data.pkl\"\n # pathfolder = argv[1]\n # filename = argv[2]\n\n ti = 10. # initial time\n tf = 590. # final time\n t_split = 300. # split\n fs = 1000. # sampling frequency\n powerline = 50.\n\n thresholds = np.load(\"threshold.npy\") # load the threshold file\n meanthresh = thresholds.mean(axis=0)[1::2]\n stdthresh = thresholds.std(axis=0)[1::2]\n\n # features = 159 # classification features + (x,y,z)-coordinates\n\n for ii, id in enumerate(os.listdir(pathfolder)):\n\n print(id)\n\n df = pd.read_pickle(pathfolder + id + filename)\n\n validchannels = np.where(~df.loc[:, \"PTD\"].isnull())[0] # remove NaN values\n\n df = df.iloc[validchannels, :]\n _, p = df.shape\n\n timeseries = df.values[:, :-5] # we are not considering Y, ptd, coordinates\n\n data = remove_powerline(timeseries, fs) # remove power line effects\n\n #################### split into 2 fragments ############################\n\n split1half = data[:, int(fs*ti):int(fs*t_split)]\n split2half = data[:, int(fs*t_split):int(fs*tf)]\n\n timefeat1half = merge_temporal_features(split1half, fs, powerline,\n meanthresh)\n timefeat2half = merge_temporal_features(split2half, fs, powerline,\n meanthresh)\n\n ########################################################################\n\n cc = [df.index[t] for t in range(len(df.index))]\n arrays = [[id]*(2*len(df.index)), cc + cc]\n\n tuples = list(zip(*arrays))\n index = pd.MultiIndex.from_tuples(tuples, names=['patient', 'channel'])\n\n # temporal features from SEEG\n timefeatdf = pd.DataFrame(data=np.vstack((timefeat1half,\n timefeat2half)), index=index)\n\n # spatial features for MRI\n spacefeat = df.values[:, -4:]\n spacefeatdf = pd.DataFrame(data=np.vstack((spacefeat, spacefeat)),\n index=index, columns=\n ['PTD', 'xcoor', 'ycoor', 'zcoor'])\n\n # y labels\n ylab = df.values[:, -5]\n Ylabel = pd.DataFrame(data=np.append(ylab, ylab), index=index,\n columns=[\"Y\"])\n\n # pickle file in output\n outputpkl = pd.concat([timefeatdf, spacefeatdf, Ylabel], axis=1)\n\n outputpkl.to_pickle(pathfolder + id + \"/features.pkl\")\n\n if ii == 0:\n ddd = outputpkl\n else:\n ddd = pd.concat([ddd, outputpkl], axis=0)\n\n ddd.to_pickle(pathfolder + \"classificationset.pkl\")", "def wextract_worker(region_list, evt2_file, pbk_file, asphist_file,\n msk_file, bpix_file, root=None, bg_file=None,\n bg_region=None, binning=None, clobber=False,\n quiet=False, pfiles_number=None):\n nreg = len(region_list)\n binarfwmap = 1 # set this to 2 or more if there are memory problems\n\n # Check if clobber is set\n if clobber:\n clobbertext = 'clobber=yes'\n else:\n clobbertext = ''\n\n # Check if binning is set\n if binning is not None:\n binningtext1 = 'binspec=' + str(binning)\n binningtext2 = 'grouptype=NUM_CTS'\n else:\n binningtext1 = 'binspec=NONE'\n binningtext2 = 'grouptype=NONE'\n\n # Check if pfiles_number is set. 
Specifying this ensures that multiple\n # CIAO instances run simultaneously without conflict by setting the PFILES\n # environment variable to a temporary directory (although it seems to\n # work fine even without it since pset is not currently used in this script).\n env = os.environ.copy()\n if pfiles_number is not None:\n pfile_dir = './cxcds_param' + str(pfiles_number)\n if not os.path.exists(pfile_dir):\n os.mkdir(pfile_dir)\n # Set PFILES to a temporary directory in the current directory (note use of ; and :)\n env[\"PFILES\"] = pfile_dir+';'+env[\"ASCDS_CONTRIB\"]+'/param'+':'+env[\"ASCDS_INSTALL\"]+'/param'\n\n # Now loop over regions and do extraction\n for i, region in enumerate(region_list):\n if os.path.isfile(region):\n print('Extracting spectra/responses from region ' + region + '...')\n\n # Define output file names (follow acisspec conventions)\n if root is None:\n pi_root = os.path.splitext(region)[0] + '_sou'\n pi_file = pi_root + '.pi'\n bg_pi_file = os.path.splitext(region)[0] + '_bgd.pi'\n else:\n if nreg == 1:\n pi_root = root + '_sou'\n pi_file = pi_root + '.pi'\n bg_pi_file = root + '_bgd.pi'\n else:\n pi_root = root + '_' + str(i) + '_sou'\n pi_file = pi_root + '.pi'\n bg_pi_file = root + str(i) + '_bgd.pi'\n\n # Extract source spectrum and make responses using SPECEXTRACT\n evt2_filter = evt2_file + '[sky=region(' + region + ')]'\n cmd = ['punlearn', 'specextract']\n p = subprocess.call(cmd, env=env)\n if bg_file is None and bg_region is not None:\n bgtext = 'bkgfile=' + evt2_file + \\\n '[sky=region(' + bg_region + ')]'\n else:\n bgtext = 'bkgfile=NONE'\n\n cmd = ['specextract', evt2_filter, 'outroot='+pi_root, 'asp='+asphist_file, 'mskfile='+msk_file, 'badpixfile='+bpix_file, 'weight=yes', 'bkgresp=no', 'correct=no', 'combine=no', 'dafile=CALDB', bgtext, binningtext1, binningtext2, clobbertext, 'binarfwmap='+str(binarfwmap)]\n\n if quiet:\n p = subprocess.call(cmd, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n else:\n p = subprocess.call(cmd, env=env)\n\n # Extract background spectrum if a bg_file is given and a source spectrum was made\n if bg_file is not None and os.path.isfile(pi_file):\n cmd = ['punlearn', 'dmextract']\n p = subprocess.call(cmd, env=env)\n bg_filter = bg_file + '[sky=region(' + region + ')][bin PI]'\n cmd = ['dmextract', 'infile=' + bg_filter, 'outfile=' + bg_pi_file, 'opt=pha1', 'error=gaussian', 'bkgerror=gaussian', 'wmap=[energy=300:2000][bin det=8]', clobbertext]\n if quiet:\n p = subprocess.call(cmd, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE) # if quiet==True, simply redirect stdout and stderr to pipe\n else:\n p = subprocess.call(cmd, env=env)\n\n # Update header of source PI file with bg_file\n if os.path.isfile(pi_file):\n cmd = ['punlearn', 'dmhedit']\n p = subprocess.call(cmd, env=env)\n cmd = ['dmhedit', 'infile='+pi_file, 'filelist=', 'operation=add', 'key=BACKFILE', 'value='+bg_pi_file]\n if quiet:\n p = subprocess.call(cmd, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n else:\n p = subprocess.call(cmd, env=env)\n if binning is not None:\n cmd = ['dmhedit', 'infile='+pi_root+'_grp.pi', 'filelist=', 'operation=add', 'key=BACKFILE', 'value='+bg_pi_file]\n if quiet:\n p = subprocess.call(cmd, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n else:\n p = subprocess.call(cmd, env=env)\n\n else:\n print('Region file ' + region + ' not found! 
No extraction done for this region.')", "def main():\r\n\r\n print(\"\\nTask 4a:\")\r\n days = cold_days([1, -5, 3, 0, -6, -3, 15, 0])\r\n print(days)\r\n\r\n print(\"\\nTask 4b:\")\r\n A = [-70, 30, 0, 90, 23, -12, 95, 12]\r\n result = cap_data(A, -50, 50)\r\n print(result)\r\n\r\n print(\"\\nTask 4c:\")\r\n for i in range(4):\r\n print(generate_testdata(10, -5, 10))\r\n\r\n print(\"\\nTask 4d:\")\r\n temp = [1, 5, 3]\r\n rain = [0, 30, 120]\r\n humidity = [30, 50, 65]\r\n wind = [3, 5, 7]\r\n weather = create_db(temp, rain, humidity, wind)\r\n print(weather)\r\n\r\n print(\"\\nTask 4e:\")\r\n print_db(weather)\r\n\r\n print(\"\\nTask 4f:\")\r\n temp = [1, 3, 4, -5, -6, -7, -8, -9, 3, 0]\r\n rain = [0, 20, 30, 0, 10, 30, 50, 0, 5, 2]\r\n print(strange_weather(temp, rain))", "def builddataframe(brick, path = \"..\", cutstring = \"1\", major = 0, minor = 0, newzprojection = None, charmsim = False):\n nplate =0\n\n print(\"Reading ScanSet at path \",path)\n\n #reading scanset\n sproc = r.EdbScanProc()\n sproc.eProcDirClient=path\n id = r.EdbID(brick,nplate,major,minor)\n ss = sproc.ReadScanSet(id)\n ss.Brick().SetID(brick)\n \n #preparing patterns\n npl = ss.eIDS.GetEntries()\n\n cut = r.TCut(cutstring)\n\n #intial empty arrays\n IDall = np.zeros(0,dtype=int)\n PIDall = np.zeros(0,dtype=int)\n\n xall = np.zeros(0,dtype=np.float32)\n yall = np.zeros(0,dtype=np.float32)\n zall = np.zeros(0,dtype=np.float32)\n TXall = np.zeros(0,dtype=np.float32)\n TYall = np.zeros(0,dtype=np.float32)\n\n MCEvtall = np.zeros(0,dtype=int)\n MCTrackall = np.zeros(0,dtype=int)\n Pall = np.zeros(0,dtype=np.float32)\n Flagall = np.zeros(0,dtype=int)\n\n print (\"Cut on couples \")\n cut.Print()\n\n print(\"Try to open folders at path \",path+\"/b00000\"+str(brick))\n for i in range(npl):\n idplate = ss.GetID(i)\n \n nplate = idplate.ePlate\n plate = ss.GetPlate(idplate.ePlate)\n #read pattern information\n p = r.EdbPattern()\n\n ect = r.EdbCouplesTree()\n if (nplate) <10:\n ect.InitCouplesTree(\"couples\",path+\"/b00000\"+str(brick)+\"/p00{}/{}.{}.{}.{}.cp.root\".format(nplate,brick,nplate,major,minor),\"READ\")\n else:\n ect.InitCouplesTree(\"couples\",path+\"/b00000\"+str(brick)+\"/p0{}/{}.{}.{}.{}.cp.root\".format(nplate,brick,nplate,major,minor),\"READ\")\n\n #addingcut\n ect.eCut = cut \n cutlist = ect.InitCutList()\n \n nsegcut = cutlist.GetN()\n nseg = ect.eTree.GetEntries()\n\n IDarray_plate = np.zeros(nsegcut,dtype=int)\n PIDarray_plate = np.zeros(nsegcut,dtype=int)\n\n xarray_plate = np.zeros(nsegcut,dtype=np.float32)\n yarray_plate = np.zeros(nsegcut,dtype=np.float32)\n zarray_plate = np.zeros(nsegcut,dtype=np.float32)\n TXarray_plate = np.zeros(nsegcut,dtype=np.float32)\n TYarray_plate = np.zeros(nsegcut,dtype=np.float32)\n \n MCEvtarray_plate = np.zeros(nsegcut,dtype=int)\n MCTrackarray_plate = np.zeros(nsegcut,dtype=int)\n Parray_plate = np.zeros(nsegcut,dtype=np.float32)\n Flagarray_plate = np.zeros(nsegcut,dtype=int)\n\n print (\"loop on {} segments over {} for plate {}\".format(nsegcut, nseg,nplate))\n for ientry in range(nsegcut):\n iseg = cutlist.GetEntry(ientry)\n ect.GetEntry(iseg)\n \n seg=ect.eS\n #//setting z and affine transformation\n seg.SetZ(plate.Z())\n seg.SetPID(i)\n seg.Transform(plate.GetAffineXY())\n\n if(newzprojection is not None):\n seg.PropagateTo(newzprojection[i])\n\n IDarray_plate[ientry] = seg.ID()\n PIDarray_plate[ientry] = seg.PID()\n \n xarray_plate[ientry] = seg.X()\n yarray_plate[ientry] = seg.Y()\n zarray_plate[ientry] = seg.Z()\n TXarray_plate[ientry] = seg.TX()\n 
TYarray_plate[ientry] = seg.TY()\n\n MCEvtarray_plate[ientry] = seg.MCEvt()\n MCTrackarray_plate[ientry] = seg.MCTrack()\n Parray_plate[ientry] = seg.P() \n if charmsim: #different place where pdgcode is stored\n Flagarray_plate[ientry] = seg.Vid(0)\n else:\n Flagarray_plate[ientry] = seg.Flag() \n\n #end of loop, storing them in global arrays\n IDall = np.concatenate((IDall,IDarray_plate),axis=0)\n PIDall = np.concatenate((PIDall,PIDarray_plate),axis=0)\n\n xall = np.concatenate((xall,xarray_plate),axis=0)\n yall = np.concatenate((yall,yarray_plate),axis=0)\n zall = np.concatenate((zall,zarray_plate),axis=0)\n TXall = np.concatenate((TXall,TXarray_plate),axis=0)\n TYall = np.concatenate((TYall,TYarray_plate),axis=0)\n MCEvtall = np.concatenate((MCEvtall,MCEvtarray_plate),axis=0)\n MCTrackall = np.concatenate((MCTrackall,MCTrackarray_plate),axis=0)\n Pall = np.concatenate((Pall,Parray_plate),axis=0)\n Flagall = np.concatenate((Flagall,Flagarray_plate),axis=0)\n\n data = {'ID':IDall,'PID':PIDall,'x':xall,'y':yall,'z':zall,'TX':TXall,'TY':TYall,'MCEvent':MCEvtall,'MCTrack':MCTrackall,'P':Pall,'Flag':Flagall}\n df = pd.DataFrame(data, columns = ['ID','PID','x','y','z','TX','TY','MCEvent','MCTrack','P','Flag'] )\n\n return df", "def main():\n\n # Log messages to stdout\n logging.basicConfig(\n level=logging.DEBUG,\n format=\"%(asctime)s [%(levelname)s] %(message)s\",\n stream=sys.stdout,\n )\n\n # Load the sample dataset: the US states and their corresponding population number.\n # (data from https://www.census.gov/)\n us_states_path = os.path.join(os.getcwd(), \"sample_data\", \"cb_2018_us_state_5m.shp\")\n us_pop_path = os.path.join(os.getcwd(), \"sample_data\", \"nst-est2019-01.xlsx\")\n us_states = gpd.read_file(us_states_path)\n us_inhab = pd.read_excel(us_pop_path, skiprows=3, engine=\"openpyxl\").add_prefix(\n \"pop_\"\n )\n # Tidy up rows and column names\n us_inhab.rename(columns={us_inhab.columns[0]: \"NAME\"}, inplace=True)\n us_inhab.NAME = us_inhab.NAME.str.replace(\".\", \"\")\n # Join population numbers and us state geometries.\n us_states = us_states.merge(us_inhab, on=\"NAME\").reset_index()\n # Inspect the data\n print(us_states.info())\n\n # Initialize a circle style cartogram for inhabitants per state in 2019.\n circle_cg = CircleCartogram(\n gdf=us_states,\n size_column=\"pop_2019\",\n mode=2,\n time_limit=60, # The total amount of seconds the model is allowed to run. Useful for working with mode 3.\n )\n square_cg = SquareCartogram(\n gdf=us_states,\n size_column=\"pop_2019\",\n mode=1,\n time_limit=60, # The total amount of seconds the model is allowed to run. Useful for working with mode 3.\n )\n square2_cg = SquareCartogram(\n gdf=us_states,\n size_column=\"pop_2019\",\n mode=4,\n time_limit=60, # The total amount of seconds the model is allowed to run. Useful for working with mode 3.\n )\n\n # Calculate the cartogram geometries.\n circle_cg.calculate()\n square_cg.calculate()\n square2_cg.calculate()\n\n # Plot both the original map and the cartogram side by side.\n gdfs = [us_states, circle_cg.gdf, square_cg.gdf, square2_cg.gdf]\n m = Map(\n gdfs=gdfs,\n title=\"Population per US State in 2019\",\n column=\"pop_2019\",\n labels=\"STUSPS\",\n )\n m.ax[0][0].set_xlim(-150, -60)\n m.plot()\n plt.show()" ]
[ "0.6137783", "0.610961", "0.6097006", "0.6071778", "0.59867096", "0.5944537", "0.59281546", "0.5890876", "0.5888353", "0.58699083", "0.5863766", "0.5852129", "0.58403766", "0.58233374", "0.58191043", "0.5813233", "0.5811937", "0.5777856", "0.5767777", "0.5765127", "0.57320464", "0.5724286", "0.572312", "0.5699994", "0.56938756", "0.56784886", "0.56725454", "0.5670405", "0.566777", "0.5659553", "0.5650505", "0.5647334", "0.5642317", "0.5640165", "0.563935", "0.5621144", "0.5610774", "0.5605963", "0.56027514", "0.5600403", "0.55994374", "0.5597945", "0.55974704", "0.5580568", "0.55783415", "0.5566575", "0.556298", "0.55545735", "0.5545194", "0.55430114", "0.5536334", "0.55253303", "0.55250657", "0.55210483", "0.5518609", "0.5516895", "0.55120164", "0.55035496", "0.55018556", "0.549652", "0.5493933", "0.5488552", "0.5483436", "0.5482197", "0.5479517", "0.5473252", "0.54715115", "0.5471012", "0.546292", "0.546271", "0.545488", "0.5453907", "0.5450715", "0.5450394", "0.5446454", "0.5444491", "0.5442255", "0.543885", "0.54374963", "0.5433551", "0.5430806", "0.5427362", "0.54232675", "0.542322", "0.5419585", "0.54184383", "0.54129773", "0.54114926", "0.53996724", "0.5395247", "0.5395008", "0.5383791", "0.5383286", "0.53831786", "0.5380034", "0.53763723", "0.5371245", "0.5364242", "0.5362469", "0.536043" ]
0.6892021
0
Zwraca "x/y" lub "x" dla y=1
def __str__(self):
    if self.y == 1:
        return "{}".format(self.x)
    else:
        return "{}/{}".format(self.x, self.y)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def divide(x, y):\n\n return x / y", "def division(x, y):\n return x / y", "def divide(x, y):\n return x / y", "def divide(x, y):\n return x / y", "def divide(x, y):\n return x / y", "def prop(x,y):\n return 1 / (1/x + 1/y - 1)", "def divide(x, y):\n return round(x / y)", "def div(x, y):\n return x / y", "def divide(x, y):\n assert y != 0\n if x == 1: return 0, 1\n q, r = divide(x >> 1, y)\n q *= 2\n r *= 2\n if x & 1: r += 1\n if r >= y:\n q += 1\n r -= y\n return q, r", "def get(self, x: int, y: int, /) -> int:", "def compute_fraction(x, y): \r\n if x == 'NaN' or y == 'NaN':\r\n return 0.\r\n if x == 0 or y == 0: \r\n return 0\r\n fraction = x / y\r\n return fraction", "def tryDivide(x, y):\r\n s = 0.0\r\n if y != 0.0: s = x / y\r\n return s", "def my_func(x, y):\n return (1 / x) * my_func(x, y + 1) if y < -1 else 1 / x", "def division(self, x,y,a,b):\n real = (a*x + b*y)/(a*a + b*b)\n img = (a*y - b*x)/(a*a + b*b)\n return real, img", "def root_1(a, b):\n return -b / a", "def divide_and_round_up(x, y):\n return ((x - 1) // y) + 1", "def di(o1, o2):\n return o1/o2", "def egim_hesapla(x1, y1, x2, y2):\n\tsonuc = (y2 - y1) / (x2 - x1)\n\tprint float(sonuc)", "def le(self, x, y):", "def resta(x, y):\n return x - y", "def divide(x, y):\n # TODO: Find out if there is a way to work around divide by zero error\n try:\n if x or y == 0:\n print(\"0\")\n print(x, '/', y, '=', x / y)\n menu()\n except Exception as e:\n expt(e.args)", "def g(x, y):\n\n \"\"\" Xrhsimopoihste infix telestes(+, -, *, **, ...)\n OXI ekfraseis klhshs \"\"\"\n return ((((x+y)**2)+((x-y)**2))**(1/2))", "def scalar_function(x, y):\n if x <= y:\n return x*y\n else:\n return x/y", "def get_pos(x, y):\r\n return normalize(x) // 2, normalize(y) // 4", "def division(a, b):\n return (a // b, a / b)", "def the_division_is_aimed(numb1, numb2):\r\n return f\"Your result: {numb1//numb2}\"", "def percentage(x, y):\n try:\n return 100 * (float(x) / y)\n except ZeroDivisionError:\n return \"undefined\"", "def harm(x,y):\n return x*y/(x+y)", "def expected(x, y):", "def expected(x, y):", "def expected(x, y):", "def division(x, y, val = 0.0):\n if y != 0.0:\n val = float(x)/y\n return val", "def divide_by_2(x):\n\treturn x / 2", "def division(a, b):\n if b != 0:\n return a//b", "def rat2cont_quot(x, y):\n\tcont = []\n\twhile y != 0:\n\t\tcont.append(x // y)\n\t\tx, y = y, x % y\n\treturn cont", "def linear_function(x, y):\n\n return x + y / 2.", "def div1(left: float, right: float) -> float:\n return left / right", "def id_case(x,y):\n try:\n assert (100<x<1100 and 200<y<700)\n a=int((x-100)/cote)*cote+100\n b=int((y-200)/cote)*cote+200\n return (a,b)\n except AssertionError:\n print(x,y)\n print(\"Le couple a identifier n'est pas dans le rectangle\")", "def fn(x, y):\n x, y = abs(x), abs(y) # symmetry \n if x == y == 0: return 0 \n if x + y == 2: return 2\n return 1 + min(fn(x-2, y-1), fn(x-1, y-2))", "def YtoX2(x1, y):\n return 0.5 * y * (x1**2 + 1) - x1**2", "def divide(self, a, b):\n return a / b", "def divide(a, b):\n return a / b", "def soma(x, y):\n return x + y", "def find_ordinate(x_value, a_value, b_value, field):\n\n # y_value may be found by simple substitution of x_value in the equation\n return (int(pow(x_value, 3, field)) + a_value * x_value + b_value) % field", "def modulo(x, y) :\n if (x / y) < 1:\n return x\n else:\n return modulo(x - y, y)", "def test_expression(x, y, z):\n return x * y + y / z", "def interpol(self,x,y,x1):\n \n N = len(x)\n i = np.minimum(np.maximum(np.searchsorted(x,x1,side='right'),1),N-1)\n xl = 
x[i-1]\n xr = x[i]\n yl = y[i-1]\n yr = y[i]\n y1 = yl + (yr-yl)/(xr-xl) * (x1-xl)\n above = x1 > x[-1]\n below = x1 < x[0]\n y1 = np.where(above,y[-1] + (x1 - x[-1]) * (y[-1]-y[-2])/(x[-1]-x[-2]), y1)\n y1 = np.where(below,y[0],y1)\n \n return y1, i", "def ge (x,y):\n\n return le(y,x)", "def divide1(a, b):\n if b == 0:\n raise ValueError(\"Zero division Error!\")\n return a * 1.0 / b", "def scalar_divide(x, y):\n if len(list(x.size())) == 2 or len(list(x.size())) == 1:\n y_star = torch.zeros_like(y)\n y_star[0] = y[0]\n y_star[1] = -y[1]\n\n numerator = scalar_mult(y_star, x)\n denominator = scalar_mult(y, y_star)[0]\n\n if len(list(x.size())) == 3:\n y_star = torch.zeros_like(y)\n y_star[0] = y[0]\n y_star[1] = -y[1]\n\n numerator = scalar_mult(y_star, x)\n denominator = scalar_mult(y, y_star)[0]\n\n return numerator / denominator", "def exquo(self, a, b):\n return a // b", "def liner_function_integer(x1, y1, x2, y2):\n\n def gcd(a, b):\n \"\"\"最大公約数\"\"\"\n a, b = (a, b) if a >= b else (b, a)\n if b == 0:\n return a\n return gcd(b, a % b)\n\n if x1 == x2:\n # y軸に平行な直線の場合\n a = 1\n b = 0\n c = - x1\n elif y1 == y2:\n # x軸に平行な直線の場合\n a = 0\n b = 1\n c = - y1\n else:\n b = 1 * (x1 - x2)\n a = - (y1 - y2)\n c = - a * x1 - b * y1\n g = gcd(gcd(abs(a), abs(b)), abs(c))\n b //= g\n a //= g\n c //= g\n code = 1 if a > 0 else -1\n b *= code\n a *= code\n c *= code\n return a, b, c", "def expotentation_by_squaring(x, y):\n if y == 0:\n return 1\n if y % 2 == 0:\n return pow(expotentation_by_squaring(x, y/2), 2)\n else:\n return x*expotentation_by_squaring(x, y-1)", "def divide(first, second):\n if second == 0:\n return 'Invalid operation'\n return first / second", "def test_coord_preceding_fs(self):", "def calc_frac_diff(x, y):\n if(y != 0):\n return math.fabs((x - y) / y)\n elif(x != 0.):\n return 1.\n else:\n return 0.", "def calc(operand_1, operand_2):\n return operand_1 / operand_2", "def testdiv_Y_X ( self ):\r\n\t\tr = re.compile ( 'frac' )\r\n\t\tfor fracTupX, fracTupY, dictAdd, dictSub, dictMul, dictDiv in self.knownArithResultValues:\r\n\t\t\tfracX = eval ( r.sub ( 'frac.frac', fracTupX ) )\r\n\t\t\tfracY = eval ( r.sub ( 'frac.frac', fracTupY ) )\r\n\t\t\tdiv_fracY_fracX = fracY / fracX\r\n\t\t\tself.assertEqual ( div_fracY_fracX.toString ().split ()[0], dictDiv ['Y/X'] )", "def exquo(self, a, b):\n return a / b", "def get_real_coordinates(ratio, x1, y1, x2, y2):\n real_x1 = int(round(x1 // ratio))\n real_y1 = int(round(y1 // ratio))\n real_x2 = int(round(x2 // ratio))\n real_y2 = int(round(y2 // ratio))\n\n return real_x1, real_y1, real_x2, real_y2", "def testdiv_X_Y ( self ):\r\n\t\tr = re.compile ( 'frac' )\r\n\t\tfor fracTupX, fracTupY, dictAdd, dictSub, dictMul, dictDiv in self.knownArithResultValues:\r\n\t\t\tfracX = eval ( r.sub ( 'frac.frac', fracTupX ) )\r\n\t\t\tfracY = eval ( r.sub ( 'frac.frac', fracTupY ) )\r\n\t\t\tdiv_fracX_fracY = fracX / fracY\r\n\t\t\tself.assertEqual ( div_fracX_fracY.toString ().split ()[0], dictDiv ['X/Y'] )", "def P_(x, y):\r\n return (x, y)", "def ape(x, y):\n return _div_(ae(x, y), [abs(i) for i in x])", "def test_floordiv(self):\n a = Vector(3, 5)\n c = a // (1, 2)\n assert c.x == 3\n assert c.y == 2", "def ratio_func(a, b):\n return a / b", "def subtrai(x, y):\n assert isinstance(x, (int, float)), \"x precisa ser int ou float\"\n assert isinstance(y, (int, float)), \"y precisa ser int ou float\"\n return x - y", "def __getxyB(x, y):\n\t\treturn x*3+y", "def test_scalar_division(self):\n\n a1 = tuples.Tuple([\"a\", \"b\", \"c\", \"d\"], 1, -2, 
3, -4)\n\n a2 = a1 / 2\n\n self.assertEqual(a2,\n tuples.Tuple([\"a\", \"b\", \"c\", \"d\"], 0.5, -1, 1.5, -2))", "def divide(n1, n2):\n return n1 / n2", "def elementwise_division(x, y):\n if x.shape != y.shape:\n raise ValueError(\"x and y must have the same shape!\")\n\n y_star = y.clone()\n y_star[1] *= -1\n\n sqrd_abs_y = absolute_value(y).pow_(2)\n\n return elementwise_mult(x, y_star).div_(sqrd_abs_y)", "def __str__(self):\n (x, y) = simplify ((self.num), (self.denom))\n return str(x) + \"/\" + str(y)", "def y(self, x):\n return x", "def g(x, y=1):\r\n return x ** 2 + y", "def getVec(pos1, pos2):\n\n x1 = pos2[0] - pos1[0]\n y1 = pos2[1] - pos1[1]\n gcd1 = math.gcd(abs(x1), abs(y1))\n\n if gcd1 > 0:\n x = x1//gcd1\n else:\n x = x1\n if gcd1 > 0:\n y = y1//gcd1\n else:\n y = y1\n\n return x, y", "def symetrisch(x, y):\n if ((x % 10) == (y // 10)) and ((x // 10) == (y % 10)):\n return True\n else:\n return False", "def div(a,b):\r\n return a/b", "def angle1d(x: float, y: float):\n\n return np.degrees(np.arctan(y / x))", "def _maping(x,y,l,a):\n newx = (x**2 *(l* ((x**2 + y**2)**(a/2) - 1) + 2) - l * y**2 *((x**2 + y**2)**(a/2) - 1))/(x**2 + y**2) \n newy = (2 * x* y *(l* ((x**2 + y**2)**(a/2) - 1) + 1))/(x**2 + y**2)\n return newx, newy", "def calc(operand_1, operand_2):\n return operand_1/operand_2", "def calc(operand_1, operand_2):\n return operand_1/operand_2", "def calc(operand_1, operand_2):\n return operand_1/operand_2", "def d2(x0,y0,x1,y1):\n return (x0-x1)*(x0-x1) + (y0-y1)*(y0-y1)", "def bar(x, y):", "def bar(x, y):", "def bar(x, y):", "def soma(x, y):\n assert isinstance(x, (int, float)), \"x precisa ser int ou float\"\n assert isinstance(y, (int, float)), \"y precisa ser int ou float\"\n return x + y", "def division(self, first_value, second_value):\n return first_value / second_value", "def area(x, y):\n return x*y/2", "def div_proxy(x, y):\r\n f = eval('%s_div' % int_or_true_div(as_scalar(x).type in discrete_types,\r\n as_scalar(y).type in discrete_types))\r\n return f(x, y)", "def convert_coords(x, y, conversion):\n if conversion == \"cartesian\" :\n # convert to cartesian plane coordinates \n x_new = x - (width/2)\n y_new = (height/2) + y \n\n elif conversion == \"pygame\":\n # only needed to place images in pygame\n x_new = x + (width/2)\n y_new = (height/2) - y\n \n return x_new, y_new", "def get_quadrant(x, y):\n try:\n x = int(x)\n y = int(y)\n except ValueError:\n return (0)\n\n if y >= 0 and x > 0:\n return (1)\n elif y >= 0 and x < 0:\n return (2)\n elif y < 0 and x < 0:\n return (3)\n else:\n return (4)", "def get_rel_to_pole(self, x, y):\n return self.sub((x, y), self.pole)", "def ED(X,Y):", "def div2(left: float, right: float) -> float:\n return left / right", "def avg(x, y):\n return (x + y)/2", "def divide(value, arg):\n\treturn float(value) / float(arg)", "def dif_prog_div(x, y):\n\n dd = np.zeros([len(y), len(y)]) \n dd[0] = y\n dd[1,:-1] = (dd[0,1:] - dd[0,:-1]) / (x[1:]-x[:-1])\n\n for i in range(2,len(y)):\n dd[i,:-i] = (dd[i-1,1:-(i-1)] - dd[i-1,:-i]) / (x[i:]-x[:-i])\n\n return dd", "def normalize_coords(xx, yy, width, height):\n xx = (2.0 / (width - 1.0)) * xx.float() - 1.0\n yy = (2.0 / (height - 1.0)) * yy.float() - 1.0\n return xx, yy", "def user_function(x, y):\r\n return x ** 2 + 2 * y ** 2", "def map_minus_one_to_one(x, a, b):\n assert b > a\n s = 2./(b - a)\n t = (a+b)/(a-b)\n y = s*x + t\n y[y>1] = 1\n y[y<-1] = -1\n return y" ]
[ "0.7029484", "0.701576", "0.69088227", "0.69088227", "0.69088227", "0.6760368", "0.662832", "0.6536117", "0.65148705", "0.6464375", "0.64554894", "0.63766176", "0.62106633", "0.6187397", "0.617965", "0.613983", "0.6095693", "0.6090082", "0.60515857", "0.6038736", "0.5985184", "0.5956097", "0.5954202", "0.5953871", "0.59270847", "0.5920142", "0.58965933", "0.5884864", "0.5849012", "0.5849012", "0.5849012", "0.58411944", "0.58356994", "0.5822249", "0.5819433", "0.5795784", "0.57824033", "0.5780113", "0.5777608", "0.57503194", "0.5740632", "0.5701535", "0.57013553", "0.56939507", "0.5687023", "0.5668922", "0.5665368", "0.5648911", "0.5646602", "0.5645601", "0.5636878", "0.56266606", "0.56205535", "0.56168026", "0.5611301", "0.56101614", "0.5597935", "0.5590552", "0.5586232", "0.5579532", "0.55682766", "0.5559083", "0.5555837", "0.55455023", "0.55338967", "0.55332756", "0.5528667", "0.55136657", "0.5510239", "0.5491601", "0.5490692", "0.5484976", "0.54783654", "0.5477569", "0.547567", "0.54636", "0.5457837", "0.54451656", "0.544034", "0.544034", "0.544034", "0.5432774", "0.542067", "0.542067", "0.542067", "0.5412706", "0.5412539", "0.5409822", "0.540921", "0.54053116", "0.5399574", "0.5397398", "0.53897357", "0.5387289", "0.53815365", "0.5379351", "0.5362656", "0.5355605", "0.53485286", "0.5347905" ]
0.5700002
43
returns if postcode like
def is_postal_code(elem):
    return 'post' in elem.attrib['k']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def postcode(self):\n return self._postcode", "def postcode(self):\n return self._postcode", "def validate_postcode_format(self):\n\n assert type(self.postcodes) == str, \"To use this method, the postcode cannot be an iterable.\"\n pcd = self.postcodes.replace(' ', '')\n # The following regular expression matches are in order to adhere to the rules for UK postcodes given in the\n # documentation.\n first_char_alpha = re.match(r'^[a-zA-Z]', pcd)\n last_char_match = re.match(r'[a-zA-Z]', pcd[-1])\n alpha_match = re.search(r'[a-zA-Z]', pcd)\n numeric_match = re.search(r'[0-9]', pcd)\n special_chars_match = re.search(r'[!#,£$%^&*¬-]', pcd)\n if len(pcd) == 0:\n response = 'Null'\n elif (5 <= len(pcd) <= 7) and first_char_alpha and alpha_match and numeric_match \\\n and last_char_match and not special_chars_match:\n response = 'Valid Postcode Format'\n else:\n response = 'Invalid Postcode Format'\n return response", "def detect_postcode_type(postcode):\n postcode_pattern = r'^[A-Z]{1,2}[0-9]{1}[0-9A-Z]{0,1}[\\s]*[0-9][A-Z]{2}$'\n district_pattern = r'^[A-Z]{1,2}[0-9]{1}[0-9A-Z]{0,1}$'\n area_pattern = r'^[A-Z]{1,2}$'\n\n postcode = clean_postcode(postcode)\n\n # Convert x to a pandas series\n postcode = pd.Series(np.atleast_1d(postcode))\n\n postcode_type = np.where(\n postcode.str.match(postcode_pattern), 'postcode',\n np.where(\n postcode.str.match(district_pattern), 'district',\n np.where(\n postcode.str.match(area_pattern), 'area', 'none'\n )\n )\n )\n\n return postcode_type", "def postcode(full_address):\n return capture_address_element(POSTCODE_PATTERN, full_address)", "def normalise_postcode(postcode):\n\n postcode = NON_ALPHA_RE.sub(\"\", postcode.upper())\n postcode = postcode[:-3] + \" \" + postcode[-3:]\n if POSTCODE_RE.match(postcode):\n return postcode\n return None", "def verify_postcode_api(self):\n\n assert type(self.postcodes) == str, \"To use this method, the postcode cannot be an iterable.\"\n request_path = requests.get(self.path + self.postcodes, verify=False)\n response_code = str(request_path)\n\n if response_code == '<Response [200]>':\n verification_status = 'Verified'\n elif response_code == '<Response [404]>':\n verification_status = 'Invalid Postcode'\n elif response_code == '<Response [400]':\n verification_status = 'No Postcode Submitted'\n elif response_code == '<Response [500]':\n verification_status = 'Server error'\n else:\n verification_status = 'Invalid Postcode'\n return verification_status", "def test_can_lookup_postcode(self):\n postcode_to_lookup = \"SW1A 1AA\"\n os_places_key = self.app.config.get(\"OS_PLACES_API_KEY\")\n addresses = AddressLookup(key=os_places_key).by_postcode(postcode_to_lookup)\n self.assertGreater(len(addresses), 0)\n result_postcode = addresses[0].get(\"DPA\", {}).get(\"POSTCODE\")\n self.assertEqual(result_postcode, postcode_to_lookup)", "def update_postcode(postcode, invalid = True):\r\n m = postcode_format_re.search(postcode)\r\n if m:\r\n invalid = False\r\n postcode= postcode[:5]\r\n return (invalid, postcode)", "def get_info_on_postalcode(_, postalcode):\n fourpp = int(postalcode[0:4])\n chars = postalcode[4:6]\n streets = get_streets(fourpp, chars)\n if streets:\n street = streets[0]\n town = street.postcode.city.get_official_name()\n address = street.street\n data = {'found': True, 'address': address, 'town': town}\n else:\n data = {'found': False}\n j = json.dumps(data)\n return HttpResponse(j, content_type='application/json')", "def extract_postcode(s):\n pc_regex = r'([Gg][Ii][Rr] 
0[Aa]{2})|((([A-Za-z][0-9]{1,2})|(([A-Za-z][A-Ha-hJ-Yj-y]'\n pc_regex += r'[0-9]{1,2})|(([A-Za-z][0-9][A-Za-z])|([A-Za-z][A-Ha-hJ-Yj-y][0-9]?[A-Za-z]'\n pc_regex += r'))))\\s?[0-9][A-Za-z]{2})'\n\n re_search = re.search(pc_regex, s)\n if re_search:\n p = re_search.group(0)\n else:\n p = ''\n return p", "def postal_codes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"postal_codes\")", "def postal_codes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"postal_codes\")", "def _is_valid_code(self, code):\r\n return code in COUNTRY_CODES", "def test_getLocationFromPostcode1(self):\n \n pstPrc=PostcodeProcessor()\n coords=pstPrc.getLocationFromPostcode(self.postcode1)\n self.assertEqual(coords.latitude,self.longLat1.latitude)\n self.assertEqual(coords.longitude,self.longLat1.longitude)", "def test_parse_post_code_field(self):\n fields = {'Post code': {'offset': 171,\n 'length': 4}}\n p = top.Parser(fields=fields)\n received = p.parse_line(self._line)\n expected = {'Post code': '2048'}\n msg = 'Post code field parse incorrect'\n self.assertEqual(received, expected, msg)", "def parse_postalUS(self):\n \n index = self.index\n \n # US Postal Code\n if len(self.words[index]['word']) != 5 or not self.words[index]['word'].isdigit():\n return None, 0\n postal = self.words[index]['word']\n \n if index + 1 < self.length:\n if self.words[index+1]['word'] == '-':\n index += 2\n if index == self.length:\n return None, 0\n if len(self.words[index]['word']) == 4 and self.words[index]['word'].isdigit():\n postal += '-' + self.words[index]['word']\n return postal, 3\n else:\n return postal, 1\n \n return postal, 1", "def postal_code(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"postal_code\")", "def pintest(self, barcode, pin):\n u = self.dump(barcode)\n if 'ERRNUM' in u:\n return False\n return len(barcode) == 14 or pin == barcode[0] * 4", "def get_city_by_code(post_code):\n post_code = post_code.replace(' ', '').encode('utf-8')\n error = ''\n city = ''\n opener = urllib2.build_opener()\n url = 'http://maps.googleapis.com/maps/api/geocode/json?address={0}&sensor=false'.format(post_code)\n response = opener.open(url).read()\n response_dict = json.loads(response)\n request_status = response_dict['status']\n if request_status == 'OK':\n logger.debug('Google response')\n logger.debug(response_dict)\n results = response_dict['results']\n \"\"\"\n first get all results\n with required zip code\n \"\"\"\n results_with_required_zip_code = []\n for result in results:\n address_components = result['address_components']\n for address_component in address_components:\n types = address_component['types']\n for t in types:\n if t == 'postal_code' and address_component['short_name'].replace(' ', '').lower() == post_code.lower():\n results_with_required_zip_code.append(result)\n if not results_with_required_zip_code:\n error = {\n 'status': '8',\n 'message': POST_CODE_DOES_NOT_EXISTS,\n 'title': POST_CODE_DOES_NOT_EXISTS_TITLE\n }\n # error = 'No location with post code %s' % post_code\n else:\n \"\"\"\n next we need all results in GB\n \"\"\"\n results_with_required_zip_code_in_GB = ''\n for good_result in results_with_required_zip_code:\n address_components = good_result['address_components']\n for address_component in address_components:\n types = address_component['types']\n for t in types:\n if t == 'country' and address_component['short_name'].lower() == 'GB'.lower():\n results_with_required_zip_code_in_GB = 
good_result\n if not results_with_required_zip_code_in_GB:\n error = {\n 'status': '7',\n 'message': POST_CODE_DOES_NOT_EXISTS_IN_GB,\n 'title': POST_CODE_DOES_NOT_EXISTS_IN_GB_TITLE\n }\n # error = 'No city with post code %s in GB' % post_code\n else:\n \"\"\"\n finally find city name\n \"\"\"\n address_components = results_with_required_zip_code_in_GB['address_components']\n # first try get postal city\n searching_city = get_city_by_key(address_components, 'postal_town')\n if not searching_city:\n # next by administrative_area_level_2\n searching_city = get_city_by_key(address_components, 'administrative_area_level_2')\n if not searching_city:\n print url\n error = {\n 'status': '7',\n 'message': POST_CODE_DOES_NOT_EXISTS_IN_GB,\n 'title': POST_CODE_DOES_NOT_EXISTS_IN_GB_TITLE\n }\n # error = 'No city with post code %s in GB' % post_code\n else:\n city = searching_city\n elif request_status == 'ZERO_RESULTS':\n error = {\n 'status': '8',\n 'message': POST_CODE_DOES_NOT_EXISTS,\n 'title': POST_CODE_DOES_NOT_EXISTS_TITLE\n }\n else:\n error = request_status\n return {\n 'error': error,\n 'data': city\n }", "def is_valid(postal_code):\n return bool(re.match(UK_POST_CODE_REGEX, postal_code, re.VERBOSE)) if postal_code else False", "def postal_code(self, instance):\r\n return instance.user.profile.postal_code", "def country(alpha_2_code: str) -> None:", "def correct_zipcode(business_tag, zipcode):\n try:\n address = business_tag.find('div', {'class': 'secondary-attributes'}).find('address').text\n zipcode_found = re.search(re.compile('(^|[^\\d])\\d{5}($|[^\\d])'), address).group(0)\n zipcode_found = re.search(re.compile('\\d{5}'), zipcode_found).group(0)\n return zipcode_found == zipcode\n except:\n return False", "def postal(self):\n if self.index >= self.length:\n return False \n \n if self._sta and \"CA-\" in self._sta:\n self._pst, n = self.parse_postalCA()\n else:\n self._pst, n = self.parse_postalUS()\n \n if self._pst is not None:\n self.idx_pst = self.index\n self.index += n\n if self._debug: print(\"PST\", self._pst, self.idx_pst)\n return True\n return False", "def _select_market_code(code):\n code = str(code)\n if code[0] in ['5', '6', '9'] or code[:3] in [\"009\", \"126\", \"110\", \"201\", \"202\", \"203\", \"204\"]:\n return 1\n return 0", "def geocode_one(self, postcode: str, address: Optional[str] = None) -> pd.Series:\n if postcode is None and address is None:\n raise utils.GenericException(\"You must pass either postcode or address, or both.\")\n if self.gmaps_key is None:\n self.gmaps_key = self._load_key()\n if self.gmaps_key is not None:\n self.gmaps_client = googlemaps.Client(key=self.gmaps_key)\n if self.cache is None:\n self._load_cache()\n sep = \", \" if address and postcode else \"\"\n postcode = postcode if postcode is not None else \"\"\n address = address if address is not None else \"\"\n search_term = f\"{address}{sep}{postcode}\"\n if search_term in self.cache:\n logging.debug(\"Loading GMaps Geocoder API result from cache: '%s'\", search_term)\n geocode_result = self.cache[search_term]\n else:\n logging.debug(\"Querying Google Maps Geocoder API for '%s'\", search_term)\n if self.gmaps_key is None:\n return pd.Series({\"latitude\": np.nan, \"longitude\": np.nan, \"match_status\": 0})\n geocode_result = self.gmaps_client.geocode(search_term, region=\"uk\")\n self.cache[search_term] = geocode_result\n self.cache_modified = True\n if not geocode_result or len(geocode_result) > 1:\n return pd.Series({\"latitude\": np.nan, \"longitude\": np.nan, \"match_status\": 
0})\n geometry = geocode_result[0][\"geometry\"]\n ok_loc_types = [\"ROOFTOP\", \"GEOMETRIC_CENTER\"]\n if geometry[\"location_type\"] in ok_loc_types or \\\n geocode_result[0][\"types\"] == [\"postal_code\"]:\n return pd.Series({\"latitude\": geometry[\"location\"][\"lat\"],\n \"longitude\": geometry[\"location\"][\"lng\"],\n \"match_status\": 3})\n return pd.Series({\"latitude\": np.nan, \"longitude\": np.nan, \"match_status\": 0})", "def is_valid_postal_code(postal_code):\n assert postal_code is not None\n postal_code = postal_code.replace(\" \", \"\")\n postal_code_re = re.compile(r\"\\s*(\\w\\d\\s*){3}\\s*\")\n return postal_code_re.match(postal_code) is not None", "def postal_code(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"postal_code\")", "def postal_code(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"postal_code\")", "def postal_code(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"postal_code\")", "def is_code(binary_addr):\n\n classification = classifications[binary_addr]\n if classification is None or classification == partial_classification:\n return False\n return classification.is_code(binary_addr)", "def check_code(item_code):\r\n # RA matches\r\n if re.match(r'^MCRNC[0-9]{4}\\.T$', item_code):\r\n return True\r\n\r\n if re.match(r'^RAN[0-9]{3,4}(\\.[0-9])?C?(\\.T)?$', item_code):\r\n return True\r\n\r\n if re.match(r'^RAS[0-9]{5}$', item_code):\r\n return True\r\n\r\n if re.match(r'^RNC[0-9]{4}\\.T$', item_code):\r\n return True\r\n\r\n if re.match(r'^RU[0-9]{5}(\\.T)?$', item_code):\r\n return True\r\n\r\n # Feature ID (RAN) matches\r\n if re.match(r'^RAN[0-9]{2,5}$', item_code):\r\n return True\r\n\r\n if re.match(r'^(?P<code>RAN[1,2](\\.[0-9]{3,4}))$', item_code):\r\n return True\r\n\r\n return False", "def is_dementia(code):\n assert isinstance(code, str)\n code_set = ('294.10', '294.11', '294.20', '294.21', '2941', '29411', '2942', '29421')\n code_set += ('290',)\n code_set += ('F01', 'F02', 'F03')\n return code.startswith(code_set)", "def test_getLoctionFromPostcode2(self):\n \n pstPrc=PostcodeProcessor()\n coords=pstPrc.getLocationFromPostcode(self.postcode2)\n self.assertNotEqual(coords.latitude,self.longLat1.latitude)\n self.assertNotEqual(coords.longitude,self.longLat1.longitude)", "def geo_coder(house_number, boro_code, street_name, zip_code): \r\n wa1 = '1B{}{}{}{}{}C{}{}'.format(rightpad(house_number, 16), rightpad('', 38), boro_code, rightpad('', 10), rightpad(street_name, 32), rightpad('', 113), rightpad(zip_code, 5))\r\n wa1 = rightpad(wa1, 1200)\r\n wa2 = rightpad('', 4300)\r\n NYCGeo.NYCgeo(wa1, wa2)\r\n return wa1, wa2", "def geocode_postcode(self, postcode: [str],\n address: Optional[str] = None) -> Union[Tuple[float, float], List[Tuple[float, float]]]:\n address = [None for a in address] if address is None else list(address)\n logging.debug(\"Geocoding %s postcodes (%s addresses)\", len(postcode), len(address))\n results = []\n for pc, addr in zip(postcode, address):\n results.append(self.geocode_one(postcode=pc, address=addr))\n return results", "def postal_codes(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"postal_codes\")", "def postal_code_current(self, instance):\r\n return instance.user.profile.postal_code_current", "def get_postal_codes(pts):\n codigos = np.zeros((len(pts),))\n for i, p in tqdm(enumerate(pts), desc=\"GETTING POSTAL CODES\"):\n p = Point(p[0], p[1])\n for j in range(cod_postales.shape[0]):\n if cod_postales.geometry.iloc[j].contains(p):\n 
codigos[i] = cod_postales.geocodigo.iloc[j]\n return codigos[codigos != 0]", "def is_valid_postal_code(postal_code):\n is_code_valid = False\n postcode_regex = re.compile(r'^\\d{2}-\\d{3}$')\n\n if postcode_regex.search(postal_code) is not None:\n is_code_valid = True\n\n return is_code_valid", "def geocode(postcode):\n key = current_app.config.get(\"OS_PLACES_API_KEY\")\n formatted_addresses = FormattedAddressLookup(key=key).by_postcode(postcode)\n response = [{\"formatted_address\": address} for address in formatted_addresses if address]\n return Response(json.dumps(response), mimetype=\"application/json\")", "def get_postal_code(self):\n element = self.driver.find_element(*self.postalcode_textbox_selector)\n return element.get_attribute(\"value\")", "def postal_code(self):\n if \"postalCode\" in self._prop_dict:\n return self._prop_dict[\"postalCode\"]\n else:\n return None", "def postal_code(self):\n if \"postalCode\" in self._prop_dict:\n return self._prop_dict[\"postalCode\"]\n else:\n return None", "def is_geocoded(self):\n return self.position != None", "def get_jurisdiction_flag(data: dict) -> str:\n try:\n within_juris = data[\"event\"][\"data\"][\"new\"][\"austin_full_purpose\"] == \"Y\"\n return \"Y\" if within_juris else \"N\"\n except (TypeError, KeyError):\n return \"N\"", "def is_code(self, address):\n return self.is_address_of_type(address, MemoryType.Code)", "def main(postalcode):\n places = postalcodes_mexico.places(postalcode)\n click.echo(places)\n return 0", "def is_coding(self):\n return self.protein_seq is not None", "def __contains__(self, code: str) -> bool:\n return code in self._all_codes_map", "def postal_code(self):\n return self._postal_code", "def postalcode_area_studies():\n dfpawnshop = pd.read_csv(pawnmtl.csv)\n cpdic = getPostalCodeDic()\n for ik in cpdic.keys():\n print ik, cpdic[ik]", "def USCode(self, short):\n states = {\n 'AK': 'Alaska',\n 'AL': 'Alabama',\n 'AR': 'Arkansas',\n 'AS': 'American Samoa',\n 'AZ': 'Arizona',\n 'CA': 'California',\n 'CO': 'Colorado',\n 'CT': 'Connecticut',\n 'DC': 'District of Columbia',\n 'DE': 'Delaware',\n 'FL': 'Florida',\n 'GA': 'Georgia',\n 'GU': 'Guam',\n 'HI': 'Hawaii',\n 'IA': 'Iowa',\n 'ID': 'Idaho',\n 'IL': 'Illinois',\n 'IN': 'Indiana',\n 'KS': 'Kansas',\n 'KY': 'Kentucky',\n 'LA': 'Louisiana',\n 'MA': 'Massachusetts',\n 'MD': 'Maryland',\n 'ME': 'Maine',\n 'MI': 'Michigan',\n 'MN': 'Minnesota',\n 'MO': 'Missouri',\n 'MP': 'Northern Mariana Islands',\n 'MS': 'Mississippi',\n 'MT': 'Montana',\n 'NA': 'National',\n 'NC': 'North Carolina',\n 'ND': 'North Dakota',\n 'NE': 'Nebraska',\n 'NH': 'New Hampshire',\n 'NJ': 'New Jersey',\n 'NM': 'New Mexico',\n 'NV': 'Nevada',\n 'NY': 'New York',\n 'OH': 'Ohio',\n 'OK': 'Oklahoma',\n 'OR': 'Oregon',\n 'PA': 'Pennsylvania',\n 'PR': 'Puerto Rico',\n 'RI': 'Rhode Island',\n 'SC': 'South Carolina',\n 'SD': 'South Dakota',\n 'TN': 'Tennessee',\n 'TX': 'Texas',\n 'UT': 'Utah',\n 'VA': 'Virginia',\n 'VI': 'Virgin Islands',\n 'VT': 'Vermont',\n 'WA': 'Washington',\n 'WI': 'Wisconsin',\n 'WV': 'West Virginia',\n 'WY': 'Wyoming'\n }\n return states.get(short)", "def test_getLocationFromPostcodeKML1(self):\n \n pstPrc=PostcodeProcessor()\n coords=pstPrc.getLocationFromPostcodeKML(self.postcode1)\n self.assertEqual(coords.latitude,self.longLat1.latitude)\n self.assertEqual(coords.longitude,self.longLat1.longitude)", "def test_country_code(self):\n\t\tcountry_name = 'United States'\n#\t\tpopulation = int(float(pop_dict['Value']))\n\t\tcode = 
get_country_code(country_name)\t\t\n\t\t#Assert methods verifies result received matches expected one\n\t\tself.assertEqual(code, 'usa')", "def country_codes(country):\n countryObject = None\n try:\n countryObject = pycountry.countries.search_fuzzy(country)\n return countryObject[0].alpha_2\n except LookupError:\n pass\n try:\n splittedCountry = country.split(',')[0]\n countryObject = pycountry.countries.search_fuzzy(splittedCountry)\n return countryObject[0].alpha_2\n except LookupError:\n return 'No Code'", "def clean_postcodes(postcodes):\n postcode_df = pd.DataFrame({'Postcode':postcodes})\n postcode_df['Postcode'] = postcode_df['Postcode'].str.upper()\n\n # If length is not 7 get rid of spaces. This fixes e.g. \"SW19 2AZ\" -> \"SW192AZ\"\n postcode_df['Postcode'] = postcode_df['Postcode'].where(\n postcode_df['Postcode'].str.len() == 7, postcode_df['Postcode'].str.replace(\" \", \"\"))\n\n # If length is 5 (e.g. \"W67HZ\") add two spaces in the middle (-> \"W6 7HZ\")\n postcode_df['Postcode'] = postcode_df['Postcode'].where(\n postcode_df['Postcode'].str.len() != 5,\n postcode_df['Postcode'].str[:2]+ \" \" + postcode_df['Postcode'].str[2:])\n\n # If length is 6 (e.g. \"SW72AZ\") add a space in the middle and end(-> \"SW7 2AZ\")\n postcode_df['Postcode'] = postcode_df['Postcode'].where(\n postcode_df['Postcode'].str.len() != 6,\n postcode_df['Postcode'].str[:3]+ \" \" + postcode_df['Postcode'].str[3:])\n\n return postcode_df['Postcode'].to_numpy()", "def clean_postal_code(self):\n return self.cleaned_data['postal_code'].strip()", "def is_code_has_unknown_digit(processed_code):\n return True if list(processed_code).count(\"?\") == 0 else False", "def CheckZipCode(zipcode):\n # see if there are enough digits\n if (len(zipcode) >= 5):\n # check if numerical\n try:\n int(zipcode)\n return True\n except:\n return False\n else:\n return False", "def get_pcode(\n self,\n countryiso3: str,\n name: str,\n fuzzy_match: bool = True,\n logname: Optional[str] = None,\n ) -> Tuple[Optional[str], bool]:\n pcode = self.admin_name_mappings.get(name)\n if pcode and self.pcode_to_iso3[pcode] == countryiso3:\n return pcode, True\n name_to_pcode = self.name_to_pcode.get(countryiso3)\n if name_to_pcode is not None:\n pcode = name_to_pcode.get(name.lower())\n if pcode:\n return pcode, True\n if name in self.pcodes: # name is a pcode\n return name, True\n if self.get_admin_level(countryiso3) == 1:\n pcode = self.convert_admin1_pcode_length(\n countryiso3, name, logname\n )\n if pcode:\n return pcode, True\n if not fuzzy_match:\n return None, True\n pcode = self.fuzzy_pcode(countryiso3, name, logname)\n return pcode, False", "def decode(code):\n def h(x):\n hs = []\n for i in range(len(code)):\n if code[i] != '0' and (code[i] == '?' 
or code[i] == x[i]):\n hs.append(True)\n else:\n hs.append(False)\n return all(hs)\n return h", "def check_pin(self, card_number, pin):\n database_cursor.execute(f\"SELECT pin FROM card WHERE number = {card_number};\")\n result = database_cursor.fetchall()\n print(result)\n return result[0][0] == pin", "def get_pincode_info(request):\n form = PincodeForm()\n pincode_dict, errors = dict(), []\n if request.method == 'POST':\n form = PincodeForm(request.POST)\n if form.is_valid():\n pincode = form.cleaned_data.get('pincode')\n db = fetch_replica_db()\n p = db.postal_codes.find_one({'pin': pincode},\n ['pin', 'district', 'st', 's', 'cn', 'rcn', 'ud', 'oz',\\\n 'u', 'pc', 'cty', 'zn', 'dto_center', 'inc'])\n if p:\n try:\n cs = p.get('s', [])[-1]\n except IndexError:\n cs = {}\n try:\n cn = p.get('cn', [])[-1]\n except IndexError:\n cn = {}\n\n pincode_dict = [\n ('Pin', p.get('pin', '')),\n ('State code', p.get('st', '')),\n ('cod', cs.get('cod', '')),\n ('Prepaid', cs.get('pre-paid', '')),\n ('Pickup', cs.get('pickup', '')),\n ('Cash', cs.get('cash', 'N')),\n ('repl', cs.get('repl', 'N')),\n ('Octroi', cs.get('octroi', 'N')),\n ('is_oda', cs.get('is_oda', 'N')),\n ('wb_tax', cs.get('wbt', 'N')),\n ('jk_tax', cs.get('jkt', 'N')),\n ('State Tax', cs.get('sbt', 'N')),\n ('District', p.get('district', '')),\n ('Dispatch center', cn.get('cn', '')),\n ('Return center', p.get('rcn', '')),\n ('DTO center', p.get('dto_center', '')),\n ('Incoming center', p.get('inc', '')),\n ('Octroi zone', p.get('oz', '')),\n ('Code', cn.get('code', '')),\n ('Postal category', \", \".join(p.get('pc', []))),\n ('City', p.get('cty', '')),\n ('Zone', p.get('zn', '')),\n ('Sort code', cn.get('sort_code', '')),\n ('Max amount', cs.get('amt', 0.0)),\n ('Max weight', cs.get('mwt', 0.0))\n ]\n else:\n errors.append('pin code not found!')\n return render(request, 'pincode_info.html',\n {'form': form, 'pincode_dict': pincode_dict, 'errors': errors})", "def check_pra_symbol(symbol):\n # Platts\n if len(symbol) == 7 and symbol[:2] in [\n 'PC', 'PA', 'AA', 'PU', 'F1', 'PH', 'PJ', 'PG', 'PO', 'PP', ]:\n return True\n\n # Argus\n if '.' 
in symbol:\n sm = symbol.split('.')[0]\n if len(sm) == 9 and sm.startswith('PA'):\n return True\n\n return False", "def find_places(query):\n parts = str(query).split(' ')\n for i, p in enumerate(parts):\n p = p.replace('-', ' ').strip()\n try:\n postal_code = int(p)\n if len(postal_code) == 4:\n print(postal_code, parts[i+1])\n # Check \n #response = get_osm_location(\"{postal_code} {name}\")\n #lon = response['lon']\n #lat = response['lat']\n #poly = \n except Exception as e:\n continue", "def checkPostalCode(self, code, country):\n if country == 'US':\n USZipCodeField().clean(code)", "def flag_regional_indicator(code: List[str]) -> str:\r\n\r\n return \"\".join([chr(ord(c.upper()) + OFFSET) for c in code])", "def test_postal_code(self):\n self.assertIsInstance(self.address.postal_code, str)\n self.assertEqual(self.address.postal_code, \"75000\")", "def compute_zip_code(zip_code_text):\n zip_code = None\n if zip_code_text and len(zip_code_text) >= 5 and zip_code_text.isdigit():\n zip_code = zip_code_text[:5]\n return zip_code", "def find_zip_code(x):\n i = 0\n j = 4\n for i in range(1,len(x)-6):\n string = x[i-1:i+6]\n cond = (string[1:-1].isnumeric(), not string[0].isnumeric(), not string[-1].isnumeric())\n if all(cond):\n return x[i:i+5]", "def find_data_for_post_code(self, post_code: str, response_text: list) -> dict:\n for item in response_text:\n if item.get('post code') == post_code:\n return item\n return None", "def is_reserved(code):\n return 1000 <= code <= 2999", "def country() -> str:", "def area_code(self):\n return self.number[:3]", "def flag(countrycode: str) -> str:\r\n\r\n code = [c for c in countrycode.lower() if c in ASCII_LOWER]\r\n if len(code) == 2:\r\n # Regional indicator symbols\r\n return flag_regional_indicator(code)\r\n if len(code) > 2 and len(code) < 7:\r\n # Tag sequence\r\n return flag_tag_sequence(code)\r\n found = ''.join(code)\r\n raise ValueError(\r\n 'invalid countrycode, found %d (%r) in %r.' 
%\r\n (len(found), found, countrycode))", "def zipcode_validation(add):\r\n lng=get_address(add)[1]\r\n lat=get_address(add)[0]\r\n engine = get_sql_engine()\r\n query = text(\r\n \"\"\"\r\n SELECT\r\n code\r\n FROM philly_zipcode\r\n WHERE ST_Intersects(geom, ST_SetSRID(ST_MakePoint(:lng, :lat), 4326))\r\n \"\"\"\r\n )\r\n resp = engine.execute(query,lng=lng, lat=lat).fetchall()\r\n return resp", "def test_getLocationFromPostcode3(self):\n \n pstPrc=PostcodeProcessor()\n try:\n _coords=pstPrc.getLocationFromPostcode(self.postcode3)\n self.assertTrue(0,'Bad coordinate should not return postcode.')\n except:\n self.assertRaises(HTTPError)", "def is_code(self) -> bool:\n return any(seg.is_code for seg in self.segments)", "def is_code_cell(cell):\n return cell[\"cell_type\"] == \"code\"", "def is_coding(self):\n return self.wt.is_coding()", "def county_name(zipcode): \n search = SearchEngine(simple_zipcode=True) # set simple_zipcode=False to use rich info database\n zipcode_query = search.by_zipcode(str(zipcode))\n zipcode_query_dict = zipcode_query.to_dict()\n county = zipcode_query_dict['county']\n if county is None:\n print('Invalid County')\n else :\n if 'County' in county:\n county = county[:-7]\n if county in county_list:\n print('County is County List')\n print(county)\n return county", "def test_getLocationFromPostcodeKML2(self):\n \n pstPrc=PostcodeProcessor()\n try:\n _coords=pstPrc.getLocationFromPostcodeKML(self.postcode2)\n self.assertTrue(0,'Bad coordinate should not return postcode.')\n except:\n self.assertRaises(KeyError)", "def country_code(self) -> str | None:\n pass", "def provider(provider):\n if provider in (\"alditalk\", \"netzclub\", \"congstar\"):\n return True\n else:\n return False", "def url_contains_auth_code(url: str) -> bool:\n return url.count(\"code=\") == 1", "def is_hometown(town_name):\n\n\tDC = [\"washington d.c.\", \"dc\", \"washington dc\", \"d.c.\"]\n\n\tif town_name.lower() in DC:\n\t\treturn True\n\telse: \n\t\treturn False", "def get_postal_code_by_name(self, name):\n raise NotImplementedError()", "def identifyCounty(line):\n matches = re.findall('[a-zA-Z]', line)\n if len(matches) > 0 and ''.join(matches) != \"Total\":\n return True", "def postcode(self, postcode):\n\n self._postcode = postcode", "def postcode(self, postcode):\n\n self._postcode = postcode", "def postcode(self, postcode):\n\n self._postcode = postcode", "def is_in_county(p):\n try:\n float(p[0:1])\n return True\n except ValueError:\n return False", "def is_virtual(entry):\n\n if entry.get('text', '') == '':\n return 'No'\n\n # search for Invasion split cards\n regex = search('\\[This is half of the split card (.+)\\]', entry['text'])\n if regex is not None:\n return 'Yes: ' + regex.group(1)\n\n # search for Kamigawa flip cards\n regex = search('\\[Flips from (.+)\\]', entry['text'])\n if regex is not None:\n return 'Yes: ' + regex.group(1)\n\n # search for Innistrad shapeshifters\n regex = search('\\[(|.+)Back face. 
Transforms into (.+)\\.\\]', entry['text'])\n if regex is not None:\n return 'Yes: ' + regex.group(2)\n\n return 'No'", "def is_library(code):\n return 3000 <= code <= 3999", "def post_code(self, post_code):\n\n self._post_code = post_code", "def zacina_samohlaskou(meno):\n if not isinstance(meno,str):\n return False\n samohlasky = (\"A\", \"e\", \"i\", \"o\", \"u\", \"a\", \"E\", \"I\", \"O\", \"U\")\n if meno[0] in samohlasky:\n return True\n else:\n return False", "def is_on_dbsnp(row):\n is_on_dbsnp = 1\n\n if row[\"dbsnp\"] == \"-\":\n is_on_dbsnp = 0\n\n return is_on_dbsnp", "def is_not_used(code):\n return 0 <= code <= 999" ]
[ "0.65805596", "0.65805596", "0.6374579", "0.63254106", "0.6238786", "0.6128216", "0.6051144", "0.60343444", "0.586208", "0.58339965", "0.5811099", "0.57576764", "0.57576764", "0.5754006", "0.5704175", "0.56964314", "0.56823075", "0.56657773", "0.5656511", "0.5651277", "0.56479186", "0.56326675", "0.5614085", "0.56061846", "0.56052697", "0.5603055", "0.5600199", "0.5586636", "0.5574197", "0.5574197", "0.5574197", "0.5572051", "0.55578816", "0.5549245", "0.5538215", "0.55372596", "0.5514498", "0.5512983", "0.55086535", "0.55005765", "0.5460746", "0.5457961", "0.54455996", "0.53774524", "0.53774524", "0.5349838", "0.5345177", "0.5341226", "0.53384846", "0.533258", "0.5315489", "0.5315293", "0.5303028", "0.52787447", "0.5278298", "0.527814", "0.527185", "0.5270483", "0.5267802", "0.5263171", "0.52596796", "0.52571106", "0.52527714", "0.5237811", "0.52281225", "0.52194256", "0.52157485", "0.5211225", "0.5209141", "0.52042377", "0.518721", "0.5183155", "0.51751995", "0.51586837", "0.5154381", "0.51540923", "0.5151765", "0.5137538", "0.5106066", "0.51009893", "0.5076413", "0.5052857", "0.5051302", "0.5039515", "0.50319964", "0.5018397", "0.50111103", "0.5009726", "0.5009354", "0.50069195", "0.5005196", "0.5005196", "0.5005196", "0.500059", "0.4999232", "0.49940342", "0.49764967", "0.4973143", "0.49718085", "0.4971786" ]
0.7000855
0
Add a journal entry.
def add_entry(self, entry: str) -> None:
    self.entries.append(f"{self.count}: {entry}")
    self.count += 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add():\n form = forms.JournalForm()\n if form.validate_on_submit():\n models.Journal.create(\n title=form.title.data,\n date=form.date.data,\n time_spent=form.time_spent.data,\n learnt=form.learnt.data,\n resources=form.resources.data)\n flash('Entry has been created', 'success')\n return redirect(url_for('index'))\n return render_template('add.html', form=form)", "def add_journal_data(\n self,\n volume: Optional[str] = None,\n issue: Optional[str] = None,\n publication_date: Optional[str] = None,\n ):\n\n self.journal_data.append(\n JournalData(\n volume=volume, issue=issue, publication_date=publication_date\n )\n )", "def add_entry(name, title, duration, notes):\n clear()\n print('Entry added to work log!')\n return Entry.create(\n employee_name=name,\n task_title=title,\n time_spent=duration,\n task_notes=notes\n )", "def add_entry(self, *args, **kwargs):\n entry = Entry(*args, **kwargs) # NOTE: not sure this is good\n self._entries[entry.uuid] = entry\n return entry", "def add_entry(self, number: int, entry: Entry) -> None:\n raise NotImplementedError", "def add_log_entry(self, log_entry):\n self.log_entries.append(log_entry)", "def add_new_entry(self):\n clear_screen()\n new_entry = Entry.create()\n if new_entry is None:\n print(\"Add new entry cancelled. Returning to main menu...\")\n time.sleep(1)\n return None\n self.entries.append(new_entry)\n with open(self.file_name, \"a\") as file:\n writer = csv.writer(file)\n writer.writerow([new_entry.date, new_entry.name, new_entry.minutes, new_entry.note])", "def add(\n description: str = typer.Argument(\n ...,\n help=\"Description of the log entry\"\n ),\n date: datetime = typer.Option(\n datetime.now().strftime(\"%Y-%m-%d\"), '--date', '-d',\n help=\"Date of the log entry\"\n ),\n time: datetime = typer.Option(\n datetime.now().strftime(\"%I:%M %p\"), '--time', '-t',\n formats=[\"%H:%M:%S\", \"%I:%M %p\"],\n help=\"Time of the log entry\"\n )\n):\n log_entry_time = time.time()\n log_datetime = datetime.combine(date, log_entry_time)\n\n manager = LogBookManager()\n created, message = manager.create(description, log_datetime)\n\n if created:\n typer.echo(\n typer.style(message, fg=typer.colors.GREEN, bold=True)\n )\n else:\n typer.echo(\n typer.style(message, fg=typer.colors.RED, bold=True)\n )", "def test_add_journal_entry(self):\n url = reverse('journal')\n data = {\n 'game': {\n 'id': self.game.igdb,\n 'name': self.game.name,\n 'slug': self.game.slug,\n 'coverId': self.game.cover_id,\n 'backdropId': self.game.backdrop_id\n },\n 'date': '2019-06-28',\n 'review': 'cool game',\n 'spoilers': False,\n 'liked': True,\n 'rating': 5,\n 'entry_type': 'Finished',\n 'platform': 'PC',\n }\n response = self.client.post(url, data, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def add_entry():\n if not check_admin_logged() :\n abort(403)\n\n title = request.form['title']\n category = request.form['category']\n buydate = request.form['buydate']\n introduction = request.form['introduction']\n\n if not check_items_in_form(title, category, buydate):\n return redirect(url_for('show_entries_admin'))\n\n new_entry = Entries(title, category, buydate, introduction)\n db.session.add(new_entry)\n\n try :\n db.session.commit()\n except IntegrityError as e :\n flash(e.message)\n return redirect(url_for('show_entries_admin'))\n\n flash(u'成功添加新的条目')\n return redirect(url_for('show_entries_admin'))", "def add_entry(self, account):\n def txn():\n entry = self.entries.filter('account =', account).get()\n if not entry:\n entry = 
Entry(account=account, parent=self)\n entry.put()\n created = True\n else:\n created = False\n return entry, created\n return db.run_in_transaction(txn)", "def create_and_add_entry(self, **attrs):\n return self.add_entry(self.create_entry(**attrs))", "def add(self, entry):\n \"An entry is a tuple of (id, datatime, text).\"\n id = entry[0]\n datee = entry[1]\n text = re.sub('[^A-Za-z0-9]+', ' ', entry[2].lower())\n self.recordsDict[id].create(id, datee, entry[2])\n for word in text.split():\n self.wordDict[word].add(id)", "def new_entry():\n clear_screen()\n entry = {}\n entry['id'] = get_next_id()\n entry['name'] = input_name()\n print(\"How many minutes did you spend on {}?\".format(entry['name']))\n print(\"Or you may specify a format after the time, seperated by a comma\")\n entry['time_spent'] = input_time_spent()\n add_notes = input(\"Add notes? Y/n \").lower()\n if add_notes != 'n':\n entry['notes'] = input_notes()\n entry['date'] = datetime.now().strftime(FMT_MONTH_DAY_YEAR)\n with open(WORK_LOG_FILENAME, 'a', newline='') as work_log:\n work_log_writer = csv.DictWriter(work_log, fieldnames=FIELDNAMES)\n work_log_writer.writerow(entry)", "def addEntry(self, entry):\n \n with open(self.current_log, 'ab') as a:\n logAppender = csv.writer(a, delimiter=\"|\")\n logAppender.writerow(entry)", "def add_entry_to_db(entry):\n db.session.add(entry)\n db.session.commit()", "def save(journal: Journal, file: Path) -> None:\n with open(file, \"w\") as output:\n output.writelines(f\"{entry}\\n\" for entry in journal.get_entries())", "def add_entry():\n if not session.get('logged_in'):\n abort(401)\n\n if request.method == 'POST':\n db = get_db()\n cur = db.execute('insert into entries (title, ingredients, steps, \\\n tags, url) values (?, ?, ?, ?, ?)',\n [request.form['title'], request.form['ingredients'],\n request.form['steps'], request.form['tags'],\n request.form['url']])\n db.commit()\n flash('Recipe, ' + escape(request.form['title'])\n + ', was successfully added', 'success')\n return view_entry(str(cur.lastrowid))\n else:\n return render_template('add_entry.html')", "def add_entry_to_bibtex_db(self, ent):\n\n # add additional fields manually to the dict\n ent.consolidate_dict()\n self.bibtex_db.entries.append(ent.raw_dict)\n # the following updates the entries dict\n # self.bibtex_db.get_entry_dict()\n # # make sure it's there\n # if ent.ID not in self.bibtex_db.entries_dict:\n # self.bibtex_db.entries_dict[ent.ID] = ent.raw_dict", "def logentry(jobid, label, typ, content=None, path=None):\n ud = str(uuid.uuid4())\n db.logs.save({\"uuid\":ud, \"jobid\":jobid, \"label\":label, \"type\":typ, \"content\":content, \"date\":tstamp(), \"containerurl\":path})", "def add_entry(title: str, datetime: pendulum.datetime) -> None:\n datetime = datetime.in_tz('UTC')\n conn = sqlite3.connect('rss.db')\n c = conn.cursor()\n c.execute(\n \"\"\"insert into entries values\n (?, ?, ?, ?)\"\"\",\n (title, datetime.year, datetime.month, datetime.day)\n )\n conn.commit()\n conn.close()", "def journal_write(session, k, v):\n entry = models.VppEtcdJournal(k=k, v=v)\n session.add(entry)\n session.flush()", "def db_add_entry(person):\n db = sh.open(the_phone_book_name, flag='c', writeback=True)\n if person.name in db:\n print(\"Updating existing entry ..... {name}\\n\".format(name=person.name))\n else:\n person.new = True\n print(\"Adding new entry ..... 
{name}\".format(name=person.name))\n db[person.name.capitalize()] = person.phone\n db.sync()\n db.close()\n db_show_all()", "def add_entry(self, entry):\n if self.get_entry(entry):\n return entry\n\n keys, values = [], []\n for i in entry:\n keys.append(\"'{}'\".format(i))\n if not isinstance(entry[i], str):\n values.append(\"'{}'\".format(str(entry[i])))\n else:\n values.append(\"'{}'\".format(entry[i]))\n\n keys.append(\"'hash'\")\n values.append(\"'{}'\".format(self._calculate_hash(entry)))\n sql = 'INSERT INTO {t_id} ({keys}) VALUES ({values})'.format(\n t_id=self.table_id, keys=','.join(keys), values=','.join(values))\n self.fusiontables.query().sql(sql=sql).execute()", "def append(self, entry):\n if not isinstance(entry, Entry):\n raise ValueError('Not an tlv8.Entry: {e}'.format(e=entry))\n self.data.append(entry)", "def main():\n ds = 72\n title = 'Journal'\n journal_name = 'my-journal'\n headers.dashes_line(ds)\n headers.print_header(title, ds)\n data = journal.load(journal_name)\n event_loop(journal_name, data)\n # list_entries(data)\n # add_entry(data)\n # journal.save(journal_name, data)", "def add_new_entry(self, ent):\n ent.inserted = time.strftime(\"%D\")\n ent = self.add_entry(ent)\n if ent is not None:\n self.modified_collection = True\n return ent", "def add_entry(self, message_id):\n message_words = set(self.message_corpus[message_id].split())\n entry = Entry(priority=self.get_priority(message_words), wordset=message_words, id=message_id)\n self.pq.put( entry )", "def add_entry(self, entry):\n self.precomposed_entries.append(entry)", "def createJournal(self):\n\n Dialog_text = QtGui.QDialog()\n ui = Ui_Dialog_memo(\"\")\n ui.setupUi(Dialog_text, \"New journal\")\n Dialog_text.exec_()\n newText = ui.getMemo()\n journalName = ui.getFilename()\n\n if journalName is None or journalName == \"\":\n QtGui.QMessageBox.warning(None, \"Warning\", \"No journal name selected\")\n return\n #check for non-unique filename\n if any(d['name'] == journalName for d in self.journals):\n QtGui.QMessageBox.warning(None, \"Warning\", str(journalName) + \" is already in use\")\n return\n\n # update database\n newJrnl = {'name':journalName, 'journal': newText, 'owner':self.settings['codername'], 'date':datetime.datetime.now().strftime(\"%a %b %d %H:%M:%S %Y\"), 'dateM':\"\", 'status':1}\n cur = self.settings['conn'].cursor()\n cur.execute(\"insert into journal(name,journal,owner,date,dateM,status) values(?,?,?,?,?,?)\",\n (newJrnl['name'],newJrnl['journal'],newJrnl['owner'],newJrnl['date'],newJrnl['dateM'],newJrnl['status']))\n self.settings['conn'].commit()\n self.log += \"Journal \" + newJrnl['name'] + \" created\\n\"\n\n # clear and refill table widget\n for r in self.journals:\n self.tableWidget_journals.removeRow(0)\n self.journals.append(newJrnl)\n for row, itm in enumerate(self.journals):\n self.tableWidget_journals.insertRow(row)\n item = QtGui.QTableWidgetItem(itm['name'])\n self.tableWidget_journals.setItem(row, self.NAME_COLUMN, item)\n item = QtGui.QTableWidgetItem(itm['date'])\n self.tableWidget_journals.setItem(row, self.DATE_COLUMN, item)\n self.tableWidget_journals.resizeColumnsToContents()\n self.tableWidget_journals.resizeRowsToContents()", "def add_entry(self, dn, attrs):\n # Convert our dict to nice syntax for the add-function using modlist-module\n if attrs and dn:\n ldif = modlist.addModlist(attrs)\n # Do the actual synchronous add-operation to the ldapserver\n logger.info(\"add entry %s.\" % ldif)\n self._conn.add_s(dn, ldif)", "def 
add_entry(unique_ID,value,label):\n\t\ttry:\n\t\t\tdata[unique_ID].appendEntry(value,label)\n\t\texcept InvalidInput:\n\t\t\t#deal with bad input\n\t\t\tpass", "def add_entry(self, ent, can_replace=True):\n if self.has_entry(ent.ID):\n if not can_replace:\n self.visual.error(f\"Entry {ent.ID} already exists in the collection!\")\n return None\n # delete existing, to replace\n self.remove(ent)\n ent = self.add_entry_to_collection_containers(ent)\n if ent is None:\n return ent\n self.add_entry_to_bibtex_db(ent)\n self.visual.log(f\"Added ID: {ent.ID}\")\n return ent", "def add(self, publication: P) -> None:\n ...", "def add_page(self, edition_id, page): \n journal = self.editions.get(edition_id, None)\n if journal == None: \n raise ValueError(\"This edition of the Journal Collection does not exist\")\n else: \n journal.add_page(page)", "def addBlogEntry(self, space, title, content = ''):\n return BlogEntry.create(self.pm_getSpaceManager().addBlogEntry(self._unbox(space), title, content), self._modelDataManager)", "def post_entry(self, body, link=None, to=None, **args):\n args.update(body=body)\n if link: args.update(link=link)\n if to: args.update(to=to)\n return self.fetch(\"/entry\", post_args=args)", "def add(self, record):\n self._hist_records[record.uid] = record", "def create_entry(entry):\n Entry.create(**entry)\n return entry", "def _addLogEntry(request, action, pagename, filename):\n from MoinMoin.logfile import editlog\n t = wikiutil.timestamp2version(time.time())\n fname = wikiutil.url_quote(filename)\n\n # Write to global log\n log = editlog.EditLog(request)\n log.add(request, t, 99999999, action, pagename, request.remote_addr, fname)\n\n # Write to local log\n log = editlog.EditLog(request, rootpagename=pagename)\n log.add(request, t, 99999999, action, pagename, request.remote_addr, fname)", "def add_system_entry(entry_message, data=''):\n return partial(__add_entry,\n event_type='SYSTEM',\n entry_message=entry_message,\n data=data)", "def add_workflow_entry(entry_message, data=''):\n return partial(__add_entry,\n event_type='WORKFLOW',\n entry_message=entry_message,\n data=data)", "def log_entry(self, timestamp, entry):\n if timestamp in self.log:\n self.log[timestamp].update(entry)\n else:\n self.log[timestamp] = entry", "def add(self, redis_entry, commit=False):\n local_file_path = normpath(join(settings.DROPBOX_TEMP_STORAGE_PATH,\n str(self.bearertoken_id),\n redis_entry.local_name))\n\n # Build Solr doc.\n doc = self._convert_redis_entry_to_solr_doc(redis_entry, local_file_path)\n log.debug('Posting file to Solr: {}'.format(self.solr.url) +\n '\\nDoc: {}'.format(doc) +\n '\\nFile: {}'.format(local_file_path))\n self.solr.add_file(doc, local_file_path)\n os.remove(local_file_path) # Delete the downloaded file.\n os.remove(local_file_path + '.metadata') # Delete the metadata file.\n\n if commit:\n self.commit()", "def _add_entry(self, entry_id: int, text: str, category=None, new_field_dict=None):\n if category is None:\n category = self.active_category\n if category is None:\n raise ValueError(\"Cannot add entry without specifying category if 'active_category' is None.\")\n if entry_id < 0:\n self.CustomDialog(\"Entry ID Error\", message=f\"Entry ID cannot be negative.\")\n return False\n if entry_id in self.get_category_data():\n self.CustomDialog(\n title=\"Entry ID Error\",\n message=f\"Entry ID {entry_id} already exists in category {camel_case_to_spaces(self.active_category)}.\",\n )\n return False\n\n self._cancel_entry_id_edit()\n self._cancel_entry_text_edit()\n 
self.get_category_data()[entry_id] = new_field_dict # add entry to category dictionary\n self._set_entry_text(entry_id, text)\n self.select_entry_id(entry_id, set_focus_to_text=True, edit_if_already_selected=False)\n\n # TODO\n # if from_history:\n # self.jump_to_category_and_entry(category, text_id)\n # if not from_history:\n # self.action_history.record_action(\n # undo=partial(self._delete_entry, category, text_id),\n # redo=partial(self._add_entry, category, text_id, text),\n # )\n # self.unsaved_changes.add((self.active_category, text_id, 'add'))\n\n return True", "def add_entry(source,lbs):\n\tnow = datetime.now()\n\tdate = now.strftime('%m-%d-%Y')\n\tdata = {date: {'Date': date, 'Weight': lbs}}\n\tsource.inject(data)", "def new_entry(title, content):\n\n title.strip # Remove the spaces from both sides.\n filename = f\"entries/{title}.md\"\n if default_storage.exists(filename):\n return False\n default_storage.save(filename, ContentFile(content))\n return True", "def _add_entry(self, cat_entry):\n\n # run through category apps and add orphans to Desktop\n # database, add DM and categories to database\n models.cat_apps(cat_entry)\n\n # run through and categories to database\n models.cat_list(cat_entry.categories)\n\n # create new - models.py \n cat_record = models.Categories(category=cat_entry.category) \n\n # fill in values \n cat_record.fill_record(cat_entry) \n\n BaseInfo.session.add(cat_record)\n\n try:\n BaseInfo.session.commit( )\n except exc.SQLAlchemyError:\n logger.error(\"Commit error\")", "async def add_entry(self, **values):\r\n query = \"INSERT OR IGNORE INTO {table_name} ({table_headers}) VALUES({entry_values})\"\r\n\r\n headers = \", \".join([e for e in values.keys()])\r\n entry_val = \", \".join(\"?\"*len(values.values()))\r\n attrs = [e for e in values.values()]\r\n\r\n query = query.format(table_name = self.name, table_headers=headers, entry_values=entry_val)\r\n\r\n await self.data.db.execute(query, attrs)\r\n await self.data.db.commit()", "def add_entry():\n username = util.remove_commas_from_string(request.form[\"name\"])\n link = util.remove_commas_from_string(request.form[\"ytLink\"])\n song = util.remove_commas_from_string(request.form[\"songName\"])\n\n festive = CHRISTMAS_MODE and \"christmasSong\" in request.form\n\n with database.connect_to_database() as db:\n user_id = database.get_userid(db, username)\n database.add_song(db, link, song, user_id, month=12 if festive else None)\n\n return redirect(url_for('main'))", "def save_journal(self, ical=None, no_overwrite=False, no_create=False, **ical_data):\n j = Journal(\n self.client,\n data=self._use_or_create_ics(ical, objtype=\"VJOURNAL\", **ical_data),\n parent=self,\n )\n j.save(no_overwrite=no_overwrite, no_create=no_create, obj_type=\"journal\")\n self._handle_relations(j.id, ical_data)\n return j", "def add_entry(self, entry_or_resource):\n def validate_resource_type(data):\n if 'resourceType' not in data:\n raise ValueError(f\"ill formed bundle entry: {data}\")\n\n if 'resource' not in entry_or_resource:\n # Bundles nest each entry under a 'resource'\n validate_resource_type(entry_or_resource)\n entry = {'resource': entry_or_resource}\n else:\n validate_resource_type(entry_or_resource['resource'])\n entry = entry_or_resource\n\n self.entries.append(entry)", "def save_txt():\n # open file and append, if it doesn't exist then create it.\n with open('journal_entries.txt', 'a+') as f:\n # .get the input in text widget at the first line, '0th' character, then read until the end\n f.write(\"\\n\" + 
get_date_time())\n for i in range(len(entries)):\n string = entries[i].get('1.0', 'end-1c')\n if string:\n f.write(\"\\n\" + string)", "def add_entry():\n\n host = input('\\nEnter Mail Server Host: ')\n email = input('\\nEnter Email ID: ')\n password = getpass(prompt='\\nEnter Password: ')\n mailbox = input('\\nEnter MailBox: ')\n mobile = input('\\nEnter Mobile Number: ')\n\n if not isfile('data.json'):\n print('No input data.json found...')\n create_input_file()\n\n append_entry(host, email, password, mailbox)", "def _add_account_entry(csv_content, journal_id, date, description, lines):\n for counter, line in enumerate(lines):\n csv_content.extend([\n {'journal_id': journal_id if counter == 0 else '',\n 'date': arrow.get(date).format('YYYY-MM-DD') if counter == 0 else '',\n 'ref': description if counter == 0 else '',\n 'line_ids/account_id': line['account_id'],\n 'line_ids/name': description,\n 'line_ids/debit': line['debit'] if 'debit' in line else '',\n 'line_ids/credit': line['credit'] if 'credit' in line else ''}\n ])", "def add_entries(self, *entries: Entry):\n for entry in entries:\n self.add_entry(entry)", "def createNewBlogEntry(self): #$NON-NLS-1$\r\n atomdoc = self._createNewEntryDocument()\r\n self._initNewEntryDocument(atomdoc)\r\n return ZAtomNewBlogEntry(atomdoc)", "def add_entries(self, entries):\n\n # If entries is a single entry, put in list for processing below\n if isinstance(entries, str):\n entries = [entries]\n\n for entry in entries:\n #Check if entries already exist\n try:\n self.entries[entry]\n # Entry doesn't already exist\n except KeyError:\n # Validate that entry is either an attribute of owner or in SystemLogEntries\n if not entry in SystemLogEntries and not entry in self.owner.__dict__:\n raise LogError(\"{0} is not an attribute of {1} or in SystemLogEntries\".\n format(entry, self.owner.name))\n # Add entry to self.entries dict\n self.entries[entry] = []\n\n # Entry exists\n else:\n # Issue warning and ignore\n warnings.warn(\"{0} is already an entry in log for {1}; use \\\"log_entry\\\" to add a value\".\n format(entry,self.owner.name))", "def create_entry(self, entry_group_name, entry_id, entry):\n try:\n entry = self.__datacatalog.create_entry(parent=entry_group_name,\n entry_id=entry_id,\n entry=entry)\n self.__log_entry_operation('created', entry=entry)\n return entry\n except (exceptions.FailedPrecondition,\n exceptions.PermissionDenied) as e:\n entry_name = '{}/entries/{}'.format(entry_group_name, entry_id)\n self.__log_entry_operation('was not created',\n entry_name=entry_name)\n raise e", "def put_attach_document(filename: str, entry_hash: str) -> str:\n g.ledger.file.insert_metadata(entry_hash, \"document\", filename)\n return f\"Attached '{filename}' to entry.\"", "def add(config, args):\n\tif args == []:\n\t\thelp()\n\t\treturn\n\tamount, context, date = 0, \"\", \"\"\n\tfor arg in args:\n\t\tif re.match(\"^[0-9]*([\\.,][0-9]{0,2}){0,1}$\", arg) and not amount:\n\t\t\tamount = float(arg.replace(',', '.'))\n\t\telif arg.startswith(\"@\") and (len(arg) > 1):\n\t\t\tcontext = ','.join([context, arg[1:]])\n\t\telif not date:\n\t\t\ttry:\n\t\t\t\tdate = datetime.datetime.strptime(arg, \"%Y-%m-%d\")\n\t\t\t\tdate = date.strftime(\"%Y-%m-%d\")\n\t\t\texcept(ValueError):\n\t\t\t\thelp()\n\t\t\t\treturn\n\n\tcontext = context.strip(',')\n\tif not date:\n\t\tdate = datetime.date.today().isoformat()\n\tif (amount != 0):\n\t\tw = csv.DictWriter(open(config['dbfile'], 'a'), delimiter=';',\n\t\t\tfieldnames=['date', 'amount', 
'context'],\n\t\t\tlineterminator='\\n')\n\t\trow = { 'date' : date, 'amount' : amount, 'context' : context }\n\t\tw.writerow(row)\n\t\tprint(\"added:\", ';'.join([date,str(amount),context]))", "def add(self, archive):\n if archive.name not in self._root['archives']:\n self._root['archives'][archive.name] = archive", "def new_archive_record(self, event):\n dbmanager = self.engine.db_binder.get_manager(self.data_binding)\n dbmanager.addRecord(event.record)", "def addEntry(self, symbol, address):\n self.table[symbol] = address", "def add_entry(self, entry): # Hashmap.add_entry\n\n if entry.hexdigest in self.contentHash:\n self.contentHash[entry.hexdigest].append(entry)\n else:\n self.contentHash[entry.hexdigest] = [ entry ]\n\n if entry.depth < self.minDepth:\n self.minDepth = entry.depth", "def append(self, entry):\n self.strings.append(entry)", "def add(self):\r\n self._svn('add')", "def add(self, entry):\n s = sppasUnicode(entry)\n entry = s.to_strip()\n if self.__case_sensitive is False:\n s = sppasUnicode(entry)\n entry = s.to_lower()\n\n if entry not in self.__entries:\n self.__entries[entry] = None\n return True\n\n return False", "def getJournal(self):\n if 'journal' in self.extras: \n return self.extras['journal']\n #--Default \n self.extras['journal'] = _('[No Journal Record Found.]')\n #--Open save file and look for journal entry\n inPath = os.path.join(self.dir,self.name)\n ins = Tes3Reader(self.name,file(inPath,'rb'))\n #--Raw data read\n while not ins.atEnd():\n #--Get record info and handle it\n (name,size,delFlag,recFlag) = ins.unpackRecHeader()\n if name != 'JOUR':\n ins.seek(size,1,name)\n #--Journal\n else:\n (subName,subSize) = ins.unpackSubHeader('JOUR')\n if subName != 'NAME':\n self.extras['journal'] == _('[Error reading file.]')\n else:\n reDate = re.compile(r'<FONT COLOR=\"9F0000\">(.+?)</FONT><BR>')\n reTopic = re.compile(r'@(.*?)#')\n data = ins.read(subSize)\n data = reDate.sub(ReplJournalDate(),data)\n data = reTopic.sub(r'\\1',data)\n self.extras['journal'] = cstrip(data)\n break\n #--Done\n ins.close()\n print self.extras['journal']\n return self.extras['journal']", "def add_entry_to_collection_containers(self, ent):\n\n ID = ent.ID.lower()\n title = ent.title.lower()\n # update object lookup dict\n if ID in self.entries:\n self.visual.error(\"Entry with id {} already in entries dict!\".format(ID))\n return None\n self.entries[ID] = ent\n # update title-id mapping\n self.title2id[title] = ID\n for auth in ent.author:\n if auth not in self.author2id:\n self.author2id[auth] = []\n self.author2id[auth].append(ID)\n\n # update ids and titles lists\n self.id_list.append(ID)\n self.title_list.append(title)\n # update maximum ID / title lengths\n if len(ent.ID) > self.maxlen_id:\n self.maxlen_id = len(ent.ID)\n if len(ent.title) > self.maxlen_title:\n self.maxlen_title = len(ent.title)\n if ent.file:\n self.all_pdf_paths.append(ent.file)\n return ent", "def add_entry(self, entry: Entry) -> bool:\n for e in self.get_entries():\n if e.get_name() == entry.get_name():\n return False\n self.__entries.append(entry)\n self.__entries.sort()\n return True", "def append_entry(host, email, password, mailbox):\n\n new_entry = {\n\n 'host': host,\n 'email': email,\n 'password': password,\n 'mailbox': mailbox\n }\n\n with open('data.json') as f:\n data = load(f)\n\n data[\"items\"].append(new_entry)\n\n with open('data.json', 'w') as outfile:\n dump(data, outfile, indent=4)\n\n print('\\nNew Entry Added Successfully!')", "def add_article(self, article):\n pass", "def 
addQueueEntry(*args):\n try:\n #A unique id for each command.\n self.cmd_seq = self.cmd_seq + 1\n #Create a new queu entry\n self.entries[self.cmd_seq] = _QueueEntry(self, name, args, self.cmd_seq, self.log)\n #append it to the command queue\n self.queue.append(self.cmd_seq)\n #Return handle to the new entry for setting callbacks on.\n return self.entries[self.cmd_seq]\n except Exception as ex:\n self.log.failure(\"Error in addQueueEntry {err!r}\",err=str(ex))", "def add_entry(self, scenario_info):\n scenario_id, status = scenario_info[\"id\"], \"created\"\n sql = self.insert()\n self.cur.execute(\n sql,\n (\n scenario_id,\n status,\n ),\n )", "def add_entry(self, scenario_info):\n scenario_id, status = scenario_info[\"id\"], \"created\"\n sql = self.insert()\n self.cur.execute(\n sql,\n (\n scenario_id,\n status,\n ),\n )", "def add(self, path, title, author):\n path = path.decode('utf8')\n title = title.decode('utf8')\n author = author.decode('utf8')\n cursor = self._dbcon.cursor()\n filename = os.path.basename(path)\n dirname = os.path.dirname(path)\n t = (title, author, filename, dirname)\n sql = u\"insert into books values (?, ?, ?, ?)\"\n cursor.execute(sql, t)\n self._dbcon.commit()\n cursor.close()", "def add_entry(self, new_entry):\n existing_entry = self._entries.get(new_entry.key)\n if existing_entry is not None:\n existing_entry.add_menge(new_entry.get_menge())\n for occ in new_entry.occurrences:\n existing_entry.add_occurrence(occ)\n return existing_entry\n else:\n self._entries[new_entry.key] = new_entry\n self._order.append(new_entry.key)\n return None", "def add(self, author, post):\n if not author in self.authors:\n self.authors.append(author)\n self.posts[author].append(post)\n return", "def add_item_entry(self, the_spec):\n debug(\"Adding entry {}\".format(the_spec))\n entry = tk.Entry(self.current_parent)\n self.entries[the_spec.value] = entry\n if not self.parent_is_grid:\n entry.pack()\n return entry", "def creating_entry(self):\n response = \"\"\n today = str(date.today())\n curent_time = str(datetime.time(datetime.now()))\n entry = Diary(self.entry_id, self.title, self.body)\n lst = {}\n lst[\"entry_id\"] = entry.entry_id\n lst[\"title\"] = entry.title\n lst[\"date\"] = today\n lst[\"time\"] = curent_time\n lst[\"body\"] = entry.body\n lst[\"updated\"] = entry.updated\n if Validate.validate_entry(Diary.entries, entry):\n response = jsonify({\"message\": \"Duplicate data,Try again\"})\n response.status_code = 409\n else:\n Diary.entries.append(lst)\n response = jsonify({\"message\": \"Entry saved\", \"data\": lst})\n response.status_code = 201\n return response", "def bark(self, event=None):\n if event is not None:\n try:\n Journal.objects.create(\n name=self,\n notes=event\n )\n return True\n except Exception as e:\n print('{0} {1}'.format(time.strftime('%Y%m%d.%H%M%S+UTC', time.gmtime()), event))\n print('{0} {1}'.format(time.strftime('%Y%m%d.%H%M%S+UTC', time.gmtime()), e))\n return False", "def journal_name(self, journal_name):\n\n self._journal_name = journal_name", "def add(name, number, phonebook):\n\n phonebook_data = read_phonebook(phonebook)\n\n if phonebook_data.get(name):\n raise DuplicateError(\"This entry already exists. 
To make changes, \"\n \"use update_number or update_name.\")\n\n else:\n phonebook_data[name] = number\n print \"Entry added:\", name, number\n save(phonebook_data, phonebook)", "def main(args, loglevel):\n\tlogging.basicConfig(format=\"%(levelname)s: %(message)s\", level=loglevel)\n\tlogging.debug(\"Your Argument: %s\" , args.date)\n\tvalidate(args.date)\n\tresult = fetchJournalEntries(args.date)\n\tfinalResult = printJournal(result)\n\tfinalResult", "def create_entry():\n new_entry = DB_Entry() # Create instance of entry to add the info to\n print('Eratosthenes is ready to add your new entry.\\n')\n new_entry.set_id()\n title = input('Enter the title:\\n')\n new_entry.set_title(title)\n authors = input('Enter the authors as list of surname, firstname separated by semicolons:\\n')\n new_entry.set_authors(authors)\n try:\n year = int(input('Enter the year:\\n'))\n except ValueError:\n try:\n year = int(input('Enter the year as an integer:\\n'))\n except ValueError:\n print('You failed to follow basic instructions. The year is set to 2000\\n')\n year = 2000\n new_entry.set_year(year)\n pub_type = input('Enter the publication type as article/review/book/other:\\n')\n try:\n new_entry.set_type(pub_type)\n except ValueError:\n try:\n pub_type = input('Type must be one of article/review/book/other:\\n')\n new_entry.set_type(pub_type)\n except ValueError:\n print('You failed to follow basic instructions. Type is now set to \\'other\\'\\n')\n pub_type = 'other'\n new_entry.set_type(pub_type)\n keywords = input('Enter list of keywords separated by semicolons:\\n')\n new_entry.set_keywords(keywords.split(';'))\n current_path = input('Enter the current path to the file\\n')\n current_path = current_path.replace('~', '/Users/marcus')\n if not os.path.isfile(current_path):\n print('File not found. 
Please try again')\n current_path = input('Enter the current path to the file\\n')\n if not os.path.isfile(current_path):\n print('File not found')\n new_entry.set_new_path()\n db_actions.copy_file(new_entry.get_path(), current_path)\n return new_entry", "def load(journal: Journal, file: Path) -> None:", "def add_entry(results):\n db = get_db()\n data_to_insert = format_json_to_dicts(results)\n\n db.executemany(\"insert into python_repos ( repo_id, name, url, created_date, last_push_date, description, stars, avatar) \\\n values (:repo_id, :name, :url, :created_date, :last_push_date, :description, :stars, :avatar)\", data_to_insert)\n\n db.commit()\n flash('Updated ' + str(time.strftime(\"%Y-%m-%d %H:%M\")))\n return redirect(url_for('show_entries'))", "def add_log_entry_string(self, logstring):\n parsed = self.parse_log_entry(logstring)\n self.add_log_entry(parsed)", "def add_entry(self, start_day, start_hour, stop_day, stop_hour, mode, radar=[\"ALL\"]):\n self.entries.append(Entry(self.year, self.month, start_day, start_hour, stop_day, stop_hour, mode, radar))", "def addLog(log_info,seed_keyword=\"\",meta_keyword=\"\"):\n payload = {\n \"user\" : os.getlogin(),\n \"seed_keyword\":seed_keyword,\n \"meta_keyword\":meta_keyword,\n \"log_info\":log_info\n }\n res = requests.post('{}add/issue/'.format(base_url),data=payload)\n return res.status_code", "def add_book(name, author):\n BOOKS.append({'name': name, 'author': author, 'read': False})", "def add_to_cache(filename: str, entry: str) -> None:\n\n with open(filename, \"a\") as f:\n f.write(entry.strip())\n f.write(\"\\n\")", "def add(self,\n address: Union[str, Address],\n label: str,\n **kwargs) -> AddressBookEntryModel:\n if isinstance(address, str):\n address = Address(address=address, network=self._network)\n request_model = AddRequest(address=address, label=label)\n data = self.post(request_model, **kwargs)\n data['address'] = Address(address=data['address'], network=self._network)\n return AddressBookEntryModel(**data)", "def add_entry(self):\r\n self.session = tk.Toplevel(self.master, **jt.bframe_style)\r\n je.Editor(self.session, self.source.tbl, self.source)", "def post_comment(self, char, event):\r\n msg = self.cleaned_data[\"journal_text\"]\r\n white = not self.cleaned_data[\"private\"]\r\n char.messages.add_event_journal(event, msg, white=white)", "def viewJournal(self):\n\n x = self.tableWidget_journals.currentRow()\n Dialog_memo = QtGui.QDialog()\n ui = Ui_Dialog_memo(self.journals[x]['journal'])\n ui.setupUi(Dialog_memo, self.journals[x]['name'] +\" \"+ self.journals[x]['owner'] +\", \"+self.journals[x]['date'])\n Dialog_memo.exec_()\n # update model and database\n newText = ui.getMemo()\n newText = newText.decode('unicode-escape')\n try:\n pass\n newText = newText.decode(\"utf-8\", \"replace\")\n except UnicodeDecodeError:\n print(\"unicode error\")\n if newText != self.journals[x]['journal']:\n self.journals[x]['journal'] = newText\n cur = self.settings['conn'].cursor()\n cur.execute(\"update journal set journal=? 
where name=?\", (newText, self.journals[x]['name']))\n self.settings['conn'].commit()", "def add_a_new_entry(self):\n id = self.input_id()\n name = self.input_name()\n birthday = self.input_birthday()\n midterm = self.input_score(1, 'Input Midterm Score')\n finalterm = self.input_score(1, 'Input Finalterm Score')\n\n new_list = pd.DataFrame(\n [[id, name, pd.Timestamp(birthday), midterm, finalterm, np.nan, np.nan]],\n columns=self.columns)\n new_list.astype(self.dtype)\n\n self.merge_list(new_list)", "def on_submit(self):\n\n\t\tfor accounting_entry in self.get('accounting_entries'):\n\t\t\tledger_entry_doc = frappe.get_doc({\n\t\t\t\t'doctype': 'Ledger Entry',\n\t\t\t\t'posting_date': self.posting_date,\n\t\t\t\t'account': accounting_entry.account,\n\t\t\t\t'debit': accounting_entry.debit,\n\t\t\t\t'credit': accounting_entry.credit,\n\t\t\t\t'voucher_type': 'Journal Entry',\n\t\t\t\t'voucher_number': self.name,\n\t\t\t\t'company': self.company\n\t\t\t})\n\t\t\tledger_entry_doc.insert()", "def add(args):\n name = args[1]\n number = args[2]\n phonebook = args[3]\n with open(phonebook, 'a') as f:\n f.write('%s %s\\n' % (name, number))\n f.close()\n return [\"Successfully added %s.\" % name]" ]
[ "0.7191207", "0.7089341", "0.70372695", "0.6757535", "0.66126496", "0.6612139", "0.6573566", "0.6421015", "0.63883895", "0.63381886", "0.6327133", "0.622145", "0.61824554", "0.61513895", "0.60793686", "0.6065312", "0.6048327", "0.5997899", "0.59614867", "0.59608066", "0.5960753", "0.5960314", "0.5933251", "0.590133", "0.5886712", "0.585903", "0.5825395", "0.58081627", "0.57588476", "0.5743558", "0.5717617", "0.57128435", "0.5690161", "0.5686018", "0.56836826", "0.5644892", "0.55939025", "0.5580057", "0.55596817", "0.55524784", "0.55306125", "0.55169564", "0.55119795", "0.550009", "0.54976165", "0.5492156", "0.5486866", "0.5479233", "0.54789895", "0.5476158", "0.5469821", "0.5468346", "0.5464949", "0.54457784", "0.54350096", "0.5429503", "0.5426701", "0.54188204", "0.5413144", "0.5410183", "0.540217", "0.5390532", "0.5385374", "0.5382153", "0.537942", "0.53791016", "0.5375341", "0.53660834", "0.53455734", "0.53340536", "0.53248537", "0.53204435", "0.53169495", "0.5316629", "0.5315977", "0.5315977", "0.53127927", "0.5310291", "0.53070927", "0.530482", "0.52985704", "0.52956504", "0.529254", "0.52923185", "0.5290651", "0.5272928", "0.526588", "0.52586114", "0.5252993", "0.5245991", "0.5242964", "0.5239676", "0.52334887", "0.52207285", "0.5219402", "0.52109325", "0.52102494", "0.5206041", "0.5203134", "0.520023" ]
0.62996215
11
Remove journal entry at position `pos`.
def remove_entry(self, pos: int) -> None:
    del self.entries[pos]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_pos(self, pos):\n yield from self.command('delete {}'.format(pos))\n return True", "def delete(self, pos):\n if self.is_playing() and self.current_position() == pos:\n self.x.playback_stop().wait()\n self.x.playlist_remove_entry(pos).wait()", "def delete_row(self, pos):\n del self._grid[pos]", "def pyreadline_remove_history_item(pos: int) -> None:\n # Save of the current location of the history cursor\n saved_cursor = readline.rl.mode._history.history_cursor\n\n # Delete the history item\n del readline.rl.mode._history.history[pos]\n\n # Update the cursor if needed\n if saved_cursor > pos:\n readline.rl.mode._history.history_cursor -= 1", "def delete(self,pos):\n pos.next = pos.next.next", "def delete_data(self, *pos):\n r, c = pos\n self._grid[r][c] = None", "def remove(self, pos, length):\n if pos in self.removals:\n self.removals[pos] += length\n else:\n self.removals[pos] = length", "def remove_from_level(level, position):\n size = level_size(level)\n index = position_to_index(position, size)\n level = level[:index] + AIR + level[1 + index:]\n return level", "def delete(self, del_pos=None):\n if del_pos is None:\n del_pos = self.__length\n if self.__list is None:\n print \"Nothing to remove.\"\n else:\n if del_pos == 0:\n self.__list = self.__list.get_next()\n else:\n prior = self.__list\n current = self.__list.get_next()\n current_pos = 1\n while current.get_next() is not None and current_pos < del_pos:\n prior = current\n current = current.get_next()\n current_pos += 1\n prior.set_next(current.get_next())\n self.__length -= 1", "def delete_node_at_pos(self, pos):\n if self.head:\n cur_node = self.head\n if pos == 0:\n self.head = cur_node.next\n cur_node = None\n return \n\n prev = None\n count = 0 \n while cur_node and count != pos:\n prev = cur_node\n cur_node = cur_node.next\n count += 1\n\n if cur_node is None:\n return \n\n prev.next = cur_node.next\n cur_node = None", "def delete(self, node):\n\n # logger_cagada.debug(\"norrando nodo %s\" % (type(node)))\n entry = self.entry_finder.pop(node)\n # logger_cagada.debug(\"la entry q c borra %s\" % entry)\n entry[-1] = self.REMOVED\n # logger_cagada.debug(\"el heap es %s\" % self.heap)\n return entry[0]", "def track_del(self,posicion):\n self.tracks.pop(posicion)", "def DeleteToolByPos(self, pos):\r\n \r\n if pos >= 0 and pos < len(self._items):\r\n \r\n self._items.pop(pos)\r\n self.Realize()\r\n return True\r\n\r\n return False", "def delete_column(self, pos):\n for i in range(len(self._grid)):\n del self._grid[i][pos]", "def troop_remove(self, pos):\n x, y = pos\n # tile_id = AIV_SIZE * y + x\n \n troop = self.tmap[y, x]\n if (troop == 0):\n return\n \n # update tmap\n self.tmap[y, x] = 0\n\n # first remove thing from tarr, then find something new in tmap\n\n\n # for slot in range(0, len(self.tarr)):\n # if (self.tarr[slot] == tile_id):\n # self.tmap[y, x] = slot//10\n \n # # update tarr\n # for slot in range(10*troop, 11*troop):\n # if (self.tarr[slot] == tile_id):\n # for slot_slot in range(slot, 11*troop-1):\n # self.tarr[slot_slot] = self.tarr[slot_slot+1]", "def deleteEntry (self,event=None):\n \n c = self.c ; box = self.box\n \n # Work around an old Python bug. 
Convert strings to ints.\n items = box.curselection()\n try:\n items = map(int, items)\n except ValueError: pass\n \n if items:\n n = items[0]\n p = self.position[n]\n del self.positionList[n]\n if p in c.visitedList:\n c.visitedList.remove(p)\n self.fillbox()", "def cleanTileAtPosition(self, pos):\n self.tiles[pos] = 'clean'", "def remove(self, e):\n p = self._find_position(e) # try to locate existing element\n if p is not None:\n self._data.delete(p) # delete if found", "def remove_column(self, pos, labels=\"REMOVE\"):\n MutableAlignment.remove_column(self, pos)\n if labels == \"RESET\":\n self._reset_col_names()\n elif labels == \"REMOVE\":\n self._col_labels = self._col_labels[:pos] + \\\n self._col_labels[pos + 1:]", "def remove(self, item) -> None:\n entry = self.entry_finder.pop(item)\n entry[-1][0] = None", "def remove(self, val):\n if val in self.posFind and self.posFind[val] != -1:\n delPos = self.posFind[val]\n self.nums[delPos] = self.nums[-1]\n self.posFind[self.nums[-1]] = delPos\n self.posFind[val] = -1\n self.nums.pop()\n return True\n return False", "def delete_entry(title):\n filename = f\"entries/{title}.md\"\n if default_storage.exists(filename):\n default_storage.delete(filename)", "def fs_remove_entry(self, path):\n\t\treturn Job(SDK.PrlSrv_FsRemoveEntry(self.handle, path)[0])", "def remove(self, uid: int) -> int:\n\n bookmark = self.find_id(uid)\n\n deletions = 0\n\n if bookmark:\n deletions = self._delete(\n \"UPDATE bookmarks SET deleted=CURRENT_TIMESTAMP WHERE rowid=?\",\n (bookmark[\"rowid\"],)\n )\n\n if bookmark[\"tags\"]:\n cherrypy.engine.publish(\"cache:clear\", \"bookmarks:all_tags\")\n\n return deletions", "def delete_after_position(self, position: int) -> None:\n if self.get_length() < position:\n return\n elif self.get_length() == 0:\n return self.delete_at_beginning()\n elif self.get_length() == position:\n return self.delete_at_end()\n else:\n previous = self.head\n current = self.head\n for _ in range(position):\n previous = current\n current = current.get_next_node()\n previous.set_next_node(current.get_next_node())\n current.get_next_node().set_previous_node(previous)\n current.set_next_node(None)\n current.set_previous_node(None)\n temp = current.get_data()\n del current\n self._decrease_length()\n return temp", "def _delChar(self, pos):\n nonGlyph = countInSet(self.text[:pos], self.NO_GLYPH_CHARS)\n\n self.allVertices = self.allVertices[:(pos - nonGlyph) * 4]\n self.allIndices = self.allIndices[:pos - nonGlyph]\n self.colors.pop(pos)\n self._string_metric = self._string_metric[:pos]\n self.text = self.text[:pos] + self.text[pos + 1:]\n self._updateGlyphs(pos)", "def DeleteElement(self, position):\n self.__context.builder.DocumentElementDelete(self._blip_data.wave_id,\n self._blip_data.wavelet_id,\n self._blip_data.blip_id,\n position)", "def delete_entry_from_db(entry):\n db.session.delete(entry)\n db.session.commit()", "def cleanTileAtPosition(self, pos):\n #Return the floor of x as a float, the largest integer value less than\n #or equal to x\n posx = pos.getX()\n posy = pos.getY()\n posx = math.floor(posx)\n posy = math.floor(posy)\n self.tiles[(posx, posy)] = 1 # using 0 as dirty value, 1 as clean value, of key tuple pos(x,y)\n #self.printTiles()\n #raise NotImplementedError", "def _del_entry(self, cvs_path):\n\n del self._entries[cvs_path]", "def delete_entry():\n u_id = request.args(0) or redirect(URL('moderation', 'new_entries'))\n db(db.lioli_main.unique_id == u_id).delete()\n redirect(URL('new_entries'))\n return dict()", "def 
deleteJournal(self):\n\n x = self.tableWidget_journals.currentRow()\n journalName = self.journals[x]['name']\n #print((\"Delete row: \" + str(x)))\n Dialog_confirmDelete = QtGui.QDialog()\n ui = Ui_Dialog_confirmDelete(self.journals[x]['name'])\n ui.setupUi(Dialog_confirmDelete)\n ok = Dialog_confirmDelete.exec_()\n\n if ok:\n cur = self.settings['conn'].cursor()\n cur.execute(\"delete from journal where name = ?\", [journalName])\n self.settings['conn'].commit()\n for item in self.journals:\n if item['name'] == journalName:\n self.journals.remove(item)\n self.tableWidget_journals.removeRow(x)\n self.log += \"Journal \" + journalName + \" deleted\\n\"", "def remove_entry(self, number: int) -> None:\n raise NotImplementedError", "def building_remove(self, pos):\n x, y = pos\n step = self.bmap_step[y,x]\n\n if step == 0:\n return\n\n for x in range(0,AIV_SIZE):\n for y in range(0,AIV_SIZE):\n if (self.bmap_step[y,x] == step):\n self.bmap_step[y,x] = 0\n self.bmap_id[y,x] = 0\n self.bmap_size[y,x] = 0\n self.bmap_tile[y,x] = 0\n\n self.step_cur -= 1\n self.step_tot -= 1", "def remove():", "def delete_positions(self, positions):\n\n for position in positions:\n self.positions.remove(position)\n\n self.db_upsert(force_insert=True)", "def delete_node_position(self, position):\n if not self.head:\n print('List is empty. No item to delete')\n return\n if position == 1:\n self.head = self.head.next\n return\n temp = self.head\n count = 1\n while temp and count < position - 1:\n count += 1\n temp = temp.next\n if not temp:\n print('Node doesn\\'t exist')\n return\n temp.next = temp.next.next", "def pop_pos(self, pos=0):\n array = self.array\n item = array[pos]\n if len(array) == 1:\n del array[pos]\n else:\n compare = self.compare\n del self.pos[array[pos]]\n if pos == len(array) - 1:\n array.pop() \n else:\n array[pos] = array.pop() \n self.pos[array[pos]] = pos\n low, high = pos, pos*2 + 1\n while high < len(array):\n if ((high+1 < len(array)\n and compare(array[high], array[high+1]) > 0)):\n high = high+1\n if compare(array[low], array[high]) <= 0:\n break\n self.pos[array[high]] = low\n self.pos[array[low]] = high\n array[low], array[high] = array[high], array[low]\n low, high = high, 2*high+1\n return item", "def _del_entry(self, cvs_path):\n\n self._make_writable()\n self._del_entry(cvs_path)", "def cleanTileAtPosition(self, pos):\n\t\tfor i in self.tiles:\n\t\t\tif i.positionInTile(pos):\n\t\t\t\ti.cleanTile()", "def remove_row(self, row_id):", "def del_item(self, item):\n index = self.board[item.pos[0]][item.pos[1]].index(item)\n del self.board[item.pos[0]][item.pos[1]][index]", "def removePiece(self, address):\r\n\r\n try:\r\n del self.pieces[address]\r\n except KeyError:\r\n print(\"error removing piece!\")", "def remove(self, data_id, idx):\n temp = self.database[data_id]\n del temp[idx]\n self.database[data_id] = temp", "def del_row(self, index):\n self.data.remove(self.data[index])", "def cleanTileAtPosition(self, pos):\n posX = pos.getX()\n posY = pos.getY()\n if (int(posX), int(posY)) not in self.clean_tiles:\n self.clean_tiles.append((int(posX), int(posY)))", "def cleanTileAtPosition(self, pos):\n #convert pos to grid reference.\n #check if grid reference is in tilesCleaned\n self.x = math.floor(pos.getX())\n self.y = math.floor(pos.getY())\n if (self.x, self.y) not in self.tilesCleaned:\n self.tilesCleaned.append((self.x, self.y))\n #self.room[pos.getX()][pos.getY()]==0 #set position to clean (array element = 0)\n #this method does not return anything.\n #raise NotImplementedError 
#refer https://docs.python.org/2/library/exceptions.html", "def del_row(self, row_index):\n ...", "def remove_card(self, pos=-1):\n if not self.is_empty():\n if pos < 0:\n card = self.cards.pop()\n else:\n card = self.cards.pop(pos)\n self.set_card_positions()\n return card\n return None", "def remove(self, document):\n return self.db.pop(document['id'], None)", "def clear_pos(self, addr: str):\n self.get_pos(addr).clear()", "def removeDataAt(self, address: ghidra.program.model.address.Address) -> None:\n ...", "def delete(self, entry): # Hashmap.delete\n\n entry.delete()\n\n # remove the entry from the hashmap\n list=self.contentHash[entry.hexdigest]\n newlist = []\n for e in list:\n if e != entry:\n newlist.append(e)\n\n # if there are no more entries for this hashval, remove\n # it from the dictionary m\n if len(newlist):\n self.contentHash[entry.hexdigest] = newlist\n else:\n del self.contentHash[entry.hashval]\n\n # also remove all the deleted children from the hashmap\n self.prune()", "def _remove(self, cell_coord, o):\n cell = self.d[cell_coord]\n cell.remove(o)\n\n # Delete the cell from the hash if it is empty.\n if not cell:\n del(self.d[cell_coord])", "def remove_item(self, idx_of_item):\n del self.items[idx_of_item]", "def truncate(self, pos=None):\n ...", "def delete(self):\n del self.shx.atoms[self.index]", "def delete_entry(self, user, entry):\r\n try:\r\n self.curs.execute(f\"\"\"DELETE FROM {user} WHERE application = ? \"\"\", (entry))\r\n except sq.OperationalError:\r\n return self.err_find", "def delItem(self,row,column):\n data = self.data\n if row in data and column in data[row]:\n del data[row][column]\n self.hasChanged = True", "def removePlayer(self, index):\n serial = self.seats[index]\n self.seats[index]=0\n if serial in self.players:\n del self.players[serial]", "def removeRow(self, index: int) -> None:\n ...", "def rm(self, line):\n self.log('rm({0})'.format(line))\n if line is False:\n return False\n if isinstance(line, str):\n line = line.split('\\n')\n if not isinstance(line, list):\n raise TypeError(\"Parameter 'line' not a 'string' or 'list', is {0}\".format(type(line)))\n local_changes = False\n for this in line:\n if this in self.contents:\n while this in self.contents:\n self.log('Removed \"{0}\" from position {1}'.format(this, self.contents.index(this)))\n self.contents.remove(this)\n self.changed = local_changes = True\n else:\n self.log('\"{0}\" not in {1}'.format(this, self.filename))\n if self.sorted and local_changes:\n self.sort()\n return local_changes", "def del_contact(contact):\n db = get_db()\n \n if contact.get_hash_name() in db:\n db.pop(contact.get_hash_name())\n write_db(db)\n sys.exit(logger.ok('success: contact ' + '\"%s\"' % contact.get_name() + ' deleted'))\n else:\n sys.exit(logger.fail('fatal: contact does not exist'))", "def remove(self, author):\n self.posts.pop(author)\n self.authors.remove(author)", "def delete_node_at_position(self, position):\n\t\tif self.root is None:\n\t\t\traise EmptyRootException(\"ERROR: No node available in list. 
Please insert node in list.\")\n\t\tnode_count = 0\n\t\tif node_count == position:\n\t\t\tself.delete_node_at_beginning()\n\t\telse:\n\t\t\tcurrent_node = self.root\n\t\t\tfor node_count in range(position-1):\n\t\t\t\tcurrent_node = current_node.next\n\t\t\tcurrent_node.next = current_node.next.next\n\t\t\tself.display_nodes()", "def DELETE(uid: str) -> None:\n\n try:\n record_id = int(uid)\n except ValueError as exc:\n raise cherrypy.HTTPError(400, \"Invalid uid\") from exc\n\n deleted_rows = cherrypy.engine.publish(\n \"bookmarks:remove\",\n record_id\n ).pop()\n\n if not deleted_rows:\n raise cherrypy.HTTPError(404, \"Invalid url\")\n\n cherrypy.response.status = 204", "def remove_edit(self, record):\n if not self.edit_applied:\n return\n\n sequence = record.seq\n logging.debug(\"Removing edit to sequence position %d and offset %d, converting %s=%s to %s\"\n % (self.sequence_position, self.offset,\n sequence[self.sequence_position + self.offset:self.sequence_position + self.offset\n + len(self.edit_to)],\n self.edit_to, self.edit_from))\n assert(sequence[self.sequence_position + self.offset:self.sequence_position + self.offset + len(self.edit_to)]\n == self.edit_to)\n updated_sequence = sequence[:self.sequence_position + self.offset]\n updated_sequence += self.edit_from \n updated_sequence += sequence[self.sequence_position + self.offset + len(self.edit_to):]\n \n record.seq = updated_sequence\n self.edit_applied = False\n self.offset = 0\n return record", "def remove_todo(self, todo):\n self.deleted_items.append(todo)\n print 'removed \"%s\"' % todo.text", "def do_remove(self, arg):\n jail_destroy('remove', arg)", "def remove_reminder(self, reminder_info):\n store = self.load_data(default=[])\n if reminder_info in store:\n index = store.index(reminder_info)\n del store[index]\n self.save_data(store)", "def delete_entry():\n if not check_admin_logged() :\n abort(403)\n\n title = request.args.get('title', default='')\n category = request.args.get('category', default='')\n buydate = request.args.get('buydate', default='')\n ssid = decrypt_book_record(request.args.get('ssid'))\n\n pre_delete_entry = Entries.query.filter_by(\n id=ssid, title=title, category=category, \\\n buydate=buydate).first()\n\n if pre_delete_entry is not None :\n try :\n db.session.delete(pre_delete_entry)\n db.session.commit()\n flash(u'删除成功')\n except InvalidRequestError as e :\n log_error('error when delete:')\n log_error(e.message)\n #log_error(u'when delete item %s ' % str(pre_delete_entry))\n # DO NOT use the above one for the F UnicodeEncodeError\n log_error(u'when delete item %s ' % pre_delete_entry)\n db.session.flush()\n flash(u'因为数据库操作原因,删除失败')\n else :\n flash(u'删除失败')\n\n return redirect(url_for('show_entries_admin'))", "def remove(self, idx_):\n self._dice.pop(idx_)", "def __delitem__(self, idx):\n self.pop(idx)", "def unmark(self, position):\n\n if self.selected_text_file is None:\n return\n if len(self.case_text) == 0:\n return\n cursor = self.ui.textBrowser.cursorForPosition(position)\n self.ui.textBrowser.setTextCursor(cursor)\n\n location = self.ui.textBrowser.textCursor().selectionStart()\n unmarked = None\n for item in self.case_text:\n if item['pos0'] <= location <= item['pos1']:\n unmarked = item\n if unmarked is None:\n return\n\n # Delete from database, remove from case_text and update gui\n cur = self.app.conn.cursor()\n cur.execute(\"delete from case_text where fid=? and caseid=? and pos0=? 
and pos1=?\",\n (unmarked['fid'], unmarked['caseid'], unmarked['pos0'], unmarked['pos1']))\n self.app.conn.commit()\n if unmarked in self.case_text:\n self.case_text.remove(unmarked)\n self.unlight()\n self.highlight()\n # The file may be assigned Yes in the table widget but should be empty\n self.get_files()\n self.fill_table()\n self.app.delete_backup = False", "def remove(self, item):\n try:\n entry = self.set.pop(item)\n entry[-1] = self.REMOVED\n except KeyError:\n print(\"Can't remove a non-existing item\")", "def RemoveMarker(self, marker, line):\n assert isinstance(marker, ed_marker.Marker)\n marker.Set(self, line, delete=True)", "def remove_layer(self, layer_pos):\n self.stack.pop(layer_pos)\n return", "def delete_element_from_store(entry_sequence, is_propagated_call = False):\n\t\tglobal board, node_id\n\t\tsuccess = False\n\t\ttry:\n\t\t\tdel board[int(entry_sequence)]\n\t\t\tsuccess = True\n\t\texcept Exception as e:\n\t\t\tprint e\n\t\treturn success", "def remove(self, *args):\n self.__execute(self.pkgin_bin, \"remove\", *args)", "def remove(self, uid):\n marker = object()\n name = self._reverse.get(uid, marker)\n if name is not marker:\n del self._reverse[uid]\n try:\n del self._forward[name]\n except KeyError:\n # If it isn't there, good, that is the outcome we wanted,\n # right?\n pass", "def delete_puush_entry(entry: PuushEntry):\n if entry.identifier is None:\n raise Exception(\"PuushEntry identifier was None!\")\n\n logprint(\"Deleting Puush entry \\\"{name}\\\" (ID: {ident})...\".format(name=entry.filename,\n ident=entry.identifier))\n\n # Prematurely ipdate list of deleted entries' IDs, as it is used by response_texts_to_entries.\n DELETED_ENTRIES_IDS.append(puush.identifier)\n\n # Delete the given puush by id and store the updated list of puush (history?) 
entries\n return response_texts_to_entries(make_post_request(\n DELETION_API, data={\"k\": config[\"api_key\"], \"i\": entry.identifier}))", "def delete_entry(self, arg):\n nums = self.selector.select_by_index(arg)\n if nums is None or not nums:\n self.visual.error(\"Need a selection to delete.\")\n return\n to_delete = [self.reference_entry_id_list[n] for n in nums]\n old_len, del_len = len(self.reference_entry_id_list), len(to_delete)\n for entry_id in to_delete:\n self.entry_collection.remove(entry_id)\n self.visual.log(\"Deleted entry {}\".format(entry_id))\n remaining = [x for x in self.reference_entry_id_list if x not in to_delete]\n self.visual.log(\"Deleted {}/{} entries, left with {}\".format(del_len, old_len, len(remaining)))\n self.push_reference_list(remaining, \"deletion\", force=True)\n self.unselect()", "def remove_milestone(username, skillpath, level, date, comment):\n date_id = Date.query.filter_by(date=date).first().id\n skill = database_controller.get_skill(skillpath)\n user = database_controller.get_user(username)\n\n to_delete = MilestoneAssociation.query.filter_by(milestone_skill_id=skill.id,\n milestone_users_id=user.id,\n level=level,\n milestone_date_id=date_id,\n comment=comment)\n if to_delete is None:\n raise AttributeError\n to_delete.delete()\n db.session.commit()", "def remove_memo(mid):\n\trecord = {\n\t\t\"_id\": ObjectId(mid)\n\t}\n\tcollection.remove(record)\n\treturn", "def __delitem__(self, where):\n with self._lock:\n self._current_bytes -= self._data[where]\n del self._data[where]\n self._order.remove(where)", "def remove(self, key):\n ndx = self._findPosition(key)\n assert ndx, 'Invalid map key'\n self._entryList.pop(key)", "def rm(args):\n args.delete = True\n return remove(args)", "def removeItem(*args):", "def removeItem(*args):", "def delete_entry(self, dn):\n if dn:\n logger.info(\"delete entry: %s\", dn)\n self._conn.delete_s(dn)", "def remove(self, val):\n if not val in self.record:\n return False\n index = self.record[val]\n self.data[index], self.data[-1] = self.data[-1], self.data[index]\n self.record[self.data[index]] = index\n self.data.pop()\n self.record.pop(val)\n return True", "def del_pos(s):\n if s.endswith(\"/n\") or s.endswith(\"/a\") or s.endswith(\"/v\") or s.endswith(\"/r\"):\n s = s[:-2]\n return s", "def remove(self, x):\n del self[self.index(x)]", "def __delitem__(self, index: Any) -> None:\n del self.contents[index]\n return", "def DeleteParticle(particle, fieldset, time):\n print('particle is deleted') \n #print(particle.lon, particle.lat, particle.depth)\n particle.delete()", "def remove_card(self, card_idx: int) -> None:", "def __delitem__(self, cvs_path):\n\n node = self[cvs_path]\n self._del_entry(cvs_path)\n if isinstance(node, _WritableMirrorDirectoryMixin):\n node._mark_deleted()", "def cleanTileAtPosition(self, pos):\n if self.tiles[int(pos.getY())][int(pos.getX())] == False:\n self.tiles[int(pos.getY())][int(pos.getX())] = True\n self.cleaned += 1", "def delete(pq):\n\tif not pq.empty():\n\t\tn = len(pq.heap.items)\n\t\tindex = r.randint(1, n-1)\n\t\tdeleted = pq.delete(index)\n\t\tlogging.info(\"delete %s, got %s\", index, deleted)", "def remove(self, val):\n ind = self.table.pop(val, None)\n if ind is None:\n return False\n key = self.ls.pop()\n if len(self.ls)!=0 and len(self.ls) != ind:\n self.ls[ind] = key\n self.table[key] = ind\n return True" ]
[ "0.6454446", "0.6282452", "0.6261571", "0.61265475", "0.6009769", "0.59815764", "0.58335143", "0.57944274", "0.5689213", "0.5677288", "0.56020075", "0.55991143", "0.55826235", "0.55457", "0.55305755", "0.55185974", "0.5401358", "0.5388663", "0.5358339", "0.5331406", "0.530921", "0.5306277", "0.5300817", "0.5296484", "0.52889776", "0.5283555", "0.5265191", "0.5234831", "0.52252215", "0.5222931", "0.5217048", "0.51862895", "0.51777196", "0.5163231", "0.5161351", "0.51560533", "0.5148925", "0.5145605", "0.5138079", "0.51195943", "0.51083344", "0.5102869", "0.5092576", "0.5069189", "0.50678897", "0.5064931", "0.50632125", "0.5062967", "0.5057563", "0.50444996", "0.5030318", "0.50186944", "0.5003301", "0.49934313", "0.49832454", "0.49735004", "0.49719232", "0.49617118", "0.49612296", "0.4960168", "0.4957711", "0.49522862", "0.49496594", "0.4941476", "0.49334717", "0.4932183", "0.49301875", "0.49236816", "0.49213788", "0.4910109", "0.490923", "0.49045217", "0.4904031", "0.4902329", "0.48919454", "0.48871902", "0.48853624", "0.4885081", "0.48760578", "0.48568884", "0.48563674", "0.48559564", "0.48558784", "0.48526743", "0.48441458", "0.48385882", "0.48384666", "0.48195598", "0.48195598", "0.48118788", "0.48037288", "0.4802292", "0.48003477", "0.47974712", "0.47881848", "0.47828823", "0.47748008", "0.47744003", "0.47665352", "0.4766188" ]
0.81650764
0
Get entries stored in this journal.
def get_entries(self) -> Generator[str, None, None]:
    return (entry for entry in self.entries)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getEntries(self):\n return self.entries", "def getEntries(self):\n return self.__entries", "def get_entries(self) -> List[Entry]:\n return list(self.__entries)", "def entries(self):\n return self._entries", "def entries(self):\n return self._entries", "def get_entries_all(self):\n if self.database is None:\n raise DatabaseNotOpened('No KeePass Database Opened.')\n else:\n return self.database.find_entries_by_title('.*', \n regex=True)", "def get_entries(self):\n return self._netdis.loxone.entries", "def readentries(self):\n return list(x for x in self)", "def entries():\n\n\treturn [entry.value for entry in db.session.query(Entry).all()]", "def entries(self):\n return [self._entries[key] for key in self._order]", "def get_log_entries(self):\n if self.retrieved:\n raise errors.IllegalState('List has already been retrieved.')\n self.retrieved = True\n return objects.LogEntryList(self._results, runtime=self._runtime)", "def all(self):\n self.scan()\n return self.entries", "def entries(self, path: Optional[str] = None) -> list[str]:\n _args = [\n Arg(\"path\", path, None),\n ]\n _ctx = self._select(\"entries\", _args)\n return _ctx.execute_sync(list[str])", "def get_all_journal_rows(session):\n return session.query(\n models.VppEtcdJournal).order_by(\n models.VppEtcdJournal.id).all()", "def read(self):\n return self.readentries()", "def _ReadEntries(self):\n scope = {}\n filename = os.path.join(self._root_dir, self._options.entries_filename)\n if not os.path.exists(filename):\n return []\n exec(gclient_utils.FileRead(filename), scope)\n return scope[\"entries\"]", "def entries(self):\n if self.preload_metadata and not self._entries:\n self._entries = dict((self._decode_name(entry.key), entry)\n for entry in self.bucket.list())\n return self._entries", "def get_entries(self):\n return self.find_by_st(\"urn:schemas-denon-com:device:ACT-Denon:1\")", "def fetch_entries(self):\n entries = []\n rss_list = self.__data_adapter.fetch_rss()\n for rss in rss_list:\n rss_href = rss.get('url', None)\n if rss_href is not None:\n feed = feedparser.parse(rss_href)\n [entries.append(FeedDocument(entry.get('title', ''), entry.get('summary', ''))) for entry in feed.get('entries', [])]\n return entries", "def entries(self):\n return SpaceEntriesProxy(self._client, self.id)", "def entries(self):\n if not self._lines:\n self._load_file()\n\n return tuple(self._lines)", "def journals(self):\n return self.search(comp_class=Journal)", "def log_entries(self) -> Generator[dict, None, None]:\n if self.log_stream:\n yield from logs.fetch_stream(self.log_stream)\n else:\n yield from []", "def list(self):\n entries = []\n regex = re.compile('^%s' % self.directory)\n\n for obj in self.driver.list(self.directory):\n path, name = os.path.split(obj['Key'])\n entries.append(Entry(self.path(regex.sub(\"\", path)), name))\n\n return entries", "def get_entries(uri):\n if not uri.endswith('/entries'):\n uri += '/entries'\n results = VGOCache(uri).results\n\n results = [ adjust_entry(x) for x in results ]\n return results", "def fetch(self):\n\n entries = []\n for activity in self.activities[\"entries\"]:\n entries.append(\n [\n element\n for element in [activity[\"title\"], activity[\"content\"][0][\"value\"]]\n ]\n )\n\n return entries[0 : self.max_entries]", "def entrySet(self):\n set = HashSet()\n for key in keySet():\n set.add(Entry(key, self.get(key)))\n return set", "def all_entries(cls):\n info = Diary.entries\n response = jsonify({\"data\": info})\n response.status_code = 200\n return response", "def get_entries(\n 
self,\n entry\n ):\n\n try:\n return self._cache[self._alias[entry]]\n except:\n pass\n\n if entry in self._header:\n \n # get the index\n idx = self._header[entry]\n\n entries = []\n\n for row in self._array:\n tmp = [0 for i in row]\n for i,cell in enumerate(row):\n if cell != 0:\n tmp[i] = self[cell][idx]\n entries.append(tmp)\n\n # add entries to cache\n self._cache[self._alias[entry]] = entries\n\n return entries", "def contents(self):\n # list_folder on \"/\" isn't supported for some reason.\n path = \"\" if self.path == \"/\" else self.path\n result = execute(pdbox.dbx.files_list_folder, path)\n entries = [get_remote(None, meta=e) for e in result.entries]\n\n # TODO: Verify that this works.\n while result.has_more:\n # As long as there are more pages to look through,\n # add their contents to the list of entries.\n more = execute(pdbox.dbx.files_list_folder_continue, result.cursor)\n entries.extend(get_remote(None, meta=e) for e in more)\n\n return entries", "def journal_history(self):\n hist = []\n jour = namedtuple('Journal', 'sourcetitle abbreviation type issn')\n jour_hist = self._json['author-profile'].get('journal-history', {})\n pub_hist = jour_hist.get('journal', [])\n if not isinstance(pub_hist, list):\n pub_hist = [pub_hist]\n for pub in pub_hist:\n new = jour(sourcetitle=pub['sourcetitle'],\n abbreviation=pub.get('sourcetitle-abbrev'),\n type=pub['@type'], issn=pub.get('issn'))\n hist.append(new)\n return hist", "def get_entries(self, history=False, first=False, recursive=True,\n path=None, group=None, **kwargs):\n if self.database is None:\n raise DatabaseNotOpened('No KeePass Database Opened.')\n else:\n if 'regex' in kwargs:\n kwargs['regex'] = is_truthy(kwargs['regex']) \n return self.database.find_entries(recursive=recursive,\n path=path, \n group=group,\n history=history, \n first=first, \n **kwargs)", "def get_entries(audit_id=None, start_time=None):\n al = []\n try:\n if start_time and audit_id:\n raise Exception('Incompatible parameters passed')\n db_path, err = config.get_db_path()\n if err:\n raise Exception(err)\n if audit_id:\n query = 'select * from audit where audit_id=\"%d\" order by audit_id desc' % int(\n audit_id)\n else:\n if not start_time:\n query = 'select * from audit order by audit_id desc'\n else:\n query = 'select * from audit where audit_time >= %d order by audit_id desc' % int(\n start_time)\n rows, err = db.get_multiple_rows(db_path, query)\n if err:\n raise Exception(err)\n if rows:\n for row in rows:\n audit_entry, err = _parse_audit_entry(row)\n if err:\n raise Exception(err)\n al.append(audit_entry)\n except Exception, e:\n return None, 'Error loading audit entries : %s' % str(e)\n else:\n return al, None", "def get(self):\n return self.datastore.read_all()", "async def entries(\n self, *, header: Optional[headers.RequestHeader] = None\n ) -> AsyncGenerator[EntriesResponse, None]:\n\n request = EntriesRequest()\n if header is not None:\n request.header = header\n\n async for response in self._unary_stream(\n \"/atomix.indexedmap.IndexedMapService/Entries\", request, EntriesResponse,\n ):\n yield response", "def contents(self):\n entries = []\n walk = next(os.walk(self.path))\n entries.extend(LocalFolder(os.path.join(walk[0], f)) for f in walk[1])\n entries.extend(LocalFile(os.path.join(walk[0], f)) for f in walk[2])\n return entries", "def entry_orders(self):\n return store.orders.get_entry_orders(self.exchange, self.symbol)", "def get_all_access_entries(self) -> QuerySet:\n return self.model.objects.all().order_by(\"created_at\")", "def 
get_all(self):\n return self.__fetcher.get_fetched()", "def GetRecords(self):\n if not self.key.id():\n logging.error('Key id does not exist.')\n return None\n if self.size < 1:\n return None\n\n string_id = self.key.string_id()\n log_part_keys = [ndb.Key('QuickLog', string_id, 'QuickLogPart', i + 1)\n for i in xrange(self.size)]\n log_parts = ndb.get_multi(log_part_keys)\n serialized = ''.join(l.value for l in log_parts if l is not None)\n try:\n return pickle.loads(serialized)\n except ImportError:\n logging.error('Failed to load QuickLog \"%s\".', string_id)\n return None", "def get_journalless_articles(self):\n self.setQuery(\"\"\"select ?art ?pmid where {\n ?art <http://purl.org/ontology/bibo/pmid> ?pmid .\n filter not exists { ?art <http://vivoweb.org/ontology/core#hasPublicationVenue> ?o }\n }\"\"\")\n try:\n rval = self.query()\n try:\n g = rval.convert()\n except:\n pass\n return [(x['art']['value'], x['pmid']['value']) for x in g['results']['bindings']]\n except:\n return None", "def iterentries(self):\n for key in self.iterkeys():\n yield self.get(key)", "def fetchJournalEntries(date):\n\t\n\tpattern = '%d/%m/%Y'\n\tdatetime_object = datetime.datetime.strptime(date, pattern)\n\t\n\t#Getting the feeds from respective feed functions\n\tslackFeed = getFromSlack(datetime_object)\n\twebServiceFeed = getFromWebService(datetime_object)\n\tgithubFeed = getFromGitService(datetime_object)\n\tdynamoFeed = getFromDynamo(datetime_object)\n\t\n\t#Combining feeds into a single output\n\tentireFeed = reconcileFeed(slackFeed, webServiceFeed, githubFeed, dynamoFeed)\n\t\n\treturn entireFeed", "def extract(self) -> Entries:\n raise NotImplementedError('This method must be implemented by subclasses')", "def items(self):\n return self._get_storage().items()", "def fetchLogs(self):\n return [record.msg for record in self.handler.buffer]", "def entries(self, data, depth=0):\n return self._entries(self.map(data))", "def get_entries(self, player, entry, cnt):\n\n if not entry:\n return []\n\n entries = [entry]\n \n queue = player.get_property(\"queue-source\")\n if queue:\n querymodel = queue.get_property(\"query-model\")\n l = querymodel.get_next_from_entry(entry)\n while l and len(entries) <= cnt:\n entries.append(l)\n l = querymodel.get_next_from_entry(l)\n source = player.get_property(\"source\")\n if source:\n querymodel = source.get_property(\"query-model\")\n l = querymodel.get_next_from_entry(entry)\n while l and len(entries) <= cnt:\n entries.append(l)\n l = querymodel.get_next_from_entry(l)\n\n return entries", "def get_entries_by_notes(self, notes, regex=False, flags=None,\n group=None, history=False, first=False): \n if self.database is None:\n raise DatabaseNotOpened('No KeePass Database Opened.')\n else:\n return self.database.find_entries_by_notes(notes,\n regex,\n flags,\n group,\n history,\n first)", "def _get_live_entries(self):\n from article.models import Entry\n return self.entry_set.filter(status__exact=Entry.LIVE_STATUS)", "def get_logs(self):\n if self.retrieved:\n raise errors.IllegalState('List has already been retrieved.')\n self.retrieved = True\n return objects.LogList(self._results, runtime=self._runtime)", "def get_articles(db:Session):\n return db.query(ArticleModel).all()", "def _get_documents(self) -> Iterable[dict]:\n\n return self._db[\"documents\"]", "def fetchLogRecords(self):\n return self.handler.buffer", "def get_all_entries(start=0, end=-1):\n return r.zrevrange('entry_index', start, end)", "def Entries():\n n = ida_entry.get_entry_qty()\n for i in range(0, 
n):\n ordinal = ida_entry.get_entry_ordinal(i)\n ea = ida_entry.get_entry(ordinal)\n name = ida_entry.get_entry_name(ordinal)\n yield (i, ordinal, ea, name)", "def get_all(self, _filter=None):\n\t\tres = self.run(self._get_all_query(_filter))\n\t\treturn [BigQueryGithubEntry(_id, code, repo_name, path) for _id, code, repo_name, path in res]", "def records(self):\n return self.db_data['records']", "def store(self):\n articles = []\n for entry in self.feed():\n key = self.datastore_client.key(self.DATASTORE_KIND, entry['id'])\n article = datastore.Entity(key=key)\n article.update(entry)\n articles.append(article)\n self.datastore_client.put_multi(articles)", "def entries(self):\n out = []\n for y,x in self.coords(False):\n out.append((y,x,self.retrieve(y,x)))\n return out", "def get_lines(self):\n return self._container.logs(stream=True)", "def _fetch_journal_data(self, criteria):\n found_journal = self._scieloapi.journals.filter(\n limit=1, **criteria)\n return self._sapi_tools.get_one(found_journal)", "def logs(self):\n if not self._logs:\n self.read_logs()\n return self._logs", "def get_records(self) -> List[DBModelInstance]:\n return self._get_all_records()", "def get_all(self):\n return self.db", "def records(self):\n return self._records", "def queryset(self, request):\n if request.user.is_superuser:\n return Entry.objects.all()\n return Entry.objects.filter(author=request.user)", "def read(self, request, tag=None):\n tags = Tag.objects\n if tag:\n t = tags.get(slug=tag)\n return t.entry_set.all()\n else:\n return tags.all()", "def entry(self):\n\n if not hasattr(self, \"_entry\"):\n self._entry = [\n self.id,\n self.name,\n self.DICTIONARY,\n self.LANGUAGE,\n self.audience,\n Loader.API_VERSION,\n self.json,\n ]\n return self._entry", "def get_all(self):\n return ReadingSet(self._set)", "def get_all(cls):\n return DataStore.get_all_instance(cls)", "def list(self,params=None, headers=None):\n path = '/mandate_import_entries'\n \n\n response = self._perform_request('GET', path, params, headers,\n retry_failures=True)\n return self._resource_for(response)", "def get_all(self):\n list = []\n line = self.get()\n while line:\n list.append(line)\n line = self.get()\n return list", "def get_entries(filename, **kwargs):\n return list(xmlreader.XmlDump(join_xml_data_path(filename),\n **kwargs).parse())", "def getAllEntries(self):\n \n log_entries_dict = collections.defaultdict(list)\n for logfile in os.listdir(self.log_folder):\n log = os.path.join(self.log_folder, logfile)\n with open(log, 'rb') as l:\n logCSVreader = csv.reader(l, delimiter=\"|\")\n logCSVreader.next() # skip header\n try:\n for row in logCSVreader:\n zip_file = row[0]\n log_entries_dict[zip_file].append(row)\n except:\n pass\n return log_entries_dict", "def get_entries_for_topic(cls, topic, entry_id_list):\n\t\tresults = cls.get([cls.create_key(topic, entry_id)\n\t\t\t\t\t\t\t\t\t\t\t for entry_id in entry_id_list])\n\t\t# Filter out those pesky Nones.\n\t\treturn [r for r in results if r]", "def get_entries(self):\n for irde in self.Entries:\n if irde != None:\n if irde.Name & 0x80000000:\n # Points to a Name object\n name = obj.Object(\"_IMAGE_RESOURCE_DIR_STRING_U\", (irde.Name & 0x7FFFFFFF) + self.sectoffset, vm = self.obj_vm, parent = irde)\n else:\n name = int(irde.Name)\n if irde.DataOffset & 0x80000000:\n # We're another DIRECTORY\n retobj = obj.Object(\"_IMAGE_RESOURCE_DIRECTORY\", (irde.DataOffset & 0x7FFFFFFF) + self.sectoffset, vm = self.obj_vm, parent = irde)\n retobj.sectoffset = self.sectoffset\n else:\n # 
We're a DATA_ENTRY\n retobj = obj.Object(\"_IMAGE_RESOURCE_DATA_ENTRY\", irde.DataOffset + self.sectoffset, vm = self.obj_vm, parent = irde)\n yield (name, bool(irde.DataOffset & 0x80000000), retobj)", "def all():\n # results = [String.from_dict(redis.hgetall(key)) for key in redis.keys() if key != 'index']\n results = []\n for key in redis_store.keys(String.generate_key('*')):\n data = pickle.loads(redis_store.get(key))\n string = String(data['key']).deserialize(data)\n results.append(string)\n return results", "def items(self):\n self._remove_expired()\n\n return self._d.items()", "def list_entries():\n _, filenames = default_storage.listdir(\"entries\")\n return list(sorted(re.sub(r\"\\.md$\", \"\", filename)\n for filename in filenames if filename.endswith(\".md\")))", "def list_entries():\n _, filenames = default_storage.listdir(\"entries\")\n return list(sorted(re.sub(r\"\\.md$\", \"\", filename)\n for filename in filenames if filename.endswith(\".md\")))", "def get_all(cls):\n\t\tres = Author.query.filter_by(saved=True) or []\n\t\treturn [el._to_dict() for el in res]", "def list_entries():\n _, filenames = default_storage.listdir(\"entries\")\n return list(sorted(re.sub(r\"\\.md$\", \"\", filename) for filename in filenames if filename.endswith(\".md\")))", "def get_feed_entries(feed=FEED):\n d = feedparser.parse(feed)\n entries = d.entries\n \n all_entries =[]\n for entry in entries:\n title = entry.title\n link = entry.link\n date = entry.published_parsed\n tags = entry.tags\n tags = [t.get('term').lower() for t in tags]\n\n date = _convert_struct_time_to_dt(date)\n\n\n entry = Entry(date,title,link,tags)\n all_entries.append(entry)\n\n return all_entries", "def get_all_content(self):\n return self._get_all_content()", "def fetch_all(self):\n return list(iter(self))", "def getJournal(self):\n if 'journal' in self.extras: \n return self.extras['journal']\n #--Default \n self.extras['journal'] = _('[No Journal Record Found.]')\n #--Open save file and look for journal entry\n inPath = os.path.join(self.dir,self.name)\n ins = Tes3Reader(self.name,file(inPath,'rb'))\n #--Raw data read\n while not ins.atEnd():\n #--Get record info and handle it\n (name,size,delFlag,recFlag) = ins.unpackRecHeader()\n if name != 'JOUR':\n ins.seek(size,1,name)\n #--Journal\n else:\n (subName,subSize) = ins.unpackSubHeader('JOUR')\n if subName != 'NAME':\n self.extras['journal'] == _('[Error reading file.]')\n else:\n reDate = re.compile(r'<FONT COLOR=\"9F0000\">(.+?)</FONT><BR>')\n reTopic = re.compile(r'@(.*?)#')\n data = ins.read(subSize)\n data = reDate.sub(ReplJournalDate(),data)\n data = reTopic.sub(r'\\1',data)\n self.extras['journal'] = cstrip(data)\n break\n #--Done\n ins.close()\n print self.extras['journal']\n return self.extras['journal']", "def get_all(self):\n return self.__items", "def get_list(self):\n return sorted(self.__entries.keys())", "def get_reminders(self):\n return self.load_data(default=[])", "def GetLogs(self):\n utcnow = datetime.datetime.utcnow()\n lower_filter = self.log_position.GetFilterLowerBound()\n upper_filter = self.log_position.GetFilterUpperBound(utcnow)\n new_filter = self.base_filters + [lower_filter, upper_filter]\n entries = logging_common.FetchLogs(\n log_filter=' AND '.join(new_filter),\n order_by='ASC',\n limit=self.LOG_BATCH_SIZE)\n return [entry for entry in entries if\n self.log_position.Update(entry.timestamp, entry.insertId)]", "def existing_logs(self):\n temp = list()\n with sqlite3.connect(self.db_file) as conn:\n cur = conn.cursor()\n 
cur.execute(\"PRAGMA table_info('data')\")\n temp = cur.fetchall()\n # if len(temp) != 0:\n # empty sequence is false\n if temp:\n self._existing_logs = [item[1] for item in temp]\n return self._existing_logs", "def get_all_entries():\n conn = sqlite3.connect(CONF.database, detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES)\n curs = conn.cursor()\n try:\n return curs.execute(\"SELECT date_time, price FROM rates ORDER BY date_time DESC\").fetchall()\n finally:\n curs.close()\n conn.close()", "def getItems(self):\n for object in self.database:\n print(object)", "def getCacheContents(self):\n return self._cache", "def logs(self):\n return self._logs", "def logs(self):\n return self._logs", "def getEmployees(self):\n return self.employees", "def list(self):\n return self._list(self._path())", "def get_all(self, q=[], marker=None, limit=None, sort_key='timestamp',\n sort_dir='desc', alarms=False, logs=False,\n include_suppress=False, expand=False):\n return self._get_eventlog_collection(marker, limit, sort_key,\n sort_dir, expand=expand, q=q,\n alarms=alarms, logs=logs,\n include_suppress=include_suppress)" ]
[ "0.7713826", "0.76981765", "0.7545487", "0.74680614", "0.74680614", "0.74099416", "0.71468115", "0.7117516", "0.70767915", "0.69040763", "0.69031703", "0.66488487", "0.66306955", "0.65737164", "0.65379286", "0.6522505", "0.6514958", "0.6387671", "0.6290385", "0.6206425", "0.61915183", "0.61820287", "0.611329", "0.61096394", "0.6078619", "0.60360026", "0.6032598", "0.5962708", "0.59441197", "0.5923307", "0.58954024", "0.5872282", "0.5860356", "0.58484715", "0.58331245", "0.576785", "0.576447", "0.575749", "0.5732328", "0.5713726", "0.57111245", "0.57104903", "0.5680771", "0.56729335", "0.5669461", "0.5638616", "0.5636958", "0.56218094", "0.56106603", "0.5609138", "0.5599922", "0.55994964", "0.55961114", "0.55915433", "0.5574285", "0.55534166", "0.5549712", "0.55482835", "0.55294967", "0.5523612", "0.54987425", "0.54947436", "0.5485181", "0.54813457", "0.547977", "0.54780626", "0.54741937", "0.54688877", "0.54584473", "0.54534876", "0.5448564", "0.54434985", "0.5432576", "0.54312015", "0.54303926", "0.5426709", "0.54255795", "0.53941923", "0.53937787", "0.53873146", "0.53873146", "0.53849226", "0.5383297", "0.5361807", "0.53602564", "0.53594685", "0.53583175", "0.53579366", "0.53546214", "0.5343566", "0.53335977", "0.5323115", "0.53203803", "0.53154945", "0.53049153", "0.53039294", "0.53039294", "0.5303025", "0.52827156", "0.527523" ]
0.68382215
11
Save journal entries into a file.
def save(journal: Journal, file: Path) -> None:
    with open(file, "w") as output:
        output.writelines(f"{entry}\n" for entry in journal.get_entries())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_txt():\n # open file and append, if it doesn't exist then create it.\n with open('journal_entries.txt', 'a+') as f:\n # .get the input in text widget at the first line, '0th' character, then read until the end\n f.write(\"\\n\" + get_date_time())\n for i in range(len(entries)):\n string = entries[i].get('1.0', 'end-1c')\n if string:\n f.write(\"\\n\" + string)", "def save_exit(name, data):\n jrn_path = build_path(name)\n print(f'... saving new journal entries to {jrn_path} ...')\n with open(jrn_path, 'w') as file:\n for line in data:\n file.write(line + '\\n')\n print('... save complete ...')", "def _SaveEntries(self, entries):\n text = \"entries = \\\\\\n\" + pprint.pformat(entries, 2) + '\\n'\n file_path = os.path.join(self._root_dir, self._options.entries_filename)\n gclient_utils.FileWrite(file_path, text)", "def exportJournal(self):\n\n x = self.tableWidget_journals.currentRow()\n fileName = self.journals[x]['name']\n fileName += \".txt\"\n options = QtGui.QFileDialog.DontResolveSymlinks | QtGui.QFileDialog.ShowDirsOnly\n directory = QtGui.QFileDialog.getExistingDirectory(None, \"Select directory to save file\", os.getenv('HOME'), options)\n if directory:\n fileName = directory + \"/\" + fileName\n print (\"Exporting: to \" + fileName)\n data = self.journals[x]['journal']\n f = open(fileName, 'w')\n f.write(data)\n f.close()\n self.log += \"Journal \" + fileName + \" exported\"\n QtGui.QMessageBox.information(None, \"Journal Export\", str(fileName)+\" exported\\n\")", "def save_entries(self):\n with open(self.file_name, \"w\") as file:\n file.write('date,name,minutes,note\\n')\n for entry in self.entries:\n writer = csv.writer(file)\n writer.writerow([entry.date, entry.name, entry.minutes, entry.note])", "def write_to_file(entry, file):\n with open(file, \"a\") as f:\n f.write(entry)", "def __save_article_to_file(self, content):\n with open(\"article.txt\", 'w') as out:\n out.write(content)", "def write_entries(self, entries):\n for entry in entries:\n self.write(entry)", "def save(*messages):\n data = Parser.parse_texts(*messages[1:])\n hour = time.strftime(\"_%H_%M_%S\")\n today = time.strftime(\"_%d_%m_%Y\")\n title = Parser.parse_text(messages[0])\n\n file = open(\"./logs/\"+threading.currentThread().getName()+today+\".log\",'a+')\n file.write(\"\\n==\"+title+hour+\"==\\n\")\n if type(data) is dict: #Dictionary with each value being a triplet. 
From get_all_items\n for key in data.keys():\n file.write(Parser.parse_text(key) + \" -> \"+ Parser.parse_text(str(data[key].x)) +\"\\n\")\n elif type(data) is list: #From get_item, market item, attribute listings\n for listing in data:\n file.write(str(listing.id)+\" - \"+str(listing.price/100)+\" euros\\n\")\n else: #plain text\n file.write(Parser.parse_text(data))\n file.write(\"=====================================\\n\")\n file.close()", "def save_file(self):\n f = open(self._file_name, \"w\")\n try:\n for rental in self.list:\n rental_str = self.obj_to_string(rental)\n f.write(rental_str)\n f.close()\n except Exception as e:\n raise e", "def save(self, path=None):\n if path is None:\n path = self.path\n try:\n with open(path, 'w') as fd:\n for entry in self:\n fd.write('{}\\n'.format(entry))\n except Exception as e:\n raise SSHKeyError('Error writing {}: {}'.format(path, e))", "def save_entry(title, content):\n filename = f\"entries/{title}.md\"\n if default_storage.exists(filename):\n default_storage.delete(filename)\n default_storage.save(filename, ContentFile(content))", "def save_entry(title, content):\n filename = f\"entries/{title}.md\"\n if default_storage.exists(filename):\n default_storage.delete(filename)\n default_storage.save(filename, ContentFile(content))", "def saveToFile(html):\n #print(\"Saving to file.\")\n html += \"\\n\"\n #open necessary files to save\n logFile = open(\"postLog_{0}_{1}.txt\".format(os.path.splitext(path)[0], dateTimeNow), \"a\")\n logFile.write(html)\n logFile.close()\n #print(\"Check Point.\")", "def write_to_file(self, content):\n try:\n with open(self.full_path_to_file, \"wb\") as fp:\n fp.write(content)\n except PermissionError:\n logging.error(\n \"Conversion cannot be performed. Permission denied for this directory\"\n )\n sys.exit()\n self.logger.info(\"News has been successfully converted\")", "def save_to_file():\n if value.get() == \"----------------------\":\n messagebox.showinfo(\"Choose File\", \"Please choose a file to edit.\", parent=app_frame)\n return\n new_file = [] # save edited information to new_file list for writing to csv file\n for x in range(len(entries)):\n new_row = []\n for y in range(len(entries[x])):\n new_row.append(entries[x][y].get())\n new_file.append(new_row)\n\n file_to_save = value.get() # get name of file to write edited content to\n file_path = lrs_path + file_to_save\n with open(file_path, \"w\") as the_file:\n writer = csv.writer(the_file, lineterminator=\"\\n\")\n for line in new_file:\n writer.writerow(line)\n\n messagebox.showinfo(\"Message\", \"File has been saved.\", parent=app_frame)", "def write_the_contents_to_the_same_file(self):\n if not len(self.student_list):\n print('There is no contents to write')\n return\n\n if self._filename is None:\n self._filename = self.input_filename()\n\n with open(self._filename, 'w') as OUT:\n OUT.write(self.student_list.to_csv(date_format='%Y-%m-%d',\n sep='\\t', header=False, columns=self.columns_to_save))\n print(f'Data are saved into {self._filename!r}')", "def write_to_file(self, papers, filename):\n\t\tpass", "def save_logs(self, mode):\n\t\tif mode == \"c\":\n\t\t\twith open(LOGS_FULL_PATH, 'wb') as f:\n\t\t\t\tpickle.dump(self.LOGS, f, pickle.HIGHEST_PROTOCOL)\n\t\telif mode == \"a\":\n\t\t\twith open(ARCHIVES_FULL_PATH, 'wb') as f:\n\t\t\t\tpickle.dump(self.ARCHIVES, f, pickle.HIGHEST_PROTOCOL)", "def write_log_to_file(filename, content):\n append_to_file(filename, content)", "def save(self):\n\n print('Bye!')\n try:\n with open('records.txt', 'w') as fh:\n 
fh.write(str(self._initial_money))\n for n in self._records:\n fh.write('\\n'+n)\n except OSError:\n sys.stderr.write('Cannot open file.\\n')", "def saveLogFile(self, fname = \"data/status.txt\"):\n with open(fname, 'w') as f:\n f.write(\"<br>\\n\".join(self.logLines))\n self.log(\"wrote \"+fname)", "def save_file(msl_data_path, filename, content):\n with open(msl_data_path + filename, 'wb') as (file_):\n file_.write(content)\n file_.flush()\n file_.close()", "def save_entry(title, content):\n\n title.strip # Remove the spaces from both sides.\n filename = f\"entries/{title}.md\"\n if default_storage.exists(filename):\n default_storage.delete(filename)\n default_storage.save(filename, ContentFile(content))\n return True", "def save_file_(msl_data_path, filename, content):\n with open(msl_data_path + filename, 'w') as (file_):\n file_.write(content)\n file_.flush()\n file_.close()", "def save(self, path):\n f = open(path, 'w')\n f.write(self.content().encode('utf-8'))\n f.close()", "def journal_write(session, k, v):\n entry = models.VppEtcdJournal(k=k, v=v)\n session.add(entry)\n session.flush()", "def save(self, content, address, file):\n full_address = \"/home/red/WAREHOUSE\" + address\n file_path = full_address + \"/\" + file\n\n try:\n os.makedirs(full_address, 0o777, True)\n except OSError:\n pass\n\n write_binary_file(content, file_path)", "def logsave(self):\n log_file = open(self.conf[\"output_prefix\"] + \"_log.txt\", \"w\")\n try:\n log_file.write(self.log)\n finally:\n log_file.close()", "def saveOnFile(self, path, data):\n with open(path, \"w\") as f:\n f.write(data)", "def write_to_file(self, filename: str) -> None:", "def save(self):\n with open(self.file, 'w', encoding='utf-8') as self.contacts_file:\n self.contacts_file.seek(0)\n for line in self.contacts:\n self.contacts_file.write(\",\".join(line))\n self.contacts_file.write(\"\\n\")\n self.contacts_file.truncate()\n self.contacts_file.close()", "def save_to_file(self, data):\n\t\tif self.data_file.write(data):\n\t\t\tprint(\"Data successfully added to file\")\n\t\telse:\n\t\t\tPrint(\"Problem occured during adding to file\")", "def store(self, filename):", "def save(self, file_path):\n with open(file_path, 'w') as file:\n file.write(self.text)\n file.close()", "def write_to_files():\n\t# Create output files\n\toutput = [None, \\\n\t\t open(\"priority-1.txt\", \"w\"), \\\n\t\t open(\"priority-2.txt\", \"w\"), \\\n\t\t open(\"priority-3.txt\", \"w\"), \\\n\t\t open(\"priority-4.txt\", \"w\"), \\\n\t\t open(\"priority-5.txt\", \"w\"), ]\n\n\t# Loop over all fields and write them to the correct file\n\tfor field in sorted(reportlog.keys()):\n\t\tpriority = reportlog[field]['priority']\n\t\tlabel = reportlog[field]['label']\n\n\t\toutput[priority].write(\"intphas_%s\\t%s\\n\" % (field, label))\n\t\toutput[priority].flush()\n\n\t# Close files\n\tfor i in [1,2,3,4,5]:\n\t\toutput[i].close()", "def write_to_file(self, file, content):\n with open(file, 'a') as report_file:\n report_file.write('{}\\n'.format(content))", "def save_to_file(self) -> None:\n abs_errors = 0\n art_errors = 0\n for i, doc in enumerate(tqdm(self.results[\"documents\"])):\n paper, abstract = self.__get_article_and_abstract(doc[\"eid\"])\n if abstract:\n abstract_file = open(self.out_dir + \"[{}] Abstract_{}.txt\".format(doc[\"date\"], doc[\"title\"]), \"w\")\n abstract_file.write(abstract)\n abstract_file.close()\n else:\n abs_errors += 1\n if paper:\n file = open(self.out_dir + \"[{}] {}.txt\".format(doc[\"date\"], doc[\"title\"]), \"w\")\n 
file.write(paper)\n file.close()\n else:\n art_errors += 1\n print(\"Not saved/found: {} abstracts, {} articles\".format(abs_errors, art_errors))", "def save_history():\n\n mid = get_mid()\n back_file = contact_name + \"_\" + today\n\n if not os.path.isdir(back_path):\n print('WARNING: o {} directory found, creating.').format(back_path)\n os.mkdir(back_path)\n else:\n print(\"OK: {} found.\".format(back_path))\n\n os.chdir(back_path)\n with open(back_file, 'w') as bf:\n for mes in get_todays_history(mid):\n data = \"{}\\n\".format(mes)\n bf.write(data)", "def writeto(self, fileout):\n \n dump_pkl(self.data, fileout)", "def save_log(log, title):\n\tlogfile = open(directory + \"/logs/\" + title + \"_log.txt\", \"w\")\n\tfor line in log:\n\t\tlogfile.write(str(line) + \"\\n\")\n\tlogfile.close()", "def save_review_to_file(lines, filename):\n data = '\\n'.join(lines)\n file = open(filename, 'w')\n file.write(data)\n file.close()", "def write_file(filename, contents):\n try:\n f = open(filename, \"w\")\n for line in contents:\n f.write(line + \"\\n\")\n finally:\n f.close()", "def save_file(self):\n # paginate over deputies and senators getting their fields\n fieldnames = set([])\n congressmen = self.deputies + self.senators\n for data in congressmen:\n fieldnames = fieldnames.union(data.dump().keys())\n\n\n with open(IDENTITY_FILE_UPDATED, 'a') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=list(fieldnames), delimiter=';')\n writer.writeheader()\n\n for data in congressmen:\n writer.writerow(data.dump())", "def write_to_file(content, filename):\n if not os.path.isfile(filename): # Checking if file already exists, don't append data if it does.\n for j in range(len(content)): # For each dialog in dialogues array.\n with open(filename, 'a') as file: # Open a text file in append mode and write data into it.\n for k in range(len(content[j][0])):\n file.write('{0} {1}\\n'.format(str(content[j][0][k]).lower().split(\"(\")[0],\n str(content[j][1][k])).lower())", "def save_articles(self, articles):\n for article in articles:\n with open(\n f'{self.data_folder}/articles/{article.get(\"id\")}.json',\n 'w'\n ) as f:\n json.dump(article, f)", "def save_posts(posts):\n\n with open(FILE_NAME, 'wb') as f:\n pickle.dump(posts, f, pickle.HIGHEST_PROTOCOL)", "def save(self, fname):\n pass", "def save(self,outPath=None):\n if (not self.canSave): raise StateError(_(\"Insufficient data to write file.\"))\n if not outPath:\n fileInfo = self.fileInfo\n outPath = os.path.join(fileInfo.dir,fileInfo.name)\n out = file(outPath,'wb')\n #--Tes3 Record\n self.tes3.setChanged()\n self.tes3.hedr.setChanged()\n self.tes3.hedr.numRecords = len(self.records) #--numRecords AFTER TES3 record\n self.tes3.getSize()\n self.tes3.dump(out)\n #--Other Records\n for record in self.records:\n record.getSize()\n record.dump(out)\n out.close()", "def to_file(self, records):\n self._file_manager.make_dir_when_no_dir(self._directory)\n file = os.path.join(self._directory, self._file_name + '.txt')\n record_lines = [rec.to_string() + \"\\n\" for rec in records]\n self._file_manager.write_lines(file, record_lines)", "def save(self, fname, snver=None):\n self._io.save(fname)", "def save_file(path, text):\n with path.open(mode='w') as f_stream:\n f_stream.write(text)", "def load(journal: Journal, file: Path) -> None:", "def save_to_file(self, file_path):\n if file_path:\n f = open(file_path, 'w')\n for row in range(self.rows):\n f.write(''.join(self.data[row]) + '\\n')\n f.close()", "def save_to_file(self, file_path):\n if file_path:\n 
f = open(file_path, 'w')\n for row in range(self.rows):\n f.write(''.join(self.data[row]) + '\\n')\n f.close()", "def export(self):\n memento = self.create_memento()\n try:\n f = open(\"story.txt\", \"w\")\n try:\n f.write(memento.__str__())\n finally:\n f.close()\n except IOError:\n print 'IOError while exporting story!'", "def save(self):\n with open(self.__file_path, \"w\", encoding=\"UTF-8\") as file:\n parsed_dict = {\n key: value.to_dict()\n for key, value in self.__objects.items()\n }\n save_data(parsed_dict, file)", "def update_log(file_name, post_id):\n\n with open(file_name, \"w\", encoding=\"utf-8\") as log_file:\n log_file.write(post_id)", "def notification_to_file(filename, notification):\n outfile = open(filename, 'a')\n timestamp = time.localtime(time.time())\n for data in timestamp:\n outfile.write(str(data) + ' ') \n outfile.write(notification + '\\n')\n outfile.close()", "def recordLogsToFile(logpath):\n ret = True\n global LOGLIST\n if not os.path.exists(logpath):\n os.makedirs(logpath)\n\n f = open(logpath+'/TesterUpdatelogs.log','wb')\n LOGLIST = [line+'\\n' for line in LOGLIST]\n try:\n f.truncate()\n f.writelines(LOGLIST)\n except Exception:\n print 'Write logs to path %s failed!' %logpath\n print Exception\n ret = False\n finally:\n f.close()\n return ret", "def save_as(self, filename):\n assert type(filename) == str, 'ERROR: filename should be type str'\n if '.pkl' in filename:\n with open(filename, 'wb') as f:\n dill.dump(self, f)\n else:\n with open(filename + '.pkl', 'wb') as f:\n dill.dump(self, f)", "def save(self, ts):\n with open(self, 'w') as f:\n Timestamp.wrap(ts).dump(f)", "def save(self,fn):\n fn = fn if fn[-4:] == \".pkl\" else fn+\".pkl\"\n with open(fn,\"wb+\") as f:\n pickle.dump(self,f)\n log(\"Saved reader to {}\".format(fn))", "def log_to_file(text, status='INFO'):\n outfile = open(LogName, 'a')\n outfile.write(timestamp()+' - '+status+' - '+str(text)+'\\n')\n outfile.close()", "def __logtofile(self, log_name):\n logger = logging.getLogger(log_name)\n\n file_path = os.path.join(self.log_file_path, log_name + '.txt')\n\n formatter = logging.Formatter('<%(asctime)s> %(levelname)-8s %(message)s',\n datefmt='%y-%m-%d %H:%M:%S')\n self.file_handlers[logger] = logging.FileHandler(file_path, mode='w')\n self.file_handlers[logger].setFormatter(formatter)\n self.file_handlers[logger].setLevel(logging.DEBUG)\n logger.addHandler(self.file_handlers[logger])\n\n logger.info('SAVING LOGS IN: %s' % file_path)", "def logbook_save(lb):\n return IMPL.logbook_save(lb)", "def save_file(self, file_name, text):\n\n with open(file_name, 'w') as content_file:\n content = content_file.write(text)", "def save(self):\n\n date = datetime.utcnow().strftime(\"%Y-%m-%d\")\n directory = '%s/xls/%s/' % (PROJECT_DIR, date)\n _file = directory + '/' + self.xls.name\n if not os.path.exists(directory):\n os.makedirs(directory)\n with open(_file, 'wb+') as destination:\n [destination.write(chunk) for chunk in self.xls.chunks()]\n self.batch(_file)", "def saveStatsFile(self):\n if not os.path.exists(\"stats\"):\n os.mkdir(\"stats\")\n now = datetime.datetime.now()\n parts = [now.year, now.month, now.day]\n parts = [\"%02d\"%x for x in parts]\n todaysFileName = \"-\".join(parts)+\".txt\" \n timeStamp = time.strftime(\"%y%m%d%H%M\", time.localtime())\n log = \",\".join(self.logLinesStats)\n fname = \"stats/\"+todaysFileName\n with open(fname, 'a') as f:\n f.write(timeStamp+\",\"+log+\"\\n\")\n self.log(\"wrote \"+fname)", "def archive_log(self, f_in, filename):\n if not 
os.path.isdir('archived'):\n os.makedirs('archived')\n f_out = gzip.open('archived/'+filename+'.gz', 'wb')\n f_out.writelines(f_in)\n f_out.close()\n f_in.close()", "def save(self):\n # TODO: save the file", "def persist_file(tweets, directory):\n log.debug(\"{} tweets to gzipped file\".format(len(tweets)))\n\n filename = join(directory, \"{}.gz\".format(date.today()))\n with gzip.open(filename, \"a+\") as f:\n write(tweets, f)", "def save(self, path_to_save):\n for item in self.data_array:\n item.save(path_to_save+item.file_name)", "def save(file, corpus):\n with open(file, 'w') as f_out:\n f_out.write(corpus)", "def write(self,path,content):\n file_path = os.path.join( self.directory, path)\n with open(file_path, \"w\") as file:\n file.write( content )", "def save(self, export_path: str):", "def to_file(self, filename=None):\n name = None\n if filename is not None:\n name = filename\n elif self.name:\n name = self.name\n\n if name:\n #f = open(self.name, 'w')\n f = codecs.open(name, 'w', encoding='utf-8')\n self.seek(0)\n f.write(self.read())\n f.close()\n else:\n print \"No log_name for this log\"", "def saveAs(self):\n self.saveFile()", "def archive(po_filename, bl_filename):\n\n # Store archive in same dir as this script\n root = os.path.abspath(os.path.dirname(sys.argv[0]))\n\n po_archive = root + '/po.csv.%s' % datetime.date.today()\n bl_archive = root + '/bl.csv.%s' % datetime.date.today()\n\n shutil.move(po_filename, po_archive)\n shutil.move(bl_filename, bl_archive)\n\n perms = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH\n os.chmod(po_archive, perms)\n os.chmod(bl_archive, perms)", "def save(datastream):", "def save(self):\n if PYTHON3:\n fileobj = open(self.filename, 'w', encoding=self.ENCODING, errors=\"replace\")\n else:\n fileobj = open(self.filename, 'w')\n self.save_to_fileobj(fileobj)\n fileobj.close()", "def write_file(poet, info_dict):\r\n\r\n filename = SAVE_PATH + '/' + poet + '/' + str(info_dict['id']) + '_'+ str(info_dict['pagenum']) \\\r\n + '_' + info_dict['id2'] +'_' + info_dict['ord2'] \\\r\n + '_' + info_dict['id3'] + '_' + info_dict['ord3'] \\\r\n + '_' + info_dict['id4'] + '_' + info_dict['ord4'] + '.txt'\r\n\r\n print(filename)\r\n with open(filename, 'w', encoding='utf-16') as f:\r\n txt = ','.join([str(info_dict[k]) for k in KEYS ])\r\n txt = txt + '\\n' + '\\n'.join([x for x in info_dict['beyts']])\r\n f.write(txt)\r\n\r\n\r\n locale.setlocale(locale.LC_ALL, '')\r\n DELIMITER = ';'# if locale.localeconv()['decimal_point'] == ',' else ','\r\n\r\n list_of_lists = [[info_dict[k] for k in KEYS]]\r\n with open('D:/poem/molana.csv', 'a', newline='', encoding='utf-16') as csvfile:\r\n\r\n writer = csv.writer(csvfile, delimiter=DELIMITER)\r\n writer.writerows(list_of_lists)", "def save(self, filename):\n try:\n\n with codecs.open(filename, 'w', sg.__encoding__) as fd:\n for word in sorted(self.__entries.keys()):\n fd.write(\"{:s}\\n\".format(word))\n\n except Exception as e:\n logging.info('Save file failed due to the following error: {:s}'\n ''.format(str(e)))\n return False\n\n return True", "def save(self, path):\n pass", "def save(self, path):\n pass", "def save(self, path):\n pass", "def save():", "def save_db():\n # establish connection to db\n conn = sqlite3.connect('journal_entries.db')\n\n # create cursor to execute commands\n c = conn.cursor()\n\n # create table if specified table does not exist\n c.execute(\"\"\"SELECT count(name) \n FROM sqlite_master \n WHERE type='table' AND name='entries' \n \"\"\")\n table_check = c.fetchone()[0]\n if 
table_check == 0: # does not exist\n c.execute(\"\"\"CREATE TABLE entries (\n date_time text,\n q_1 text,\n q_2 text,\n q_3 text,\n q_4 text,\n q_5 text,\n q_6 text\n )\"\"\")\n\n # insert new row (entry)\n # TODO: refactor code/make flexible. Build dictionary of column, inputs then loop through?\n c.execute(\"INSERT INTO entries VALUES (:date_time, :q_1, :q_2, :q_3, :q_4, :q_5, :q_6)\",\n {\n 'date_time': get_date_time(),\n 'q_1': entries[0].get('1.0', 'end-1c'),\n 'q_2': entries[1].get('1.0', 'end-1c'),\n 'q_3': entries[2].get('1.0', 'end-1c'),\n 'q_4': entries[3].get('1.0', 'end-1c'),\n 'q_5': entries[4].get('1.0', 'end-1c'),\n 'q_6': entries[5].get('1.0', 'end-1c')\n }\n )\n\n # commit changes\n conn.commit()\n\n # c.execute(\"SELECT * from entries\")\n # print(c.fetchall())\n\n # close connection\n conn.close()", "def log_write(log_f, text, action='a'):\n\n f = open(log_f, action)\n f.write(text)\n f.close()", "def dump_processed_data_to_file(self, facts, accu_label, article_label, imprison_label):\r\n data = [facts, accu_label, article_label, imprison_label]\r\n with open(util.MID_DATA_PKL_FILE_LOC, \"wb\") as f:\r\n pickle.dump(data, f)\r\n if util.DEBUG:\r\n print(\"DEBUG: data dumped to `.pkl` file\")", "def save_file(self, filename):\r\n \r\n f = open(filename,'w')\r\n f.write(self.body)\r\n f.close", "def save(self, filename):\n o = open(filename, 'w')\n o.write(self.write())\n o.close()", "def export(self):\n\t\tif ( self.saveFileStr.get() not in self.saveDefault ):\n\t\t\t# Set the tempo\n\t\t\ttempoObject = tempo.MetronomeMark( None, \n\t\t\t\t\t\t\t\t\t\t\t\tint(self.tempo.get()), \n\t\t\t\t\t\t\t\t\t\t\t\tnote.QuarterNote() )\n\t\t\tself.transcribedPart.insert(tempoObject)\n\t\t\t\n\t\t\t# Write to disk\n\t\t\tsuccess = self.transcribedPart.write(fp=self.saveFile)\n\t\t\tif ( success ):\n\t\t\t\tsaveMsg = \"Your file has been saved to %s.\" % success\n\t\t\t\ttkMessageBox.showinfo(\"File saved!\", saveMsg )\n\t\telif ( self.saveFileStr.get() == \"\" ):\n\t\t\tself.saveFileStr.set(self.saveDefault)\t\t\n\t\t\tpass\n\t\telse:\n\t\t\t# Don't have a save location... 
should get that\n\t\t\tself.getSavePath()\n\t\t\tself.export()", "def save(self, fn: str) -> None:\n fout = open(fn, 'w')\n for t,x in zip(self.times,self.milestones):\n fout.write('%f\\t%d '%(t,len(x)))\n fout.write(' '.join([str(xi) for xi in x]))\n fout.write('\\n')\n fout.close()", "def save(self):\n memento = self.create_memento()\n import datetime\n f = open(str(datetime.datetime.now()).replace(' ','_')+'.saved_story','w')\n cPickle.dump(memento,f)\n f.close()\n zcanvas.message(\"Saved!\")", "def save(self, path):\n with open(path, 'wb') as f:\n pkl.dump(self, f)", "def save(self):\n path = self.user.get_session_path()\n with open(path, 'a', encoding='utf8') as file:\n self.write(file=file)", "def save_seqs_to_file(self):\n if self.blast_type == 'local':\n self.seq_file = os.path.join(self.cwd,\n 'db',\n \"{0}_seqs.fas\".format(self.gene_code))\n queryset = Sequences.objects.all().filter(gene_code=self.gene_code)\n\n my_records = []\n for i in queryset:\n item_id = i.code_id + '|' + i.gene_code\n seq = self.strip_question_marks(i.sequences)\n if seq != '':\n seq_record = SeqRecord(Seq(seq), id=item_id)\n my_records.append(seq_record)\n SeqIO.write(my_records, self.seq_file, \"fasta\")", "def save_as(self, filename):\n # Join together the buffer contents so it can be written to a file\n contents = \"\"\n for line in self.buffer.get_lines():\n contents += line + '\\n'\n \n # Attempt to open or create the file and write the contents to it\n try:\n with open(filename, 'w') as f:\n f.write(contents)\n self.filename = filename\n self.has_changes = False\n self.message = \"Succesfully saved: '{}'\".format(filename)\n return True\n except:\n self.message = \"Error writing file. File not saved!\"\n return False", "def write(self):\n with open(\"log.txt\", 'w') as f:\n for message in self.message_list:\n f.write(message + \"\\n\")" ]
[ "0.71133924", "0.70640576", "0.68299794", "0.68087065", "0.6638848", "0.6473705", "0.64459836", "0.61746615", "0.61529505", "0.61082065", "0.6088982", "0.60406435", "0.60406435", "0.5983607", "0.5972086", "0.59578437", "0.5955684", "0.59284395", "0.58410317", "0.58036464", "0.57855684", "0.57762414", "0.57509446", "0.57506305", "0.574192", "0.57091504", "0.5706521", "0.5703111", "0.5701415", "0.56929386", "0.56809044", "0.5655289", "0.56485176", "0.5610159", "0.5604323", "0.55998594", "0.55819404", "0.55702657", "0.55636007", "0.5549252", "0.55485934", "0.554717", "0.5538033", "0.553763", "0.5536085", "0.5534679", "0.55323386", "0.5514232", "0.5512672", "0.55075467", "0.55050194", "0.54988056", "0.54944307", "0.5493213", "0.5493213", "0.5488784", "0.5485556", "0.54847", "0.5482864", "0.5477943", "0.5477029", "0.5471955", "0.5471838", "0.5465347", "0.54598236", "0.5458203", "0.5451249", "0.5450147", "0.54436636", "0.5442836", "0.54374486", "0.54202425", "0.54095566", "0.5394249", "0.53850514", "0.5384508", "0.53840226", "0.53822005", "0.53789306", "0.53770757", "0.53757834", "0.537362", "0.53710616", "0.53650975", "0.53650975", "0.53650975", "0.5364738", "0.5364372", "0.5350852", "0.53446776", "0.53385407", "0.53376853", "0.53371197", "0.5334922", "0.5330371", "0.5329128", "0.53207487", "0.5315001", "0.5312937", "0.53126305" ]
0.8384327
0
Load journal entries from a file.
def load(journal: Journal, file: Path) -> None:
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load(name):\n jrn_path = build_path(name)\n if not os.path.exists(jrn_path):\n print(f'... journal file \\'{jrn_path}\\' does not exist ...')\n print('... initializing new journal ...')\n with open(jrn_path, 'w') as file:\n pass\n return []\n else:\n print(f'... loading journal entries from {jrn_path} ...')\n journal = []\n with open(jrn_path, 'r') as file:\n for line in file:\n journal.append(line.rstrip())\n print(f'... loaded {len(journal)} items')\n return journal", "def load_employees(file_path):\n\temployees = []\n\tfor line in open(file_path):\n\t\temployee = Employee.employee_from_insert_stmnt(line)\n\t\tif employee:\n\t\t\temployees.append(employee)\n\treturn employees", "def load_file(self):\n try:\n f = open(self._file_name, \"r\")\n line = f.readline()\n while len(line) > 0:\n super(RentalHistoryText, self).add_rental(self.string_to_obj(line))\n line = f.readline()\n f.close()\n except IOError as e:\n raise e", "def __loadFromFile(self):\n fh = open(self.__fileName)\n for line in fh:\n if line.strip() == \" \":\n continue # we have an empty line, just skip\n sub = self.__createSubjectFromLine(line)\n # invoke the store method from the base class\n SubjectsRepo.store_subject(self, sub)\n fh.close()", "def load_from_file(self, file_path):\n for line in open(file_path, 'r'):\n term = line.rstrip('\\n')\n self.add(term)", "def load_file(self, file_path):\n with open(file_path, \"r\") as mappings_file:\n for raw_line in mappings_file:\n line = raw_line.split()\n # Add new record to the records dictionary.\n new_record = Record(line[0], line[1], line[2], line[3])\n self.add_record(new_record)", "def load_journal_json(self, absolute_path):\n with open(absolute_path) as json_file:\n data = json.load(json_file)\n\n return data", "def from_file(self, filename=None):\n if not self.name:\n #we don't have a file associated with the EntryList:\n if not filename:\n print \"UNKNOWN FILE!\"\n exit\n else:\n self.name = filename\n \n elif filename and filename != self.name:\n #ambiguous which file to use\n print \"different file than what log was initialized with\"\n exit\n \n else:\n #we have an original filename and none passed in\n #or the original filename equals the one passed in\n #should be good to go\n pass\n\n if os.path.exists(self.name):\n\n #f = open(self.name, \"U\")\n #2009.04.02 20:44:31 \n #very strange behavior when opening up utf-8 files\n #characters get reincoded\n #this is especially prominent when using check_feed.py\n #was using latin_1... 
going back to utf-8\n #f = codecs.open(self.name, encoding='latin_1')\n #codecs.ignore_errors(UnicodeDecodeError) \n f = codecs.open(self.name, encoding='utf-8', errors='ignore')\n\n self.write(f.read())\n f.close\n\n self.seek(0)\n\n else:\n print \"NO FILE ASSOCIATED WITH LOG: %s\" % self.name", "def read (self, path):\n\n\t\tself.data = []\n\t\t# print \"*** path: %s***\" % path\n\t\tdir, filename = os.path.split (path)\n\t\troot, ext = os.path.splitext (filename)\n\t\t# encoding = 'ISO-8859-1' # utf-8\n\t\ts = codecs.open(path,'r', self.encoding).read()\n\t\t## s = unicode(f.read(),'utf-8')\n\t\ts = self.preprocess (s)\n\t\tlines = split (s, self.linesep)\n\t\tschema = self.splitline(lines[0])\n\n\t\t## print \"** %s **\" % os.path.splitext(filename)[0]\n\t\tif self.verbose:\n\t\t\tprint \"read %d lines from %s\" % (len(lines), path)\n\n\t\tfor i in range(1,len(lines)):\n\t\t\tif not lines[i].strip(): \n\t\t\t\t# print 'skipping line (%d)' % i\n\t\t\t\tcontinue\n\t\t\tfields = self.splitline(lines[i])\n\t\t\titem = self.entry_class (fields, schema)\n\t\t\tif self.accept (item):\n\t\t\t\tself.add (item)\n\n\t\tself.schema = schema\n\t\t# self.data.sort (lastNameCmp)", "def load_articles():\n\t\n\tlog(\"Reading articles from file: articles_dumped...\")\n\tf = open(os.path.join(logdir, \"articles_dumped\"), 'rb')\n\tdumped = f.read()\n\tf.close()\n\t\n\tarticles = pickle.loads(dumped)\n\t\n\tlog(\"Done!\")\n\tsys.stdout.write(\"Done!\\n\")\n\tsys.stdout.flush()\n\t\n\treturn articles", "def load_recipes_from_file(cls, args):\n with open(args.recipes_file, 'r') as f:\n reader = csv.DictReader(f)\n for row in reader:\n cls._recipes.append(row)\n cls._add_indices_to_recipes()\n cls._initialize_recipes_status()\n logging.info(\"Recipes loaded.\")", "def save(journal: Journal, file: Path) -> None:\n with open(file, \"w\") as output:\n output.writelines(f\"{entry}\\n\" for entry in journal.get_entries())", "def load_file(filename):\n with open(path.join(PATH_ROOT, filename), \"r\") as in_file:\n return in_file.readlines()", "def loadFile(self, path):\n books_added = 0\n records_added = 0\n books_to_clean = set()\n\n PDEBUG('Loading from file: %s', path)\n\n with open(path) as fd:\n while True:\n content = fd.read(PAGE_SIZE)\n if content is None:\n break\n if len(content) == 0:\n break\n pos = 0\n while True:\n m = R_MATCH_ENTRY.search(content, pos)\n if m is None:\n new_content = fd.read(PAGE_SIZE)\n if len(new_content) == 0:\n print('New books: %d, new records: %d' %\n (books_added, records_added))\n print('EOF reached...')\n return (books_added, records_added)\n else:\n content = content[pos:] + new_content\n pos = 0\n else:\n (book, author) = process_book_name(m.group(1))\n book = handleStr(book)\n author = handleStr(author)\n page = handleStr(m.group(2).strip())\n time = handleStr(m.group(3).strip())\n mark = handleStr(m.group(4).strip())\n pos = m.end(0)\n\n bts = book.encode()\n if bts[0:3] == codecs.BOM_UTF8:\n PDEBUG('oops: ')\n PDEBUG('%X-%X-%X', bts[0], bts[1], bts[2])\n\n sys.exit()\n\n if len(mark) == 0:\n continue\n\n res = R_MATCH_POS.match(page)\n if res is None:\n res = R_MATCH_PAGE.match(page)\n if res is None:\n PDEBUG('oops: %s -- %s', book, page)\n sys.exit(1)\n\n pos_str = res.group(1)\n typ_str = res.group(2)\n\n (new_book, new_clip) = \\\n self.__addEntry__(\n book, author, pos_str, typ_str, time, mark)\n\n if new_book:\n books_added += 1\n\n if new_clip:\n books_to_clean.add(book)\n records_added += 1\n\n if books_to_clean:\n PDEBUG('Books to clean: %s', 
books_to_clean)\n\n for book in books_to_clean:\n self.cleanUpBook(book)\n\n print('Total books added: %d, clips added:%d' %\n (books_added, records_added))\n\n return (books_added, records_added)", "def load(self, file):\n\n with open(file, 'r') as f:\n self._lines = Lines(f.read().splitlines())\n\n self._parse()", "def load(self, file):\n\n with open(file, 'r') as f:\n self._lines = Lines(f.read().splitlines())\n\n self._parse()", "def _load_file(self, log_file, message_name_filter_list):\n if isinstance(log_file, str):\n self._file_handle = open(log_file, \"rb\") #pylint: disable=consider-using-with\n else:\n self._file_handle = log_file\n\n # parse the whole file\n self._read_file_header()\n self._last_timestamp = self._start_timestamp\n self._read_file_definitions()\n\n if self._debug:\n print(\"header end offset: {:}\".format(self._file_handle.tell()))\n\n if self.has_data_appended and len(self._appended_offsets) > 0:\n if self._debug:\n print('This file has data appended')\n for offset in self._appended_offsets:\n self._read_file_data(message_name_filter_list, read_until=offset)\n self._file_handle.seek(offset)\n\n # read the whole file, or the rest if data appended\n self._read_file_data(message_name_filter_list)\n\n self._file_handle.close()\n del self._file_handle", "def load_posts():\n \n with open(FILE_NAME, 'r') as f:\n return pickle.load(f)", "def load_text_file(file_path: str):\n with open(file_path) as f:\n content = f.readlines()\n return content", "def load_file(file_name):\n with open(file_name,\"r\") as f:\n return f.readlines()", "def load_file(filename):\n with open(filename, \"r\") as f:\n return f.readlines()", "def load_data(path):\n with open(path) as f:\n return f.readlines()", "def load_biblio(self, file_name, preload_ids=False, chunksize=1000):\n\n logger.info( \"Loading biblio data from [{}], with chunk size {}. Preload IDs? 
{}\".format(file_name, chunksize, preload_ids) )\n\n input_file = codecs.open(file_name, 'r', 'utf-8')\n biblio = json.load(input_file)\n\n sql_alc_conn = self.db.connect()\n db_api_conn = sql_alc_conn.connection\n\n if (\"cx_oracle\" in str(self.db.dialect)):\n title_ins = DBBatcher(db_api_conn, 'insert into schembl_document_title (schembl_doc_id, lang, text) values (:1, :2, :3)')\n classes_ins = DBBatcher(db_api_conn, 'insert into schembl_document_class (schembl_doc_id, class, system) values (:1, :2, :3)')\n else:\n title_ins = DBBatcher(db_api_conn, 'insert into schembl_document_title (schembl_doc_id, lang, text) values (%s, %s, %s)')\n classes_ins = DBBatcher(db_api_conn, 'insert into schembl_document_class (schembl_doc_id, class, system) values (%s, %s, %s)')\n\n\n ########################################################################\n # STEP 1: If overwriting, find extant docs and pre-populate doc ID map #\n ########################################################################\n\n extant_docs = set()\n\n if self.overwrite or preload_ids:\n\n for chunk in chunks(biblio, chunksize):\n\n # Loop over all biblio entries in this chunk\n doc_nums = set()\n for bib in chunk[1]:\n\n input_pubnum = self._extract_pubnumber(bib)\n\n # Early return: don't bother querying if we already have an ID\n if input_pubnum in self.doc_id_map:\n extant_docs.add( input_pubnum ) \n continue\n\n doc_nums.add(input_pubnum)\n\n if len(doc_nums) == 0:\n continue\n\n self._fill_doc_id_map(doc_nums, sql_alc_conn, extant_docs)\n\n logger.info( \"Discovered {} existing IDs for {} input documents\".format( len(extant_docs),len(biblio)) )\n\n\n ########################################################\n # STEP 2: Main biblio record processing loop (chunked) #\n ########################################################\n\n for chunk in chunks(biblio, chunksize):\n\n logger.debug( \"Processing {} biblio records, up to index {}\".format(len(chunk[1]), chunk[0]) )\n\n new_doc_mappings = dict() # Collection IDs for totally new document \n overwrite_docs = [] # Document records for overwriting\n duplicate_docs = set() # Set of duplicates to read IDs for\n known_count = 0 # Count of known documents\n\n new_titles = []\n new_classes = [] \n\n doc_insert_time = 0\n\n\n transaction = sql_alc_conn.begin()\n\n for bib in chunk[1]:\n\n ########################################\n # STEP 2.1 Extract core biblio records #\n ########################################\n\n family_id, pubdate, pubnumber, assign_applic = self._extract_core_biblio(bib)\n\n life_sci_relevant = self._extract_life_sci_relevance(bib)\n\n\n ####################################################\n # Step 2.2 Overwrite or Insert the document record #\n ####################################################\n\n if pubnumber in extant_docs:\n\n known_count += 1\n\n if self.overwrite:\n # Create an overwrite record\n doc_id = self.doc_id_map[pubnumber] \n overwrite_docs.append({\n 'extant_id' : doc_id,\n 'new_published' : pubdate,\n 'new_family_id' : family_id,\n 'new_life_sci_relevant' : life_sci_relevant,\n 'new_assign_applic' : assign_applic })\n else:\n # The document is known, and we're not overwriting: skip\n continue\n\n else:\n \n # Create a new record for the document\n record = {\n 'scpn' : pubnumber,\n 'published' : pubdate,\n 'family_id' : family_id,\n 'assign_applic' : assign_applic,\n 'life_sci_relevant' : int(life_sci_relevant) }\n \n try:\n\n start = time.time()\n result = sql_alc_conn.execute( self.docs.insert(), record )\n end = time.time()\n\n 
doc_insert_time += (end-start)\n\n except Exception, exc:\n\n if exc.__class__.__name__ != \"IntegrityError\":\n raise\n\n elif self.allow_document_dups:\n\n # It's an integrity error, and duplicates are allowed.\n known_count += 1\n duplicate_docs.add(pubnumber)\n\n # Reset transaction\n transaction.commit()\n transaction = sql_alc_conn.begin()\n continue \n\n else:\n\n raise RuntimeError(\n \"An Integrity error was detected when inserting document {}. This \"\\\n \"indicates insertion of an existing document, but duplicates have been disallowed\".format(pubnumber))\n\n\n doc_id = result.inserted_primary_key[0] # Single PK\n new_doc_mappings[pubnumber] = doc_id\n\n self._extract_detailed_biblio(bib, doc_id, new_classes, new_titles, pubnumber)\n\n # Commit the new document records, then update the in-memory mapping with the new IDs\n transaction.commit()\n self.doc_id_map.update(new_doc_mappings)\n\n logger.info(\"Processed {} document records: {} new, {} duplicates. DB insertion time = {:.3f}\".format( len(chunk[1]), len(new_doc_mappings), known_count, doc_insert_time))\n\n\n ########################################################\n # STEP 2.2: Deal with document overwrites / duplicates #\n ########################################################\n\n if len(overwrite_docs) > 0:\n\n transaction = sql_alc_conn.begin()\n\n # Update the master record for the document that's being overwritten\n stmt = self.docs.update().\\\n where(self.docs.c.id == bindparam('extant_id')).\\\n values(published=bindparam('new_published'), \n family_id=bindparam('new_family_id'), \n life_sci_relevant=bindparam('new_life_sci_relevant'),\n assign_applic=bindparam('new_assign_applic'))\n\n sql_alc_conn.execute(stmt, overwrite_docs)\n\n # Clean out ALL other references to the document, for re-insertion\n delete_ids = [record['extant_id'] for record in overwrite_docs]\n\n stmt = self.titles.delete().where( self.titles.c.schembl_doc_id.in_( delete_ids ) )\n sql_alc_conn.execute( stmt )\n\n stmt = self.classes.delete().where( self.classes.c.schembl_doc_id.in_( delete_ids ) )\n sql_alc_conn.execute( stmt )\n\n stmt = self.chem_mapping.delete().where( self.chem_mapping.c.schembl_doc_id.in_( delete_ids ) )\n sql_alc_conn.execute( stmt )\n\n transaction.commit()\n\n logger.info(\"Overwrote {} duplicate documents (master doc record updated, all other references deleted)\".format(len(overwrite_docs)))\n\n if len(duplicate_docs) > 0:\n self._fill_doc_id_map(duplicate_docs, sql_alc_conn)\n\n logger.info(\"Read {} IDs for duplicate documents\".format(len(duplicate_docs)))\n\n ########################################################\n # STEP 2.3: Bulk insertion of titles / classifications #\n ########################################################\n\n\n # Bulk insert titles and classification\n if self.load_titles:\n title_ins.execute(new_titles)\n logger.debug(\"Insertion of {} titles completed\".format(len(new_titles)) )\n\n if self.load_classifications:\n classes_ins.execute(new_classes)\n logger.debug(\"Insertion of {} classification completed\".format(len(new_classes)) )\n\n # END of main biblio processing loop\n\n # Clean up resources\n title_ins.close()\n classes_ins.close()\n sql_alc_conn.close()\n input_file.close()\n\n logger.info(\"Biblio import completed\" )", "def load(logFile):\n pass #TODO", "def load_list_of_entries(list_of_files):\n publication_entries = []\n entries = []\n titles = []\n\n for filename in list_of_files:\n entries_list = load_entries(filename)\n\n for e in entries_list:\n if 
e.main_publication:\n publication_entries.append(e)\n elif e.title not in titles:\n titles.append(e.title)\n entries.append(e)\n\n return publication_entries, entries", "def load_poems(self):\n file = open(self.name, \"r\")\n content = file.readlines()\n for i in content:\n self.add_msg_and_index(i.strip())", "def readFile(filename):\n\twith open(filename, 'rU') as csvIN:\n\t\tnext(csvIN)\n\t\toutCSV=(line for line in csv.reader(csvIN, dialect='excel'))\n\t\tfor row in outCSV:\n e = Entry(row)\n e.pass_import()", "def load_txt(filename, **kwargs):\n with sys_open(filename, 'r', **kwargs) as f:\n return f.readlines()", "def _load(self, filename):\n with open(filename) as fp:\n reader = csv.DictReader(fp)\n self.events = list(reader)", "def __loadFromFile(self):\n fh = open(self.__fileName)\n for line in fh:\n if line.strip() == \" \":\n continue # we have an empty line, just skip\n st = self.__createStudentFromLine(line)\n # invoke the store method from the base class\n StudentsRepo.store_student(self, st)\n fh.close()", "def load(self, file_name):\n try:\n [self.add_word(w) for w in open(file_name).read().splitlines()]\n except IOError as e:\n print(e)", "def read_from_file(self, filename: str) -> None:", "def read_content_load(self, filename):\n str_file_woc = self.import_file(filename)\n self.parse_load(str_file_woc)\n self.disp_load_info()", "def load_invites(invite_filename):\n\n # Write code here to loop over invite data and populate DB.\n\n print(\"Invites\")\n\n for i, row in enumerate(open(invite_filename)):\n row = row.rstrip()\n\n values_list = row.split()\n\n user_id, event_id, rsvp = values_list\n\n user_id = int(user_id)\n event_id = int(event_id)\n\n # Instantiate invite\n invite = Invitation(user_id=user_id,\n event_id=event_id,\n rsvp=rsvp)\n\n # Add invite to session\n db.session.add(invite)\n\n # Commit all invite instances to DB\n db.session.commit()", "def load_from_file(self, file_path):\n board_f = open(file_path, 'r')\n row = board_f.readline().strip('\\n')\n self.data = []\n while row != '':\n self.data.append(list(row.split()))\n row = board_f.readline().strip('\\n')\n board_f.close()", "def load_from_file(self, file_path):\n board_f = open(file_path, 'r')\n row = board_f.readline().strip('\\n')\n self.data = []\n while row != '':\n self.data.append(list(row.split()))\n row = board_f.readline().strip('\\n')\n board_f.close()", "def load(self, file):\n self._load(file.encode())", "def readfile(self):\n try:\n with open(filename, mode=\"r\") as fileobject:\n for line in fileobject:\n line = line.rstrip()\n self.__domainlist.append(line)\n\n fileobject.close()\n except:\n print(\"Error when reading file\")", "def load_input(self, file_name):\n with open(file_name, \"r\") as in_file:\n self.all_lines = [line.rstrip('\\n') for line in in_file]", "def load_file(file):\r\n\r\n try:\r\n with open(Path(file), \"r\", encoding=\"utf-8\", newline=\"\") as f:\r\n txt_file = f.read()\r\n except:\r\n sys.exit(\"IO_Tools: ERROR: \"+str(file)+\" not found!\")\r\n \r\n lines = txt_file.split(\"\\n\")\r\n\r\n return lines", "def loadFromFile(self, path):\n\n if \"~\" in path:\n path = os.path.expanduser(path)\n f = open(path)\n body = f.read()\n f.close()\n self._path = path\n self.loadFromString(body)", "def _load_file(self):\n try:\n with open(self.path) as f:\n conf_lines = f.readlines()\n except Exception:\n sys.stderr.write(\"open('%s') failed: %s\\n\" %\n (self.path, sys.exc_info()[1]))\n raise\n\n for lineno, line in enumerate(conf_lines):\n entry = self._handler(line, lineno, 
self.syspaths)\n self._lines.append(entry)\n self._linemap[lineno] = entry", "def load_file ( self, _file ):\n new_repos = read_repofile ( _file,\n distroot=self.distroot,\n force_distroot=self.force_distroot\n )\n if new_repos:\n self.repos.extend ( new_repos )", "def read_data(cls, input_file, quotechar=None):\r\n if 'pkl' in str(input_file):\r\n lines = load_pickle(input_file)\r\n else:\r\n lines = input_file\r\n return lines", "def process_file(filename):\n print \"Reading and Parsing File: {}\".format(filename)\n parsed_entries = file_parser(filename)\n print \"Starting to Process Entries\"\n chunked_entires = chunk_entries(parsed_entries)\n return [process_entries(entry) for entry in chunked_entires]", "def load_budgets():\n\n Budget.query.delete()\n\n with open(budget_file) as f:\n for _ in range(1):\n next(f)\n \n for row in f:\n row = row.rstrip()\n budget_data = row.split(\",\")\n id = int(budget_data[0])\n budget = budget_data[1]\n category_id = budget_data[2]\n budget_userid = budget_data[3]\n budget_start_date = budget_data[4]\n budget_end_date = budget_data[5]\n\n budget = Budget(\n id = id,\n budget = budget,\n category_id = category_id,\n budget_userid = budget_userid,\n budget_start_date = get_datetime(budget_start_date),\n budget_end_date = get_datetime(budget_end_date)\n )\n\n db.session.add(budget)\n\n db.session.commit()", "def parse_file(self, file_lines):\n # separate the file into chunks of text\n chunks, chunk = [], []\n # check to see what format the corpus is in, we assume that the headers are the same for all\n # texts in the file... (maybe not safe?)\n if re.match('Primary publication:', file_lines[0]):\n header = re.compile('Primary publication:')\n else:\n header = re.compile(r'&?P\\d{6}')\n for line in file_lines:\n if header.match(line):\n if len(chunk) > 0: # pylint: disable=len-as-condition\n chunks.append(chunk)\n chunk = [line]\n else:\n if len(line) > 0: # pylint: disable=len-as-condition\n chunk.append(line)\n chunks.append(chunk)\n self.chunks = chunks\n # create a rich catalog from the chunks\n re_translit = re.compile(r'(\\d+\\'?\\.) 
?(.*)')\n re_normaliz = re.compile(r'(#tr\\.ts:) ?(.*)')\n re_translat = re.compile(r'(#tr\\.en:) ?(.*)')\n for chunk in self.chunks:\n text = chunk\n if chunk[0].startswith('Primary publication:'):\n # we've got full metadata, add additional parsing later\n metadata = chunk[:25]\n text = chunk[26:]\n else: # no metadata\n metadata = []\n pnum = ''.join([c for c in text[0].split('=')[0] if c != '&']).rstrip()\n edition = text[0].split('=')[1].lstrip()\n text = text[3:]\n translit = []\n normaliz = []\n translat = []\n for line in text:\n if re.match(r'\\d+\\'?\\.', line):\n translit.append(re_translit.match(line).groups()[1])\n if line.startswith('#tr.ts:'):\n normaliz.append(re_normaliz.match(line).groups()[1])\n if line.startswith('#tr.en:'):\n translat.append(re_translat.match(line).groups()[1])\n self.catalog[pnum] = {'metadata': metadata,\n 'pnum': pnum,\n 'edition': edition,\n 'raw_text': text,\n 'transliteration': translit,\n 'normalization': normaliz,\n 'translation': translat}", "def _from_file(self, path):\r\n\r\n file = pathlib.Path(path)\r\n log.info(\"Extracting data from file: {}\".format(file))\r\n\r\n with open(file, 'r') as f:\r\n lines = f.readlines()\r\n\r\n self._extract_raw_data(lines)\r\n\r\n return", "def loadFile(fileName) :\n\tlines = []\n\twith open(fileName, 'r') as inFile :\n\t\tfor line in inFile :\n\t\t\tlines.append(line)\n\treturn lines", "def load_from_file(cls, file=None, file_path=None):\n if not file:\n file = open(file_path, 'r') \n if not file_path:\n file_path = file.name\n with file:\n file_meta = cls._get_file_meta(file, file_path=file_path)\n cls_properties = dict([[p, file_meta.get(p, None)] for p in cls.properties()])\n cls(key_name=file_path, **cls_properties).put()", "def load_arquivo_eventos(self, file):\n validfile, mensagem = self.valid_file(file,\n extensions=['json', 'bson', 'zip'])\n if not validfile:\n raise Exception(mensagem)\n if 'zip' in file.filename:\n file = ZipFile(file)\n content = file.read()\n content = content.decode('utf-8')\n eventos = json.loads(content)\n return eventos", "def load_data(file_path):\n file = open(file_path, \"rb\")\n tweets = []\n \n while(True):\n tweet = file.readline().decode(\"utf-8\")\n if(tweet == \"\"): # EOF\n break\n \n tweet = tweet[:-1] # Remove the \\n\n tweets.append(tweet) # We want to store a list of words\n \n file.close()\n print(\"Successfully loaded data from \" + file_path)\n return tweets", "def fromfile(cls, f):\n raise NotImplementedError(\"ScalableRedisLocalBloomFilter not support fromfile\")", "def load_categories():\n\n Category.query.delete()\n\n with open(category_file) as f:\n for _ in range(1):\n next(f)\n \n for row in f:\n row = row.rstrip()\n categories_data = row.split(\",\")\n\n id = int(categories_data[0])\n category = categories_data[1]\n\n category_model = Category(id=id, category=category)\n db.session.add(category_model)\n db.session.commit()", "def init_from_file(self, filepath, effects_log):\n df = read_input_file(filepath, effects_log, index_col=0)\n\n key = pd.Series(zip(\n df['vehicle_id'],\n df['calendar_year']\n ))\n df.set_index(key, inplace=True)\n\n self._dict = df.to_dict('index')", "def load_meals(path, ingridients):\n with open(file=path, encoding=\"utf-8-sig\", mode=\"r\") as file:\n meals = {} # meal_name: Meal_namedtuple\n header = [x.strip() for x in file.readline().strip().split(\";\")]\n lines = file.readlines()\n for line in lines:\n line = line.strip().split(\";\")\n # strings are converte to apropiate type\n data = map(lambda x: [y.strip() for y 
in x.strip().split(\",\")]\n if \",\" in x else int(x) if x.isdigit() else x.strip(),\n line)\n arguments = dict(zip(header, data))\n # name of ingridients are replaces with their instances\n arguments[\"ingridients\"] = [ingridients[ingridient] for ingridient\n in arguments[\"ingridients\"]]\n meal = Meal(**arguments)\n meals[meal.name] = meal\n return meals", "def load_file(self, beancount_file_path=None):\n if beancount_file_path:\n self.beancount_file_path = beancount_file_path\n\n self.all_entries, self.errors, self.options = \\\n loader.load_file(self.beancount_file_path)\n self.price_map = prices.build_price_map(self.all_entries)\n self.account_types = options.get_account_types(self.options)\n\n self.title = self.options['title']\n if self.options['render_commas']:\n self.format_string = '{:,f}'\n self.default_format_string = '{:,.2f}'\n else:\n self.format_string = '{:f}'\n self.default_format_string = '{:.2f}'\n self.dcontext = self.options['dcontext']\n\n self.active_years = list(getters.get_active_years(self.all_entries))\n self.active_tags = list(getters.get_all_tags(self.all_entries))\n self.active_payees = list(getters.get_all_payees(self.all_entries))\n\n self.all_root_account = realization.realize(self.all_entries,\n self.account_types)\n self.all_accounts = self._all_accounts()\n self.all_accounts_leaf_only = self._all_accounts(leaf_only=True)\n\n self._apply_filters()", "def get_entries(filename, **kwargs):\n return list(xmlreader.XmlDump(join_xml_data_path(filename),\n **kwargs).parse())", "def load_from_txt(path):\n with open(path) as file:\n data = [line.rstrip() for line in file]\n return data", "def load_tweets(path):\n with open(path, \"rb\") as f:\n import pickle\n tweets = pickle.load(f)\n return tweets", "def load(self):\n del self[0:len(self)]\n\n if not os.path.isfile(self.path):\n self.log.debug('No such file: {}'.format(self.path))\n return\n\n for line in [l.rstrip() for l in open(self.path, 'r').readlines()]:\n if line.startswith('#') or line.strip() == '':\n continue\n\n # Strip list of hosts from line\n hosts, key = line.split(None, 1)\n hosts = hosts.split(',')\n\n try:\n key = KnownHostsEntry(key)\n if key not in self:\n self.append(key)\n else:\n key = self[self.index(key)]\n key.add_hosts(hosts)\n except SSHKeyError:\n pass", "def read_data(cls, input_file,quotechar = None):\n if 'pkl' in str(input_file):\n lines = load_pickle(input_file)\n else:\n lines = input_file\n return lines", "def loadData(self, file):\n self.data = batchImport(file, self.ps)", "def to_entries(self, add_tags=[], add_time=False, moments_only=False):\n entries = []\n\n entry_regex = \"\\*\"\n entry_search = re.compile(entry_regex)\n\n cur_entry = Moment()\n cur_entry.path = self.name\n\n new_entry = None\n \n try:\n self.seek(0)\n line = self.readline()\n line = unicode(line)\n except:\n print \"Problem reading file\"\n return entries\n\n #first line of a log should have an entry... 
this is our check\n if entry_search.match(line):\n self.has_entries = True\n while line:\n #we might have found a new entry...\n #see what kind, if any:\n (ts, line_tags) = timestamp.parse_line_for_time(line)\n if ts:\n new_entry = Moment()\n new_entry.created = timestamp.Timestamp(ts)\n elif entry_search.match(line): \n if not moments_only:\n new_entry = Moment()\n elif add_time and moments_only:\n #ok to make a default time for the entry\n new_entry = Moment()\n print \"no timestamp found in this entry\"\n else:\n #must be moments only,\n #but we don't want to add a timestamp\n #just include the data with the previous moment\n new_entry = None\n\n if new_entry:\n #finish up last entry...\n #only need to add if it had information\n if cur_entry.data or cur_entry.tags:\n entries.append(cur_entry)\n\n new_entry.path = self.name\n\n current_tags = line_tags.strip().split()\n\n if add_tags:\n temp_tags = add_tags[:]\n for t in current_tags:\n if t not in temp_tags:\n temp_tags.append(t)\n current_tags = temp_tags\n\n new_entry.tags.extend(current_tags)\n cur_entry = new_entry\n new_entry = None\n\n else:\n # only want to add the entry itself\n cur_entry.data += line\n\n line = unicode(self.readline())\n \n #need to get the last entry from the file, if there is one.\n if cur_entry.data:\n entries.append(cur_entry)\n\n #if not, don't scan\n else:\n print \"File does not start with an entry: %s\" % self.name\n \n return entries", "def _ReadEntries(self):\n scope = {}\n filename = os.path.join(self._root_dir, self._options.entries_filename)\n if not os.path.exists(filename):\n return []\n exec(gclient_utils.FileRead(filename), scope)\n return scope[\"entries\"]", "def loadLiquiditySpecialCaseFromFile(file):\n\ttoDate = lambda x: \\\n\t\tfromExcelOrdinal(x) if isinstance(x, float) else \\\n\t\tdatetime.strptime(x, '%m/%d/%Y')\n\n\n\tupdatePosition = lambda position: mergeDict(\n\t\tposition\n\t , {'CALC_MATURITY': toDate(position['CALC_MATURITY'])}\n\t)\n\n\n\treturn \\\n\tcompose(\n\t\tdict\n\t , partial(map, lambda p: (p['ID'], p))\n\t , partial(map, updatePosition)\n\t , getRawPositionsFromFile\n\t)(file)", "def read_file(file_name, table):\r\n table.clear() # this clears existing data and allows to load data from file\r\n loadctr=0\r\n try:\r\n with open(file_name, 'rb') as objFile:\r\n data=pickle.load(objFile) #dump my 2D list into data\r\n while loadctr < len(data):\r\n table.append(data[loadctr]) #append my list element (which is a dictionary) into table\r\n loadctr+=1 #count number of rows loaded into memory\r\n print ('{} CD(s) loaded into inventory.\\n'.format(loadctr))\r\n except FileNotFoundError as e:\r\n print('Unable to load inventory from ' + file_name + '.') #exception handling for file not existing\r\n print ()\r\n print (e, e.__doc__, sep='\\n')\r\n print()\r\n except EOFError as e:\r\n print(file_name + ' is empty.') #exception handling for empty file\r\n print ()\r\n print (e, e.__doc__, sep='\\n')\r\n print()\r\n except pickle.UnpicklingError as e:\r\n print(file_name + ' is corrupted.') #exception handling for unpickling error\r\n print ()\r\n print (e, e.__doc__, sep='\\n')\r\n print()", "def parse(las_file):\n io_stream = io.TextIOWrapper(las_file)\n \n entry_date = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')\n entry_filename = 'las_file-' + entry_date + '.las'\n\n entry = SectionInfo()\n entry.filename = entry_filename\n section = ''\n\n for line in io_stream.readlines():\n\n line = line.rstrip()\n\n if not line:\n continue\n\n # Lines beginning with '~' denote the next 
section header.\n if line[0] == '~':\n section = line\n continue\n # Skip comment lines.\n elif line[0] == '#':\n continue\n\n # LAS standard option 'OTHER' section\n if section[1] == 'O': \n entry.value = line\n entry.section = section\n # The rest of the standard metadata sections\n elif section[1] in ['V', 'W', 'C', 'P']:\n entry = parse_formatted_section_line(section, line, entry)\n # the data section and non-standard sections\n else:\n # print(\"Non-Metadata-Section: [{}]: [{}]\".format(section[0:2], line))\n continue\n\n # Write entry to db\n entry.save()\n\n # Initialize next entry\n entry = SectionInfo()\n entry.filename = entry_filename\n\n return entry_filename", "def loadData(catalog, accidentsfile):\n accidentsfile = cf.data_dir + accidentsfile\n input_file = csv.DictReader(open(accidentsfile, encoding=\"utf-8\"),\n delimiter=\",\") \n for accident in input_file:\n model.addAccident(catalog,accident)", "def load_registry(self, fname):\n with contextlib.ExitStack() as stack:\n if hasattr(fname, \"read\"):\n # It's a file object\n fin = fname\n else:\n # It's a file path\n fin = stack.enter_context(open(fname))\n\n for linenum, line in enumerate(fin):\n if isinstance(line, bytes):\n line = line.decode(\"utf-8\")\n\n line = line.strip()\n # skip line comments\n if line.startswith(\"#\"):\n continue\n\n elements = shlex.split(line)\n if not len(elements) in [0, 2, 3]:\n raise OSError(\n f\"Invalid entry in Pooch registry file '{fname}': \"\n f\"expected 2 or 3 elements in line {linenum + 1} but got \"\n f\"{len(elements)}. Offending entry: '{line}'\"\n )\n if elements:\n file_name = elements[0]\n file_checksum = elements[1]\n if len(elements) == 3:\n file_url = elements[2]\n self.urls[file_name] = file_url\n self.registry[file_name] = file_checksum.lower()", "def loadFile(f_name):\n\n r_f = open(f_name, \"rb\")\n r_data = r_f.read().split(\"\\n\")\n rj_data = [json.loads(r) for r in r_data[:-1]]\n rjf_data = [{\"text\": r[\"lead_paragraph\"],\n \"date\": datetime.strptime(r[\"pub_date\"][0:10], \"%Y-%m-%d\"),\n \"id\": r[\"_id\"]}\n for r in rj_data]\n rjf_data = [r for r in rjf_data if r[\"text\"] is not None]\n return rjf_data", "def load_db(file):\n if os.path.isfile(file):\n try:\n start = time.time()\n db = []\n with open(file, 'r') as f:\n for item in json_lines.reader(f):\n db.append(item)\n stop = time.time() - start\n print(\"load_db time: \", stop, 'sec')\n return db\n except Exception as e:\n print(file, \"is probably corrupted. Creating empty db now...\")\n DbManager.erase_db(file)\n raise e\n\n else:\n # corrupt...\n print(\"database not found. 
creating new\")\n DbManager.new_db(file)", "def load_people(self, file_path):\n pass", "def read_chumps_from_file(self):\n for chump in self.comment_store.get_stored_chumps():\n self.add_chump(chump)", "def init_info_from_file(self, filename, log_level=logging.INFO):\n\n self.info = MissionInfo()\n self.info.read_mission_info(filename)\n self._init_logger(log_level)\n\n self.logger.info(\"Mission instance read succeeded.\")", "def load_items(self, filename):\n with open(filename, \"r\") as f:\n itemss = []\n for line in f:\n line = line.strip()\n # Add name, description and initial location to each item object\n if line.upper():\n name = line\n line = f.readline()\n line = line.strip()\n description = line\n line = f.readline()\n line = line.strip()\n initial_room_id = line\n item = Item(name, description, initial_room_id)\n itemss.append(item)\n line = f.readline()\n return itemss", "def read_from_file(self, file_name):\n with open(file_name) as file:\n record_id = 0\n for line in file:\n name = str(line).split(\"\\t\")\n record_id += 1\n self.city_names.append(name[0])\n self.original_records.append(line)\n # replacing the leftmost non-overlapping occurrences\n line = re.sub(\"\\W+\", \"\", line).lower()\n self.records.append(line)\n for qgram in self.get_qgrams(line):\n if len(qgram) > 0:\n # check if the qgram's already there\n if qgram not in self.inverted_lists:\n self.inverted_lists[qgram] = list()\n self.inverted_lists[qgram].append(record_id)", "def add_lines_from_file(self, file_path: str):\r\n\r\n # open the path as \"file\" -- this automatically closes the file\r\n with open(file_path) as file:\r\n # for each line in the file\r\n for line in file:\r\n # add the line\r\n self.add_line(line)", "def load_from_file(self, filename):\n # clear datastore mape\n self._datastoreMap = {}\n # citanje filea\n with open(filename, 'rb') as f:\n binstr = f.read()\n inMap = pickle.loads(binstr)\n # za svaki kanal moramo dodati element u _datastoreMap\n for kanal in inMap:\n # stvaramo instancu Datastore\n self._datastoreMap[kanal] = DataStore()\n # instanca Datastore zna se otpakirati iz mape (dictionary)\n self._datastoreMap[kanal].dict2store(inMap[kanal])", "def load_embeddings(self, str_file):\n\n with open(str_file, 'rb') as f_read:\n self.embeddings_entity = pickle.load(f_read)\n self.embeddings_relation = pickle.load(f_read)\n self.dict_paras = pickle.load(f_read)", "def load_rentals_file(filename):\n logging.debug('Loading rental file %s', filename)\n try:\n with open(filename) as file:\n try:\n data = json.load(file)\n except ValueError:\n logging.error('File %s cannot be read as JSON', filename)\n exit(0)\n except IOError:\n logging.error('File %s cannot be read (does not exist?)', filename)\n exit(0)\n logging.debug('Successfully loaded rental file %s', filename)\n return data", "def load(self, filename=\"\"):\n if len(filename) == 0:\n filename = self.filename\n self.quotes_list.load(filename)", "def from_file(cls, basename, *args, **keys):\n log.verbose(\"Loading mapping\", repr(basename), verbosity=55)\n path = keys.get(\"path\", None)\n if path:\n filename = os.path.join(path, os.path.basename(basename))\n basename = filename\n else:\n filename = config.locate_mapping(basename)\n text = utils.get_uri_content(filename)\n return cls.from_string(text, basename, *args, **keys)", "def getJournal(self):\n if 'journal' in self.extras: \n return self.extras['journal']\n #--Default \n self.extras['journal'] = _('[No Journal Record Found.]')\n #--Open save file and look for journal entry\n 
inPath = os.path.join(self.dir,self.name)\n ins = Tes3Reader(self.name,file(inPath,'rb'))\n #--Raw data read\n while not ins.atEnd():\n #--Get record info and handle it\n (name,size,delFlag,recFlag) = ins.unpackRecHeader()\n if name != 'JOUR':\n ins.seek(size,1,name)\n #--Journal\n else:\n (subName,subSize) = ins.unpackSubHeader('JOUR')\n if subName != 'NAME':\n self.extras['journal'] == _('[Error reading file.]')\n else:\n reDate = re.compile(r'<FONT COLOR=\"9F0000\">(.+?)</FONT><BR>')\n reTopic = re.compile(r'@(.*?)#')\n data = ins.read(subSize)\n data = reDate.sub(ReplJournalDate(),data)\n data = reTopic.sub(r'\\1',data)\n self.extras['journal'] = cstrip(data)\n break\n #--Done\n ins.close()\n print self.extras['journal']\n return self.extras['journal']", "def load_lines(filename):\r\n lines = []\r\n f = open(filename)\r\n for line in f.readlines():\r\n line = line.strip()\r\n lines.append(line)\r\n return lines", "def Load_File(filename):\n with open(filename) as file:\n data = file.readlines()\n return data", "def load_expenditures():\n\n Expenditure.query.delete()\n\n with open(expenditure_file) as f:\n for _ in range(1):\n next(f)\n \n for row in f:\n row = row.rstrip()\n expenditure_data = row.split(\",\")\n print(expenditure_data)\n\n id = expenditure_data[0]\n category_id = expenditure_data[1]\n price = expenditure_data[2]\n date_of_expenditure = expenditure_data[3]\n expenditure_userid = expenditure_data[4]\n where_bought = expenditure_data[5]\n description = expenditure_data[6]\n\n expenditure = Expenditure(\n id = id,\n category_id = category_id,\n price = price,\n date_of_expenditure = get_datetime(date_of_expenditure),\n expenditure_userid = expenditure_userid,\n where_bought = where_bought,\n description = description\n )\n\n db.session.add(expenditure)\n\n db.session.commit()", "def from_file(cls, filepath, stemmer, synonyms):\n assert(isinstance(stemmer, Stemmer))\n assert(isinstance(synonyms, SynonymsCollection))\n entities = []\n with io.open(filepath, \"r\", encoding='utf-8') as f:\n for line in f.readlines():\n args = line.split()\n\n e_ID = args[0]\n e_str_type = args[1]\n e_begin = int(args[2])\n e_end = int(args[3])\n e_value = \" \".join([a.strip().replace(',', '') for a in args[4:]])\n a = Entity(e_ID, e_str_type, e_begin, e_end, e_value)\n\n entities.append(a)\n\n return cls(entities, stemmer, synonyms)", "def fetch_from_file(self, path):\n print(\"Fetching from %s ...\" % path)\n try:\n lines = open(path).readlines()\n except Exception, e:\n print(\"Failed to fetch from %s: %s\" % (path, e))\n return False\n\n self.domain_list = []\n for line in lines:\n domain_line = line.rstrip()\n if domain_line != '':\n self.domain_list.append(domain_line)\n\n print(\"Got %d domains\" % len(self.domain_list))\n return True", "def load_lookup_table(path):\n assert os.path.exists(path), \"Lookup table file not found.\"\n with open(path, \"rb\") as dillfile:\n return dill.load(dillfile)", "def loaditems(self, fh):\n pass", "def load_org_notes_file():\n nodelist = orgnode.makelist(NOTES_ORG_FILE)\n return nodelist", "def load_pres_debates():\n debates = PlaintextCorpusReader(path.join(pwd, \"pres_debates\"), \".*.txt\")\n return debates", "def read_file(filename) -> List[Todo]:\n with pathlib.Path(filename).expanduser().open('r') as fp:\n return [Todo(_id, line) for _id, line in enumerate(fp)]", "def load_collection_from_file(filename):\n return load_collection_from_fp(open(filename))", "def _load_lemma_data(path_to_lemma_file):\n with open(path_to_lemma_file, 'r', 
encoding=\"utf-8\") as lemma_file:\n tmp_lemma_data = []\n for line in lemma_file:\n line = line.strip(\"\\n\")\n tmp_lemma_data.append(line)\n if \"ß\" in line:\n line = line.replace(u'ß', 'ss')\n tmp_lemma_data.append(line)\n return set(tmp_lemma_data)", "def load(file_name):\n file_data = []\n with io.open(file_name, \"r\", encoding=\"utf-8\") as f:\n file_data = [line.rstrip('\\n') for line in f]\n return file_data", "def fromfile(cls, f, n=-1):\n raise NotImplementedError(\"RedisLocalBloomFilter not support fromfile\")", "def load_ingridients(path):\n with open(file=path, encoding=\"utf-8-sig\", mode=\"r\") as file:\n ingridients = {} # ingridient_name: ingridient_instance\n header = [x.strip() for x in file.readline().strip().split(\";\")]\n lines = file.readlines()\n for line in lines:\n line = line.strip().split(\";\")\n # strings are converte to apropiate type\n data = map(lambda x: int(x) if x.isdigit() else x, line)\n arguments = dict(zip(header, data))\n instance = Ingridient(**arguments)\n ingridients[instance.name] = instance\n return ingridients", "def load_data(filename):\n hkas = HKArchiveScanner()\n hkas.process_file(filename)\n cat = hkas.finalize()\n fields, timelines = cat.get_data(['position'], short_match=True)\n return fields, timelines" ]
[ "0.7394419", "0.64924157", "0.62799805", "0.6213144", "0.6060179", "0.6032827", "0.5937667", "0.5873339", "0.5792897", "0.57746816", "0.57655644", "0.57224065", "0.56214803", "0.5567517", "0.5565053", "0.5565053", "0.5560204", "0.55173564", "0.54818314", "0.54786164", "0.54533386", "0.5433665", "0.54236835", "0.5408636", "0.5350907", "0.5341112", "0.53344345", "0.53302884", "0.53109455", "0.5304969", "0.5300213", "0.52980304", "0.5292896", "0.5275363", "0.52681017", "0.52681017", "0.5247321", "0.5234184", "0.5232799", "0.5231983", "0.522807", "0.52193964", "0.5215131", "0.5211391", "0.52015525", "0.5199987", "0.519853", "0.5198501", "0.51833564", "0.5177397", "0.5172412", "0.51693773", "0.5162688", "0.5156345", "0.51467365", "0.5144985", "0.51431865", "0.5141255", "0.51406187", "0.5135655", "0.5129929", "0.51293224", "0.51238394", "0.5119999", "0.51162165", "0.51083684", "0.51029545", "0.51019365", "0.5101926", "0.50972", "0.50948304", "0.50816214", "0.5078776", "0.5077484", "0.5075773", "0.5075505", "0.507485", "0.50734824", "0.50734043", "0.5065326", "0.5064923", "0.50620604", "0.50608504", "0.50522906", "0.50502545", "0.50478673", "0.5042617", "0.5039275", "0.50386447", "0.50376725", "0.50363505", "0.50337195", "0.50318134", "0.501493", "0.50128925", "0.50012743", "0.49973884", "0.49939296", "0.49894664", "0.49886876" ]
0.81673855
0
Load journal entries from a URI.
def load_from_web(journal: Journal, uri: str) -> None:
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load(name):\n jrn_path = build_path(name)\n if not os.path.exists(jrn_path):\n print(f'... journal file \\'{jrn_path}\\' does not exist ...')\n print('... initializing new journal ...')\n with open(jrn_path, 'w') as file:\n pass\n return []\n else:\n print(f'... loading journal entries from {jrn_path} ...')\n journal = []\n with open(jrn_path, 'r') as file:\n for line in file:\n journal.append(line.rstrip())\n print(f'... loaded {len(journal)} items')\n return journal", "def load(journal: Journal, file: Path) -> None:", "def get_entries(uri):\n if not uri.endswith('/entries'):\n uri += '/entries'\n results = VGOCache(uri).results\n\n results = [ adjust_entry(x) for x in results ]\n return results", "def fetchJournalEntries(date):\n\t\n\tpattern = '%d/%m/%Y'\n\tdatetime_object = datetime.datetime.strptime(date, pattern)\n\t\n\t#Getting the feeds from respective feed functions\n\tslackFeed = getFromSlack(datetime_object)\n\twebServiceFeed = getFromWebService(datetime_object)\n\tgithubFeed = getFromGitService(datetime_object)\n\tdynamoFeed = getFromDynamo(datetime_object)\n\t\n\t#Combining feeds into a single output\n\tentireFeed = reconcileFeed(slackFeed, webServiceFeed, githubFeed, dynamoFeed)\n\t\n\treturn entireFeed", "def load_articles():\n\t\n\tlog(\"Reading articles from file: articles_dumped...\")\n\tf = open(os.path.join(logdir, \"articles_dumped\"), 'rb')\n\tdumped = f.read()\n\tf.close()\n\t\n\tarticles = pickle.loads(dumped)\n\t\n\tlog(\"Done!\")\n\tsys.stdout.write(\"Done!\\n\")\n\tsys.stdout.flush()\n\t\n\treturn articles", "def loadFromStream(self, stream, uri=None):\n self.loadFromDom(parseStream(stream))", "def zhihu_rss_fetcher(ctx):\n URL = 'http://www.zhihu.com/rss'\n coll = ctx.get_mongo_collection()\n\n for entry in fetch_rss(URL).entries:\n try:\n coll.insert({'_id': entry.link})\n except DuplicateKeyError:\n continue\n ctx.new_item(TextOnlyItem(entry.title, entry.description), ['zhihu'],\n parse_entry_time(entry),\n {'id': entry.link})\n log_info(u'zhihu: new entry: {} {}'.format(entry.link,\n entry.title))", "def __init__(self, url=URL):\n self.entries = feedparser.parse(url).entries", "def load_journal_json(self, absolute_path):\n with open(absolute_path) as json_file:\n data = json.load(json_file)\n\n return data", "def fetch_entries(self):\n entries = []\n rss_list = self.__data_adapter.fetch_rss()\n for rss in rss_list:\n rss_href = rss.get('url', None)\n if rss_href is not None:\n feed = feedparser.parse(rss_href)\n [entries.append(FeedDocument(entry.get('title', ''), entry.get('summary', ''))) for entry in feed.get('entries', [])]\n return entries", "def fetch(feed):\n # Fetch the feed data.\n data = feedparser.parse(feed.ext_url)\n new_articles = []\n\n # If the `bozo` value is anything\n # but 0, there was an error parsing (or connecting) to the feed.\n if data.bozo:\n # Some errors are ok.\n if not isinstance(data.bozo_exception, feedparser.CharacterEncodingOverride) and not isinstance(data.bozo_exception, feedparser.NonXMLContentType):\n raise data.bozo_exception\n\n for entry in data.entries:\n\n # URL for this entry.\n url = entry['links'][0]['href']\n\n # Check for an existing Article.\n # If one exists, skip.\n if Article.objects(ext_url=url).first():\n continue\n\n data = extractor.extract(url, existing_data=entry)\n\n if data is None:\n continue\n\n # Secondary check for an existing Article,\n # by checking the title and source.\n existing = Article.objects(title=data['title']).first()\n if existing and existing.feed.source == feed.source:\n 
continue\n\n data['feed'] = feed\n\n article = Article(**data)\n article.save()\n new_articles.append(article)\n\n return new_articles", "def load_history_entries(self, *entries):\n # Simplified version:\n for entry in entries:\n try:\n self[entry.url.host] += [entry]\n except KeyError:\n self[entry.url.host] = [entry]\n \n \n temp_dict = {entry.url.host: [] for entry in entries} \n for entry in entries:\n temp_dict[entry.url.host] += [entry]\n\n # Update the dictionary\n # self.update(temp_dict) # Will override any lists with the same host name\n for host, entry in temp_dict.items():\n #try:\n self[host] += [entry]\n #except IndexError:\n #self[host] = [entry]", "def fetch_from_file(self, path):\n print(\"Fetching from %s ...\" % path)\n try:\n lines = open(path).readlines()\n except Exception, e:\n print(\"Failed to fetch from %s: %s\" % (path, e))\n return False\n\n self.domain_list = []\n for line in lines:\n domain_line = line.rstrip()\n if domain_line != '':\n self.domain_list.append(domain_line)\n\n print(\"Got %d domains\" % len(self.domain_list))\n return True", "def loadArtworks(catalog):\n artfile = cf.data_dir + 'Artworks-utf8-large.csv'\n input_file = csv.DictReader(open(artfile, encoding='utf-8'))\n for artwork in input_file:\n model.addArtwork(catalog, artwork)", "def articles():\n entries = []\n cur = g.db.execute(\n \"\"\"\n SELECT entries.location FROM categories\n INNER JOIN entries ON\n entries.slug = categories.slug AND\n entries.published = categories.published\n WHERE categories.category='{category}'\n ORDER BY entries.published DESC\n \"\"\".format(category='article'))\n\n for (row,) in cur.fetchall():\n if os.path.exists(row+\".md\"):\n entries.append(file_parser(row+\".md\"))\n return render_template('blog_entries.html', entries=entries)", "def loadData(catalog):\n loadArtworks(catalog)\n loadArtists(catalog)", "def load(self, uri):\r\n self._encoder = load_model(uri+\"_lstm_encoder.hdf5\")\r\n self._autoencoder = load_model(uri+\"_lstm_autoencoder.hdf5\")\r\n\r\n pf = PyFolder(os.path.dirname(os.path.realpath(uri)))\r\n dict_options = pf[os.path.basename(uri)+\"_options.json\"]\r\n\r\n self._latent_space = dict_options['latent_space']\r\n self._input_cells = dict_options['input_cells']", "def loadData(catalog):\n loadArtists(catalog)\n loadArtworks(catalog)", "def loadData(catalog):\n loadArtists(catalog)\n loadArtworks(catalog)", "def LoadArtIntoDB(store,art):\n if 'srcorgname' in art and art['srcorgname'] is not None:\n srcorg = Misc.GetOrgID( art[ 'srcorgname' ] )\n else:\n # no publication specified - look up using domain name\n o = urlparse.urlparse(art['permalink'])\n domain = o[1].lower()\n srcorg = Publication.find_or_create(domain)\n art['srcorg'] = srcorg\n\n\n # resolve bylined authors to journo ids\n expected_journo = None\n authors = Byline.CrackByline(art['byline'])\n attributed = []\n for author in authors:\n attributed.append(Journo.find_or_create(author, art, expected_journo))\n art['journos'] = attributed\n\n# if opts.test:\n# ukmedia.PrettyDump( art )\n\n article_id = store.upsert( art )\n\n return article_id", "def loadArtists(catalog):\n artistsfile = cf.data_dir + 'MoMA/Artists-utf8-10pct.csv'\n input_file = csv.DictReader(open(artistsfile, encoding='utf-8'))\n for artist in input_file:\n model.addArtist(catalog, artist)", "def _handle_import(contents, use_tags, owner):\n \n lines = contents.decode(\"utf-8\").split(\"\\n\")\n \n title = re.compile(r\"<a.*?>(.+?)</a>\", re.I)\n url = re.compile(r\"\"\"<a.*href=['\"](.+?)['\"]\"\"\", re.I)\n 
tags = re.compile(r\"\"\"<a.*?tags=[\"'](.+?)[\"']\"\"\", re.I)\n addTime = re.compile(r\"\"\"<a.*?add_date=[\"'](\\d+?)[\"']\"\"\", re.I)\n \n for l in lines:\n if \"<a\" in l.lower() and \"</a>\" in l.lower():\n bookmark = {}\n \n bookmark[\"title\"] = title.search(l)\n if not bookmark[\"title\"]:\n continue\n bookmark[\"title\"] = _unescape(bookmark[\"title\"].group(1))\n \n bookmark[\"url\"] = url.search(l)\n if not bookmark[\"url\"]:\n continue\n bookmark[\"url\"] = _unescape(bookmark[\"url\"].group(1))\n \n bookmark[\"tags\"] = [];\n if use_tags:\n result = tags.search(l)\n if result:\n bookmark[\"tags\"] = map(_unescape, result.group(1).split(\",\"))\n \n bookmark[\"added\"] = addTime.search(l)\n if bookmark[\"added\"]:\n bookmark[\"added\"] = bookmark[\"added\"].group(1)\n \n if not Bookmark.objects.filter(owner=owner, url=bookmark[\"url\"]).exists():\n bm = Bookmark(owner=owner, url=bookmark[\"url\"], title=bookmark[\"title\"])\n \n bm.save()\n if bookmark[\"added\"]:\n bm.added = datetime.datetime.fromtimestamp(int(bookmark[\"added\"]))\n \n for t in bookmark[\"tags\"]:\n bm.tag(t)\n \n bm.save()\n bm.autotag_rules()", "def loadData(catalog):\n\n loadArtwork(catalog)\n loadArtists(catalog)", "def get_entries_by_url(self, url, regex=False, flags=None,\n group=None, history=False, first=False): \n if self.database is None:\n raise DatabaseNotOpened('No KeePass Database Opened.')\n else:\n return self.database.find_entries_by_url(url, \n regex, \n flags, \n group, \n history, \n first)", "def get_posts(url):\r\n feed = feedparser.parse(url)\r\n return feed.entries", "def getContents(self, itemId, itemURI, *args):\n if args:\n actionId = self._db.addAction(args[0]) \n else:\n actionId = -1\n\n print('\\t\\t[%s] %s\\t(%s)' % (itemId, itemURI, actionId))\n \n # dissect the file\n patURL = re.compile(r'URL=(?P<url>.*$)', re.IGNORECASE)\n patHttp = re.compile(r'(?P<url>http.*$)', re.IGNORECASE)\n patFtp = re.compile(r'(?P<url>ftp.*$)', re.IGNORECASE)\n\n f = open(itemURI,\"r\")\n url = ''\n idx = -1\n\n for line in f:\n idx += 1\n m = patURL.match(line)\n if not m:\n m = patHttp.match(line)\n\n if not m:\n m = patFtp.match(line)\n\n if m:\n url = m.group('url')\n itemIdRight = self._db.addItem(self._engine_id, url, datetime.datetime.now(), args)\n self._db.addItemLink(self._engine_id, itemId, itemIdRight, 'Contains')\n \n # we have a URI, down we wnat to action it, use the tail value to set the action:\n self._db.addItemEvent(self._engine_id, actionId, itemIdRight)\n\n self._db.addItemData(itemId, 'Contents', line, idx)", "def load_article(paper_id):\n s3 = boto3.client('s3')\n key = 'json/%s.json' % paper_id\n LOGGER.info('bucket=%s, key=%s', ARTICLE_BUCKET_NAME, key)\n obj = s3.get_object(\n Bucket=ARTICLE_BUCKET_NAME,\n Key=key)\n body = obj['Body']\n article = json.loads(body.read())\n body.close()\n return article", "def load(self, url):\n pass", "def load(self, url):\n pass", "def GetDocumentListEntry(self, uri):\n return self.Get(uri, converter=gdata.docs.DocumentListEntryFromString)", "def loadArtworks(catalog):\n artworksfile = cf.data_dir + 'MoMA/Artworks-utf8-10pct.csv'\n input_file = csv.DictReader(open(artworksfile, encoding='utf-8'))\n for artwork in input_file:\n model.addArtwork(catalog, artwork)", "def load_mta_archived_feed(feed='gtfs', timestamp='2014-09-17-09-31'):\n import requests\n\n return requests.get(\"https://datamine-history.s3.amazonaws.com/{0}-{1}\".format(feed, timestamp))", "def loadArtworks(catalog):\n booksfile = cf.data_dir + 
'MoMA/Artworks-utf8-small.csv'\n input_file = csv.DictReader(open(booksfile, encoding='utf-8'))\n for artwork in input_file:\n model.addArtwork(catalog, artwork)", "def load_authors(keys: [str]):\n not_in_local_cache = []\n result = []\n for key in keys:\n try:\n result.append(_author_data_cache[key].to_dict())\n del _author_data_cache[key]\n except KeyError:\n not_in_local_cache.append(key)\n \n if len(not_in_local_cache):\n doc_refs = [db.collection(AUTHOR_CACHE_COLLECTION).document(key)\n for key in keys]\n data = db.get_all(doc_refs)\n for datum in data:\n if not datum.exists:\n raise cache_buddy.CacheMiss(datum.id)\n result.append(datum.to_dict())\n return [_decompress_record(r) for r in result]", "def loadArtworks(catalog):\n artworksfile = cf.data_dir + 'MoMA/Artworks-utf8-small.csv'\n input_file = csv.DictReader(open(artworksfile, encoding='utf-8'))\n for artwork in input_file:\n model.addArtwork(catalog, artwork)", "def pull_articles(ls):\n # pull articles\n doi = self.search_articles(file)\n els_key = self.els_key\n\n for i in doi:\n els_url = 'https://api.elsevier.com/content/article/doi/' + doi + '?APIKey=' + els_key\n r = requests.get(els_url)\n for num in range(len(ls)):\n with open(folder + f'/write_test_els_paper{num}.xml', 'wb') as file:\n file.write(r.content)", "def read_data(osint_url, file_name):\n\n # Read in the file from https://osint.bambenekconsulting.com/feeds/\n osint_feed_url = osint_url + file_name\n\n logs_feed = urllib.request.urlopen(url=osint_feed_url).read().decode('utf-8')\n logs_feed = logs_feed.split('\\n')\n return logs_feed", "def loadArtists(catalog):\n tagsfile = cf.data_dir + 'MoMA/Artists-utf8-small.csv'\n input_file = csv.DictReader(open(tagsfile, encoding='utf-8'))\n for artist in input_file:\n model.addArtist(catalog, artist)", "def _load_for_cache(self, doc_uri, doc, parsed_uri):\n remote_uri = '{}://{}/{}'.format(\n parsed_uri.scheme, parsed_uri.netloc, parsed_uri.path)\n if self.verbose:\n print('Loading URI {}'.format(remote_uri), file=sys.stderr)\n response = self.session.get(remote_uri)\n try:\n response.raise_for_status()\n except HTTPError as e:\n raise RefResolutionException(\n 'Could not load file {}'.format(parsed_uri.geturl()))\n remote_json = self._load_json(response)\n return remote_json", "def test_detail_route_loads_proper_entry(testapp, fill_the_db):\n response = testapp.get('/journal/2', status=200)\n title = response.html.find_all(class_='articleTitle')[0].contents[0]\n assert title == ENTRIES[1][\"title\"]", "def mark_read(self, entries: Optional[list[Entry]] = None):\n\n with make_reader(self.reader_db_file) as reader:\n if entries is None:\n entries = reader.get_entries(read=False)\n for e in entries:\n reader.mark_entry_as_read(e)", "def parse_feed(uri):\n\n if OUTPUT:\n print \"parsing \" + uri\n\n feed = urllib2.urlopen(uri)\n xml = minidom.parse(feed)\n \n # look for <enclosure> tags\n enclosures = xml.getElementsByTagName(\"enclosure\")\n\n # extract the url attribute from any <enclosure> tags found\n file_uris = []\n for enclosure in enclosures:\n file_uris.append(enclosure.attributes[\"url\"].value)\n\n download_files(file_uris)", "def load_archive(self, from_date, to_date=None):\r\n return self.get_or_create_archive().fetch(from_date, to_date)", "def load_posts():\n \n with open(FILE_NAME, 'r') as f:\n return pickle.load(f)", "def __add_entries(entries, feed):\n\n for entry in entries:\n try:\n # If there is entry with such title in this feed\n Entry.objects.get(title=entry.title, feed=feed)\n continue\n except 
Entry.DoesNotExist:\n pass\n\n # Try to find another entries with such title\n e = Entry.objects.filter(title=entry.title)\n # If found\n if len(e) != 0:\n e = e[0]\n # Copy all containing\n entry_obj = Entry(title=e.title,\n description=e.description,\n entry=e.entry, feed=feed)\n entry_obj.save()\n # Or create new Entry from scratch\n else:\n entry_name = entry.title + '.html'\n # If bad link or entry name\n try:\n urlretrieve(entry.link, entry_name)\n\n entry_file = open(entry_name)\n entry_file = File(entry_file)\n\n entry_obj = Entry(title=entry.title,\n description=entry.description,\n entry=entry_file, feed=feed)\n entry_obj.save()\n\n os.remove(entry_name)\n except:\n # Go to next entry\n continue", "def _load_for_cache(self, parsed_uri, session):\n remote_uri = \"{}://{}/{}\".format(parsed_uri.scheme, parsed_uri.netloc, parsed_uri.path)\n if self.verbose:\n print(\"Loading URI {}\".format(remote_uri), file=sys.stderr)\n response = session.get(remote_uri)\n try:\n response.raise_for_status()\n except HTTPError as e:\n raise RefResolutionException(\n \"Could not load file {}\".format(parsed_uri.geturl())\n ) from e\n remote_json = self._load_json(response)\n return remote_json", "def _load(self, list_of_schema_urls):\n for uri in list_of_schema_urls:\n with urllib.request.urlopen(uri) as url:\n data = {}\n try:\n data = json.loads(url.read().decode())\n except:\n print(\"Failed to read schema from \" + uri)\n self._parser._load_schema(data)\n return self", "def loadArticles(self, pubList=[], outdir = \"\"):\n\n allArticles = {}\n publishers = [x for x in os.listdir(self.workdir) if x.find(\".\") == -1]\n for publisher in publishers:\n if len(pubList) > 0:\n if publisher not in pubList:\n continue\n articles = []\n publisherDir = \"{}/{}\".format(self.workdir, publisher)\n for month in [x for x in os.listdir(publisherDir) if x.find(\".\") == -1]:\n monthDir = \"{}/{}\".format(publisherDir, month)\n for day in os.listdir(monthDir):\n if day.endswith(\".csv\"):\n articles.append(\"{}/{}\".format(monthDir, day))\n\n articles = pd.concat([pd.read_csv(x) for x in articles], ignore_index=True)\n articles[\"source\"] = publisher\n allArticles[publisher] = articles.drop_duplicates(subset=\"text\", keep='first', inplace=False)\n allArticles[publisher].date = allArticles[publisher].date.apply(lambda x: x.split(\"-\")[0])\n allArticles[publisher][\"label\"] = allArticles[publisher][\"source\"] + \"_\" + allArticles[publisher][\"date\"]\n allArticles[publisher] = allArticles[publisher][[\"text\", \"label\"]]\n\n\n if len(articles) > 1:\n self.bigdf= pd.concat([allArticles[x] for x in allArticles]).fillna(\"\")\n else:\n self.bigdf = articles[list(allArticles.keys())[0]]\n self.ArticlesLoaded = True\n\n if outdir == \"\":\n return\n\n os.makedirs(outdir, exist_ok=True)\n self.bigdf.to_pickle(\"{}/bigdf.pkl\".format(outdir))", "def main():\n ds = 72\n title = 'Journal'\n journal_name = 'my-journal'\n headers.dashes_line(ds)\n headers.print_header(title, ds)\n data = journal.load(journal_name)\n event_loop(journal_name, data)\n # list_entries(data)\n # add_entry(data)\n # journal.save(journal_name, data)", "def load(uri: str, type: Optional[str] = None, *args, **kwargs) -> DataObject:\n from . import data # Loads all formats\n from . 
import core\n\n if type:\n return core.DataObject.registered_types[type].from_uri(uri, *args, **kwargs)\n else:\n return core.DataObject.from_uri(uri, *args, **kwargs)", "def _fetch(self):\n self._data = self._get(self.url)\n\n if self._data['released_errata'] is not None:\n self._released_errata = Erratum(errata_id=self._data[\n 'released_errata']['id'])\n\n for errata_dict in self._data['all_errata']:\n errata = Erratum(errata_id=errata_dict['id'])\n self._all_errata.append(errata)\n\n self._signed_rpms = self._data.get('rpms_signed')\n\n for et_file in self._data['files']:\n self._files.append(et_file['path'])", "def load_news(states=None):\n _connect()\n LOGGER.info(\"Reading newspaper configuration file ...\")\n valid_papers = read_config(\"newspaper\")\n if states:\n states = [states] if isinstance(states, str) else states\n valid_papers = [\n paper for paper in valid_papers if _check_state(states, paper[\"states\"])\n ]\n\n for metadata in tqdm(valid_papers):\n paper = newspaper.build(metadata[\"url\"])\n paper_data = {\n \"source_id\": metadata[\"id\"],\n \"source_name\": metadata[\"name\"],\n \"source_brand\": paper.brand,\n \"source_description\": paper.description,\n }\n\n for paper_article in paper.articles:\n try:\n paper_article.build()\n except (newspaper.ArticleException, ValueError):\n continue\n\n if not paper_article.summary:\n continue\n\n article_data = deepcopy(paper_data)\n article_data.update(\n {\n \"title\": paper_article.title,\n \"body\": paper_article.text,\n \"summary\": paper_article.summary,\n \"keywords\": paper_article.keywords,\n \"images\": list(paper_article.images),\n \"url\": paper_article.url,\n }\n )\n article = Article.from_dict(article_data)\n db.insert_obj(article, table=\"articles\", connection=_connection)", "def DataFromURI (uri, archive_directory=None):\n\n from pyxb.utils.six.moves.urllib.request import urlopen\n stream = None\n exc = None\n # Only something that has a colon is a non-file URI. Some things\n # that have a colon are a file URI (sans schema). Prefer urllib2,\n # but allow urllib (which apparently works better on Windows).\n if 0 <= uri.find(':'):\n try:\n stream = urlopen(uri)\n except Exception as e:\n exc = e\n if (stream is None) and six.PY2:\n import urllib\n try:\n stream = urllib.urlopen(uri)\n exc = None\n except:\n # Prefer urllib exception\n pass\n if stream is None:\n # No go as URI; give file a chance\n try:\n stream = open(uri, 'rb')\n exc = None\n except Exception as e:\n if exc is None:\n exc = e\n if exc is not None:\n _log.error('open %s', uri, exc_info=exc)\n raise exc\n try:\n # Protect this in case whatever stream is doesn't have an fp\n # attribute.\n if isinstance(stream, six.moves.file) or isinstance(stream.fp, six.moves.file):\n archive_directory = None\n except:\n pass\n xmld = stream.read()\n if archive_directory:\n base_name = os.path.basename(os.path.normpath(urlparse.urlparse(uri)[2]))\n counter = 1\n dest_file = os.path.join(archive_directory, base_name)\n while os.path.isfile(dest_file):\n dest_file = os.path.join(archive_directory, '%s.%d' % (base_name, counter))\n counter += 1\n try:\n OpenOrCreate(dest_file).write(xmld)\n except OSError as e:\n _log.warning('Unable to save %s in %s: %s', uri, dest_file, e)\n return xmld", "def articles():\n\n # Store the 'geo' part of the URL as a string called 'geo'. Check 'geo' loaded, and produce runtime error if not.\n # e.g. 
'12589'\n geo = request.args.get(\"geo\")\n if not geo:\n raise RuntimeError(\"missing geo\")\n\n # Run 'geo' through 'lookup()' function, store resulting list of objects in 'rows'.\n # e.g. [{'link':'www.website1.com','title':'article_title1'},{'link':'www.website2.com','title':'article_title2'}]\n rows = lookup(geo)\n\n # Run 'rows' through 'jsonify()'' function, and return resulting dictionary w/ up to 5 objects. The 'jsonify()' function modifies the input to JSON.\n # e.g. [{'link':'www.website1.com','title':'article_title1'},{'link':'www.website2.com','title':'article_title2'}]\n if len(rows) > 5:\n return jsonify(rows[0], rows[1], rows[2], rows[3], rows[4])\n else:\n return jsonify(rows)", "def fetch_article(self):\n dl = download(self.url, max_content_length=settings.PYPO_MAX_CONTENT_LENGTH)\n self.title, self.readable_article = parse(self, content_type=dl.content_type,\n text=dl.text, content=dl.content)", "def load(self):\n del self[0:len(self)]\n\n if not os.path.isfile(self.path):\n self.log.debug('No such file: {}'.format(self.path))\n return\n\n for line in [l.rstrip() for l in open(self.path, 'r').readlines()]:\n if line.startswith('#') or line.strip() == '':\n continue\n\n # Strip list of hosts from line\n hosts, key = line.split(None, 1)\n hosts = hosts.split(',')\n\n try:\n key = KnownHostsEntry(key)\n if key not in self:\n self.append(key)\n else:\n key = self[self.index(key)]\n key.add_hosts(hosts)\n except SSHKeyError:\n pass", "def parse_weblog(path):\n fullpath = os.path.abspath(path)\n with open(fullpath, 'rb') as f:\n for line in f:\n yield make_entry(line)", "def getJournal(self):\n if 'journal' in self.extras: \n return self.extras['journal']\n #--Default \n self.extras['journal'] = _('[No Journal Record Found.]')\n #--Open save file and look for journal entry\n inPath = os.path.join(self.dir,self.name)\n ins = Tes3Reader(self.name,file(inPath,'rb'))\n #--Raw data read\n while not ins.atEnd():\n #--Get record info and handle it\n (name,size,delFlag,recFlag) = ins.unpackRecHeader()\n if name != 'JOUR':\n ins.seek(size,1,name)\n #--Journal\n else:\n (subName,subSize) = ins.unpackSubHeader('JOUR')\n if subName != 'NAME':\n self.extras['journal'] == _('[Error reading file.]')\n else:\n reDate = re.compile(r'<FONT COLOR=\"9F0000\">(.+?)</FONT><BR>')\n reTopic = re.compile(r'@(.*?)#')\n data = ins.read(subSize)\n data = reDate.sub(ReplJournalDate(),data)\n data = reTopic.sub(r'\\1',data)\n self.extras['journal'] = cstrip(data)\n break\n #--Done\n ins.close()\n print self.extras['journal']\n return self.extras['journal']", "def load_mytransit_archived_feeds(timestamp=datetime.datetime(2017, 1, 1, 12, 0)):\n import requests\n\n ts = timestamp\n uri = \"http://data.mytransit.nyc.s3.amazonaws.com/subway_time/{0}/{0}-{1}/subway_time_{2}.tar.xz\".format(\n ts.year, str(ts.month).rjust(2, '0'), str(ts.year) + str(ts.month).rjust(2, '0') + str(ts.day).rjust(2, '0')\n )\n # filename_date_format = str(datetime.datetime.strftime(datetime.datetime(2016, 1, 1), \"%Y%m%dT%H%MZ\"))\n\n # The tar module does not seem to support reading virtual files via io.BytesIO, we have to go to disc.\n temp_filename = \"temp.tar.xz\"\n with open(temp_filename, \"wb\") as f:\n f.write(requests.get(uri).content)\n\n archive = tarfile.open(temp_filename, 'r')\n messages = [archive.extractfile(f) for f in archive.getmembers()]\n os.remove(temp_filename)\n\n return messages", "def parse_journal_article_record(root) -> dict:\n\n # print(\"Root\", root)\n # pmid = root.find(\"PMID\").text\n # 
print(\"PMID\", pmid)\n # quit()\n\n doc = {\n \"abstract\": \"\",\n \"pmid\": \"\",\n \"title\": \"\",\n \"authors\": [],\n \"pub_date\": \"\",\n \"journal_iso_title\": \"\",\n \"journal_title\": \"\",\n \"doi\": \"\",\n \"compounds\": [],\n \"mesh\": [],\n }\n\n doc[\"pmid\"] = root.xpath(\".//PMID/text()\")[0]\n\n doc[\"title\"] = next(iter(root.xpath(\".//ArticleTitle/text()\")), \"\")\n\n # TODO https:.//stackoverflow.com/questions/4770191/lxml-etree-element-text-doesnt-return-the-entire-text-from-an-element\n atext = next(iter(root.xpath(\".//Abstract/AbstractText/text()\")), \"\")\n\n for abstracttext in root.xpath(\".//Abstract/AbstractText\"):\n abstext = node_text(abstracttext)\n\n label = abstracttext.get(\"Label\", None)\n if label:\n doc[\"abstract\"] += f\"{label}: {abstext}\\n\"\n else:\n doc[\"abstract\"] += f\"{abstext}\\n\"\n\n doc[\"abstract\"] = doc[\"abstract\"].rstrip()\n\n doc[\"authors\"] = []\n for author in root.xpath(\".//Author\"):\n last_name = next(iter(author.xpath(\"LastName/text()\")), \"\")\n first_name = next(iter(author.xpath(\"ForeName/text()\")), \"\")\n initials = next(iter(author.xpath(\"Initials/text()\")), \"\")\n if not first_name and initials:\n first_name = initials\n doc[\"authors\"].append(f\"{last_name}, {first_name}\")\n\n pub_year = next(iter(root.xpath(\".//Journal/JournalIssue/PubDate/Year/text()\")), None)\n pub_mon = next(iter(root.xpath(\".//Journal/JournalIssue/PubDate/Month/text()\")), \"Jan\")\n pub_day = next(iter(root.xpath(\".//Journal/JournalIssue/PubDate/Day/text()\")), \"01\")\n medline_date = next(\n iter(root.xpath(\".//Journal/JournalIssue/PubDate/MedlineDate/text()\")), None\n )\n\n pub_date = process_pub_date(doc[\"pmid\"], pub_year, pub_mon, pub_day, medline_date)\n\n doc[\"pub_date\"] = pub_date\n doc[\"journal_title\"] = next(iter(root.xpath(\".//Journal/Title/text()\")), \"\")\n doc[\"joural_iso_title\"] = next(iter(root.xpath(\".//Journal/ISOAbbreviation/text()\")), \"\")\n doc[\"doi\"] = next(iter(root.xpath('.//ArticleId[@IdType=\"doi\"]/text()')), None)\n\n doc[\"compounds\"] = []\n for chem in root.xpath(\".//ChemicalList/Chemical/NameOfSubstance\"):\n chem_id = chem.get(\"UI\")\n doc[\"compounds\"].append({\"id\": f\"MESH:{chem_id}\", \"name\": chem.text})\n\n compounds = [cmpd[\"id\"] for cmpd in doc[\"compounds\"]]\n doc[\"mesh\"] = []\n for mesh in root.xpath(\".//MeshHeading/DescriptorName\"):\n mesh_id = f\"MESH:{mesh.get('UI')}\"\n if mesh_id in compounds:\n continue\n doc[\"mesh\"].append({\"id\": mesh_id, \"name\": mesh.text})\n\n return doc", "def fetch_from_url(request, url):\n\n err = None\n article = None\n path = None\n \n url_path = get_url_path(url)\n\n try:\n root = Article.get_root()\n except:\n err = not_found(request, '')\n return (article, path, err)\n\n if url_path and root.slug == url_path[0]:\n url_path = url_path[1:]\n\n path = Article.get_url_reverse(url_path, root)\n if not path:\n err = not_found(request, '/' + '/'.join(url_path))\n else:\n article = path[-1]\n return (article, path, err)", "def load_urls(urls_dirpath):\n url_entities = {}\n for pkl in tqdm(sorted(os.listdir(urls_dirpath))):\n category_name, _ = pkl.strip().split(\".pickle\", 1)\n with open(os.path.join(urls_dirpath, pkl), \"rb\") as fp:\n for entity in pickle.load(fp)[1:]:\n yagouri, wikipage = entity\n if wikipage in url_entities:\n url_entities[wikipage].categories.append(category_name)\n else:\n url_entities[wikipage] = URLEntity(\n wikipage=wikipage,\n yagouri=yagouri,\n categories=[category_name]\n )\n return 
url_entities", "def load_list_of_entries(list_of_files):\n publication_entries = []\n entries = []\n titles = []\n\n for filename in list_of_files:\n entries_list = load_entries(filename)\n\n for e in entries_list:\n if e.main_publication:\n publication_entries.append(e)\n elif e.title not in titles:\n titles.append(e.title)\n entries.append(e)\n\n return publication_entries, entries", "def get_entries_by_path(self, entry_path_str, regex=False, flags=None,\n group=None, history=False, first=False): \n if self.database is None:\n raise DatabaseNotOpened('No KeePass Database Opened.')\n else:\n entry_path_list = entry_path_str.split('/')\n return self.database.find_entries_by_path(entry_path_list,\n regex,\n flags,\n group,\n history,\n first)", "def fetchContent(self):\n print 'fetching page by its path: '+ self.path\n uri = '%s?path=%s' % (self.client.MakeContentFeedUri(), self.path)\n # get the content feed\n feed = self.client.GetContentFeed(uri=uri)\n # take out the content\n self.entry = feed.get_webpages()[0]", "def test_detail_page_loads_correct_entry(testapp, fill_the_db):\n response = testapp.get('/journal/1', status=200)\n title = response.html.find_all(class_='entrytitle')[0].contents[0]\n assert title == ENTRIES[0]['title']", "def referenced_articles(self, url):\n pass", "def loader(self, href, parse, encoding=None):\n \n if href[0] != '/':\n href = os.path.join(self.directory, href)\n if not os.path.isfile(href) or not os.access(href, os.R_OK):\n raise RuntimeException(\"Xml: File %s Doesn't Exist or Not Readable (xi)\" % href)\n \n file = open(href)\n if parse == \"xml\":\n data = ElementTree.parse(file).getroot()\n else:\n data = file.read()\n if encoding:\n data = data.decode(encoding)\n file.close()\n return data", "def read (self, path):\n\n\t\tself.data = []\n\t\t# print \"*** path: %s***\" % path\n\t\tdir, filename = os.path.split (path)\n\t\troot, ext = os.path.splitext (filename)\n\t\t# encoding = 'ISO-8859-1' # utf-8\n\t\ts = codecs.open(path,'r', self.encoding).read()\n\t\t## s = unicode(f.read(),'utf-8')\n\t\ts = self.preprocess (s)\n\t\tlines = split (s, self.linesep)\n\t\tschema = self.splitline(lines[0])\n\n\t\t## print \"** %s **\" % os.path.splitext(filename)[0]\n\t\tif self.verbose:\n\t\t\tprint \"read %d lines from %s\" % (len(lines), path)\n\n\t\tfor i in range(1,len(lines)):\n\t\t\tif not lines[i].strip(): \n\t\t\t\t# print 'skipping line (%d)' % i\n\t\t\t\tcontinue\n\t\t\tfields = self.splitline(lines[i])\n\t\t\titem = self.entry_class (fields, schema)\n\t\t\tif self.accept (item):\n\t\t\t\tself.add (item)\n\n\t\tself.schema = schema\n\t\t# self.data.sort (lastNameCmp)", "def test_detail_route_loads_correct_entry(testapp, fill_the_db):\n response = testapp.get('/journal/2', status=200)\n title = response.html.find_all(class_='container')[0].h1.getText()\n body = response.html.find_all(class_='container')[0].p.getText()\n assert title == ENTRIES[1][\"title\"]\n assert body == ENTRIES[1][\"body\"]", "def blosxom_entry_parser(filename, request):\n config = request.getConfiguration()\n\n entryData = {}\n\n try:\n story = open(filename).readlines()\n except IOError:\n raise IOError\n\n if len(story) > 0:\n entryData['title'] = story.pop(0).strip()\n\n # this handles properties of the entry that are between\n # the title and the body and start with a #\n while len(story) > 0:\n match = re.match(r'^#(\\w+)\\s+(.*)', story[0])\n if match:\n story.pop(0)\n entryData[match.groups()[0]] = match.groups()[1].strip()\n else:\n break\n\n # Call the preformat function\n 
entryData['body'] = tools.run_callback('preformat',\n {'parser': (entryData.get('parser', '') \n or config.get('parser', 'plain')),\n 'story': story,\n 'request': request},\n donefunc = lambda x:x != None,\n defaultfunc = lambda x: ''.join(x['story']))\n\n # Call the postformat callbacks\n tools.run_callback('postformat',\n {'request': request,\n 'entry_data': entryData})\n \n return entryData", "def load(url, file_name, folder):\n # Need special case for Stack Overflow (more than one 7z file)\n\n if not os.path.isfile(file_name):\n #downloads file from url; two url patterns are attempted\n testfile = request.URLopener()\n try:\n testfile.retrieve(url[0], file_name)\n except error.HTTPError as e:\n try:\n testfile.retrieve(url[1], file_name)\n except:\n print (\"Error: URL retrieval of \" + url[0] + \" and \" + url[1] + \" failed for reason: \" + e.reason)\n quit()\n\n #un-zips file and puts contents in folder\n a = py7z_extractall.un7zip(file_name)\n if not (os.path.isfile(os.path.join(folder, \"PostLinks.xml\")) and os.path.isfile(os.path.join(folder, \"Posts.xml\"))):\n a.extractall(folder)", "def load_file_data_from_db(sip, base_path):\n my_entry = FSEntries(sip)\n md_object = add_collection_name(my_entry.md_info, base_path)\n return md_object", "def load_biblio(self, file_name, preload_ids=False, chunksize=1000):\n\n logger.info( \"Loading biblio data from [{}], with chunk size {}. Preload IDs? {}\".format(file_name, chunksize, preload_ids) )\n\n input_file = codecs.open(file_name, 'r', 'utf-8')\n biblio = json.load(input_file)\n\n sql_alc_conn = self.db.connect()\n db_api_conn = sql_alc_conn.connection\n\n if (\"cx_oracle\" in str(self.db.dialect)):\n title_ins = DBBatcher(db_api_conn, 'insert into schembl_document_title (schembl_doc_id, lang, text) values (:1, :2, :3)')\n classes_ins = DBBatcher(db_api_conn, 'insert into schembl_document_class (schembl_doc_id, class, system) values (:1, :2, :3)')\n else:\n title_ins = DBBatcher(db_api_conn, 'insert into schembl_document_title (schembl_doc_id, lang, text) values (%s, %s, %s)')\n classes_ins = DBBatcher(db_api_conn, 'insert into schembl_document_class (schembl_doc_id, class, system) values (%s, %s, %s)')\n\n\n ########################################################################\n # STEP 1: If overwriting, find extant docs and pre-populate doc ID map #\n ########################################################################\n\n extant_docs = set()\n\n if self.overwrite or preload_ids:\n\n for chunk in chunks(biblio, chunksize):\n\n # Loop over all biblio entries in this chunk\n doc_nums = set()\n for bib in chunk[1]:\n\n input_pubnum = self._extract_pubnumber(bib)\n\n # Early return: don't bother querying if we already have an ID\n if input_pubnum in self.doc_id_map:\n extant_docs.add( input_pubnum ) \n continue\n\n doc_nums.add(input_pubnum)\n\n if len(doc_nums) == 0:\n continue\n\n self._fill_doc_id_map(doc_nums, sql_alc_conn, extant_docs)\n\n logger.info( \"Discovered {} existing IDs for {} input documents\".format( len(extant_docs),len(biblio)) )\n\n\n ########################################################\n # STEP 2: Main biblio record processing loop (chunked) #\n ########################################################\n\n for chunk in chunks(biblio, chunksize):\n\n logger.debug( \"Processing {} biblio records, up to index {}\".format(len(chunk[1]), chunk[0]) )\n\n new_doc_mappings = dict() # Collection IDs for totally new document \n overwrite_docs = [] # Document records for overwriting\n duplicate_docs = set() # Set 
of duplicates to read IDs for\n known_count = 0 # Count of known documents\n\n new_titles = []\n new_classes = [] \n\n doc_insert_time = 0\n\n\n transaction = sql_alc_conn.begin()\n\n for bib in chunk[1]:\n\n ########################################\n # STEP 2.1 Extract core biblio records #\n ########################################\n\n family_id, pubdate, pubnumber, assign_applic = self._extract_core_biblio(bib)\n\n life_sci_relevant = self._extract_life_sci_relevance(bib)\n\n\n ####################################################\n # Step 2.2 Overwrite or Insert the document record #\n ####################################################\n\n if pubnumber in extant_docs:\n\n known_count += 1\n\n if self.overwrite:\n # Create an overwrite record\n doc_id = self.doc_id_map[pubnumber] \n overwrite_docs.append({\n 'extant_id' : doc_id,\n 'new_published' : pubdate,\n 'new_family_id' : family_id,\n 'new_life_sci_relevant' : life_sci_relevant,\n 'new_assign_applic' : assign_applic })\n else:\n # The document is known, and we're not overwriting: skip\n continue\n\n else:\n \n # Create a new record for the document\n record = {\n 'scpn' : pubnumber,\n 'published' : pubdate,\n 'family_id' : family_id,\n 'assign_applic' : assign_applic,\n 'life_sci_relevant' : int(life_sci_relevant) }\n \n try:\n\n start = time.time()\n result = sql_alc_conn.execute( self.docs.insert(), record )\n end = time.time()\n\n doc_insert_time += (end-start)\n\n except Exception, exc:\n\n if exc.__class__.__name__ != \"IntegrityError\":\n raise\n\n elif self.allow_document_dups:\n\n # It's an integrity error, and duplicates are allowed.\n known_count += 1\n duplicate_docs.add(pubnumber)\n\n # Reset transaction\n transaction.commit()\n transaction = sql_alc_conn.begin()\n continue \n\n else:\n\n raise RuntimeError(\n \"An Integrity error was detected when inserting document {}. This \"\\\n \"indicates insertion of an existing document, but duplicates have been disallowed\".format(pubnumber))\n\n\n doc_id = result.inserted_primary_key[0] # Single PK\n new_doc_mappings[pubnumber] = doc_id\n\n self._extract_detailed_biblio(bib, doc_id, new_classes, new_titles, pubnumber)\n\n # Commit the new document records, then update the in-memory mapping with the new IDs\n transaction.commit()\n self.doc_id_map.update(new_doc_mappings)\n\n logger.info(\"Processed {} document records: {} new, {} duplicates. 
DB insertion time = {:.3f}\".format( len(chunk[1]), len(new_doc_mappings), known_count, doc_insert_time))\n\n\n ########################################################\n # STEP 2.2: Deal with document overwrites / duplicates #\n ########################################################\n\n if len(overwrite_docs) > 0:\n\n transaction = sql_alc_conn.begin()\n\n # Update the master record for the document that's being overwritten\n stmt = self.docs.update().\\\n where(self.docs.c.id == bindparam('extant_id')).\\\n values(published=bindparam('new_published'), \n family_id=bindparam('new_family_id'), \n life_sci_relevant=bindparam('new_life_sci_relevant'),\n assign_applic=bindparam('new_assign_applic'))\n\n sql_alc_conn.execute(stmt, overwrite_docs)\n\n # Clean out ALL other references to the document, for re-insertion\n delete_ids = [record['extant_id'] for record in overwrite_docs]\n\n stmt = self.titles.delete().where( self.titles.c.schembl_doc_id.in_( delete_ids ) )\n sql_alc_conn.execute( stmt )\n\n stmt = self.classes.delete().where( self.classes.c.schembl_doc_id.in_( delete_ids ) )\n sql_alc_conn.execute( stmt )\n\n stmt = self.chem_mapping.delete().where( self.chem_mapping.c.schembl_doc_id.in_( delete_ids ) )\n sql_alc_conn.execute( stmt )\n\n transaction.commit()\n\n logger.info(\"Overwrote {} duplicate documents (master doc record updated, all other references deleted)\".format(len(overwrite_docs)))\n\n if len(duplicate_docs) > 0:\n self._fill_doc_id_map(duplicate_docs, sql_alc_conn)\n\n logger.info(\"Read {} IDs for duplicate documents\".format(len(duplicate_docs)))\n\n ########################################################\n # STEP 2.3: Bulk insertion of titles / classifications #\n ########################################################\n\n\n # Bulk insert titles and classification\n if self.load_titles:\n title_ins.execute(new_titles)\n logger.debug(\"Insertion of {} titles completed\".format(len(new_titles)) )\n\n if self.load_classifications:\n classes_ins.execute(new_classes)\n logger.debug(\"Insertion of {} classification completed\".format(len(new_classes)) )\n\n # END of main biblio processing loop\n\n # Clean up resources\n title_ins.close()\n classes_ins.close()\n sql_alc_conn.close()\n input_file.close()\n\n logger.info(\"Biblio import completed\" )", "def __init__(self, uri):\n\n self.uri = uri", "def load_data(self, read_shelf):\n if read_shelf:\n try:\n # Attempt reading pre-shelved objects first\n self.__read_shelf()\n except Exception as e:\n print(f'Exception while reading the data shelf ({e})')\n # Otherwise, read data from the the json files\n self.__read_json()\n else:\n self.__read_json()", "def get_contents_of_urls(urls):\n contents = []\n\n for url in urls:\n content = read_url(url)\n parsed_content = json.loads(content)\n contents.extend(parsed_content)\n return contents", "def retrieve_articles(article_indexes):\n articles = []\n for index in article_indexes:\n filename, position = index.split('@')\n with open(filename, 'r', encoding='utf-8') as articles_file:\n articles_file.seek(int(position))\n line = articles_file.readline()\n article = line\n while line[0] != '}':\n line = articles_file.readline()\n article += line\n articles.append(article)\n return articles", "def localLoad(url, delaySecs=0):\n try:\n contents = open(url[len(\"file://\"):]).read()\n except IOError, e:\n d = Deferred()\n d.errback(e)\n return d\n if not delaySecs:\n return succeed(contents)\n d = Deferred()\n reactor.callLater(delaySecs, lambda: d.callback(contents))\n return d", 
"def loadFile(self, path):\n books_added = 0\n records_added = 0\n books_to_clean = set()\n\n PDEBUG('Loading from file: %s', path)\n\n with open(path) as fd:\n while True:\n content = fd.read(PAGE_SIZE)\n if content is None:\n break\n if len(content) == 0:\n break\n pos = 0\n while True:\n m = R_MATCH_ENTRY.search(content, pos)\n if m is None:\n new_content = fd.read(PAGE_SIZE)\n if len(new_content) == 0:\n print('New books: %d, new records: %d' %\n (books_added, records_added))\n print('EOF reached...')\n return (books_added, records_added)\n else:\n content = content[pos:] + new_content\n pos = 0\n else:\n (book, author) = process_book_name(m.group(1))\n book = handleStr(book)\n author = handleStr(author)\n page = handleStr(m.group(2).strip())\n time = handleStr(m.group(3).strip())\n mark = handleStr(m.group(4).strip())\n pos = m.end(0)\n\n bts = book.encode()\n if bts[0:3] == codecs.BOM_UTF8:\n PDEBUG('oops: ')\n PDEBUG('%X-%X-%X', bts[0], bts[1], bts[2])\n\n sys.exit()\n\n if len(mark) == 0:\n continue\n\n res = R_MATCH_POS.match(page)\n if res is None:\n res = R_MATCH_PAGE.match(page)\n if res is None:\n PDEBUG('oops: %s -- %s', book, page)\n sys.exit(1)\n\n pos_str = res.group(1)\n typ_str = res.group(2)\n\n (new_book, new_clip) = \\\n self.__addEntry__(\n book, author, pos_str, typ_str, time, mark)\n\n if new_book:\n books_added += 1\n\n if new_clip:\n books_to_clean.add(book)\n records_added += 1\n\n if books_to_clean:\n PDEBUG('Books to clean: %s', books_to_clean)\n\n for book in books_to_clean:\n self.cleanUpBook(book)\n\n print('Total books added: %d, clips added:%d' %\n (books_added, records_added))\n\n return (books_added, records_added)", "def load_employees(file_path):\n\temployees = []\n\tfor line in open(file_path):\n\t\temployee = Employee.employee_from_insert_stmnt(line)\n\t\tif employee:\n\t\t\temployees.append(employee)\n\treturn employees", "def _ReadEntries(self):\n scope = {}\n filename = os.path.join(self._root_dir, self._options.entries_filename)\n if not os.path.exists(filename):\n return []\n exec(gclient_utils.FileRead(filename), scope)\n return scope[\"entries\"]", "def import_opml(self, path):\n tree = ET.parse(path)\n root = tree.getroot()\n for feed_el in root.find('body').findall('outline'):\n if feed_el.get('type') == 'rss':\n self.add_by_url(feed_el.get('xmlUrl'), feed_el.get('text'))", "def read_content_load(self, filename):\n str_file_woc = self.import_file(filename)\n self.parse_load(str_file_woc)\n self.disp_load_info()", "def handle_import(self, options):\n url = options.get(\"url\")\n flush = options.get('flush')\n if flush:\n from backend.blog.models import BlogCategory, BlogComment, BlogPost\n BlogComment.query.delete()\n BlogPost.query.delete()\n BlogCategory.query.delete()\n\n if url is None:\n raise InvalidCommand(\"Usage is import_wordpress \")\n try:\n import feedparser\n except ImportError:\n raise InvalidCommand(\"Could not import the feedparser library.\")\n feed = feedparser.parse(url)\n\n # We use the minidom parser as well because feedparser won't\n # interpret WXR comments correctly and ends up munging them.\n # xml.dom.minidom is used simply to pull the comments when we\n # get to them.\n xml = parse(url)\n xmlitems = xml.getElementsByTagName(\"item\")\n for (i, entry) in enumerate(feed[\"entries\"]):\n # Get a pointer to the right position in the minidom as well.\n xmlitem = xmlitems[i]\n excerpt = getattr(entry, 'excerpt_encoded')\n content = linebreaks(self.wp_caption(entry.content[0][\"value\"]))\n\n # Get the time struct of the 
published date if possible and\n # the updated date if we can't.\n pub_date = getattr(entry, \"published_parsed\", entry.updated_parsed)\n if pub_date:\n pub_date = datetime.fromtimestamp(mktime(pub_date))\n pub_date -= timedelta(seconds=timezone)\n\n # Tags and categories are all under \"tags\" marked with a scheme.\n terms = defaultdict(set)\n for item in getattr(entry, \"tags\", []):\n terms[item.scheme].add(item.term)\n if entry.wp_post_type == \"post\":\n post = self.add_post(title=entry.title, content=content,\n pub_date=pub_date, tags=terms[\"post_tag\"],\n categories=terms[\"category\"],\n excerpt=excerpt,\n old_url=entry.id)\n\n # Get the comments from the xml doc.\n for c in xmlitem.getElementsByTagName(\"wp:comment\"):\n name = self.get_text(c, \"author\", c.CDATA_SECTION_NODE)\n email = self.get_text(c, \"author_email\", c.TEXT_NODE)\n url = self.get_text(c, \"author_url\", c.TEXT_NODE)\n body = self.get_text(c, \"content\", c.CDATA_SECTION_NODE)\n pub_date = self.get_text(c, \"date_gmt\", c.TEXT_NODE)\n fmt = \"%Y-%m-%d %H:%M:%S\"\n pub_date = datetime.strptime(pub_date, fmt)\n pub_date -= timedelta(seconds=timezone)\n self.add_comment(post=post, name=name, email=email,\n body=body, website=url,\n pub_date=pub_date)\n\n # elif entry.wp_post_type == \"page\":\n # old_id = getattr(entry, \"wp_post_id\")\n # parent_id = getattr(entry, \"wp_post_parent\")\n # self.add_page(title=entry.title, content=content,\n # tags=terms[\"tag\"], old_id=old_id,\n # old_parent_id=parent_id)", "def process(url):\n feed = feedparser.parse(url)\n entries = feed.entries\n ret = []\n for entry in entries:\n print entry\n guid = entry.guid\n title = translate_html(entry.title)\n link = entry.link\n summary = translate_html(entry.summary)\n try:\n subject = translate_html(entry.tags[0]['term'])\n except AttributeError:\n subject = \"\"\n newsStory = NewsStory(guid, title, subject, summary, link)\n ret.append(newsStory)\n return ret", "def load_tweets(path):\n with open(path, \"rb\") as f:\n import pickle\n tweets = pickle.load(f)\n return tweets", "def fetch(self, url, listener, useCache = True): #$NON-NLS-1$\r", "def url_read(self, url):\n if 'raise' in url:\n raise urllib.error.HTTPError(None, None, None, None, None)\n else:\n return self.contents", "def fetch_entry(self, entry_id, **args):\n return self.fetch(\"/entry/\" + entry_id, **args)", "def atom_feed(cls, id):\n Article = Pool().get('nereid.cms.article')\n\n try:\n articles = Article.search([\n ('author', '=', id),\n ('state', '=', 'published'),\n ])\n except:\n abort(404)\n\n feed = AtomFeed(\n \"Articles by Author %s\" % cls(id).display_name,\n feed_url=request.url, url=request.host_url\n )\n for article in articles:\n feed.add(**article.serialize(purpose='atom'))\n\n return feed.get_response()", "def get_new_local_articles(fixture_file=settings.FIXTURE_PATH, article_root=article_root):\n articles = []\n with open(fixture_file, 'r') as f:\n posts = json.load(f, encoding='utf8')\n for title in article_titles(article_root):\n match = False\n for index, post in enumerate(posts):\n if names_are_equal(title, post['fields']['title']):\n match = True\n if index == len(posts)-1 and not match:\n articles.append(title)\n return articles", "def fetch_article_list(self, url):\n print(url)\n\n r = requests.get(url, headers=headers, timeout=10)\n html = r.text\n time.sleep(1)\n\n if r.status_code is not 200:\n print('Server dinied. 
Status:[%s].'%r.status_code)\n return\n\n # local data test\n #with open('./dataset/sina-blog-list.html', 'r') as f:\n # html = f.read()\n\n #print(html)\n\n soup = BeautifulSoup(html, 'html5lib')\n tags = soup.select('div[class=articleList] > div[class~=articleCell] > p > span[class=atc_title] > a')\n\n for t in tags:\n print('Appened: '+t['href'])\n self.article_urls.append(t['href'])\n\n # Get the url of next blog-list page\n nxpage = soup.select('div[class=SG_page] > ul > li[class=SG_pgnext] > a')\n if len(nxpage) > 0:\n #print ('Next list page: '+nxpage[0]['href'])\n self.fetch_article_list(nxpage[0]['href'])\n else:\n print('Have reached to the botom of blog lists.')\n\n\n # backup lists to local file\n with open(self.path+'/blog-lists.txt', 'w') as f:\n f.write('\\n'.join(self.article_urls))", "def Load(self, path):\n\n self.data = dict()\n\n if os.path.exists(self.cache_path):\n self.data = np.load(self.cache_path, allow_pickle=True)[()]\n\n if 'xlsx' in path:\n workBook = xlrd.open_workbook(path)\n sheet1_content1 = workBook.sheet_by_index(0)\n\n for i in tqdm(range(sheet1_content1.nrows)):\n Time = sheet1_content1.cell(i, 0).value\n Link = sheet1_content1.cell(i, 1).value\n Content = sheet1_content1.cell(i, 2).value\n\n id = Link[-16:]\n\n if not id in self.data:\n self.data[id] = dict()\n\n self.data[id]['time'] = Time\n self.data[id]['link'] = Link\n self.data[id]['post'] = Content\n\n elif 'json' in path:\n f = open(path, 'r', encoding='utf-8')\n text = f.read()\n data = json.loads(text)\n\n for v in data:\n id = v['link'][-16:]\n\n if not id in self.data:\n self.data[id] = dict()\n\n self.data[id]['time'] = v['Time']\n self.data[id]['address'] = v['address']\n self.data[id]['location'] = v['location']\n self.data[id]['post'] = v['post']\n self.data[id]['link'] = v['link']\n\n np.save(self.cache_path, self.data)", "def load (self, uri):\n child = self.tabs.get_nth_page(self.tabs.get_current_page())\n wv = child.get_child()\n wv.open(uri)", "def list(self,params=None, headers=None):\n path = '/mandate_import_entries'\n \n\n response = self._perform_request('GET', path, params, headers,\n retry_failures=True)\n return self._resource_for(response)", "def entry(request, entry_id):\n\n __time_update(request.user)\n\n try:\n entry = Entry.objects.get(id=entry_id)\n feed = entry.feed\n\n if feed.user == request.user:\n entry = entry.entry.read()\n else:\n return render_to_response('message.html', {'message':\n 'There is no such entry.',\n 'back': '/feeds'})\n except:\n return render_to_response('message.html', {'message':\n 'Error opening entry file! 
Please, reload feed.',\n 'back': '/feeds'})\n\n return HttpResponse(entry)", "def load_cache(base_url, path=\"logs/\"):\n\n # Convert URL to filename and read contents\n url_filename = url_to_filename(base_url)\n\n filename = f\"{path}CACHE-{url_filename}.html\"\n f = open(filename, \"r\")\n data_cache = f.read()\n\n data_cache = \" \".join(data_cache.split()) # Remove all whitespaces\n\n return data_cache", "def loadFromString(self, string, uri=None):\n self.loadFromDom(parseString(string))", "def load_topics():\n\n print \"Importing topics...\"\n\n # Delete all rows in table, so if we need to run this a second time,\n # we won't be trying to add duplicate retailers\n Topic.query.delete()\n\n # Read CSV file\n with open(\"seed_data/topics.csv\") as source_file:\n example_data = list(csv.reader(source_file))\n\n # skip header row for populating db\n for list_item in example_data[1:]:\n topic = Topic(topic_title=list_item[1])\n\n # Add the current retailer to the session\n db.session.add(topic)\n\n # Commit the db.session changes to the database\n db.session.commit()" ]
[ "0.6321398", "0.6246365", "0.5560296", "0.5538123", "0.53121865", "0.5294257", "0.5108225", "0.51056397", "0.50977325", "0.50559926", "0.5041016", "0.49502626", "0.49426818", "0.49289203", "0.49164444", "0.48940632", "0.48866275", "0.48804903", "0.48804903", "0.48515296", "0.48508194", "0.4849793", "0.48485938", "0.48274857", "0.48256782", "0.4816751", "0.48116228", "0.48115733", "0.48115733", "0.48056406", "0.4800021", "0.4790606", "0.47876963", "0.4780716", "0.47514758", "0.47466457", "0.47442108", "0.47362596", "0.47271052", "0.4711131", "0.46848342", "0.4672557", "0.4661507", "0.46470028", "0.46440557", "0.46398687", "0.46280822", "0.461066", "0.46037617", "0.4588929", "0.45860487", "0.4584931", "0.45818827", "0.45804596", "0.4568291", "0.4560837", "0.45354217", "0.45326522", "0.4529162", "0.4491648", "0.44881767", "0.44774693", "0.44678605", "0.44663343", "0.4464597", "0.44581452", "0.44530913", "0.445011", "0.44479877", "0.44468805", "0.44396552", "0.44380903", "0.44357532", "0.4431391", "0.44267428", "0.44181752", "0.44144124", "0.44019032", "0.43959093", "0.43915245", "0.43894723", "0.4383409", "0.4382342", "0.4370613", "0.43701738", "0.43640578", "0.43513283", "0.43483102", "0.4344391", "0.43439913", "0.4341406", "0.43408164", "0.4339212", "0.43310758", "0.43217334", "0.4319741", "0.4312492", "0.43102688", "0.43080324", "0.43026942" ]
0.75825113
0
If passed two rows, start generating a collection of forests. Otherwise, if passed a previous generation of the collection, spawn the next generation
def __init__(self, settings, input_row=None, output_row=None, previous_generation=None): self._fullInput = [] self.power = 0 self._forests = [] self._fullOutput = [] self.best_fitness = 0 self.roulet = [] self.settings = settings if input_row and output_row: self._generate(list(input_row), list(output_row)) elif previous_generation: self._next_generation(previous_generation) else: raise Exception('wrong arg"s')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _next_generation(self, previous_generation):\n self._fullInput, self._fullOutput = previous_generation.get_data()\n self.power = self.settings.population_count\n for forest_iteration in range(self.power):\n first, second = previous_generation.selection()\n print 'selected for crossover ->', first.fitness, second.fitness\n self._forests.append(OneForest(self.settings, first_forest=first, second_forest=second))", "def next_generation(self, population):\n pass", "def _next_gen(self):\n\n selected = self.select()\n offspring = self.population.mate(mating_individuals=selected)\n self.population.delete(np.arange(len(self.population.individuals)))\n self.population.add(offspring)\n self._current_gen_count += 1\n self._gen_count_in_curr_iteration += 1\n self._function_evaluation_count += offspring.shape[0]", "def nextGeneration(self):\n # select two parents from the current generation.\n parent_1 = self.selection()\n parent_2 = self.selection()\n # to not get the same parents.\n _ = 0\n while _ < 30 and parent_2 == parent_1:\n parent_2 = self.selection()\n _ += 1\n # apply crossover on those parents (crossover_rate chance).\n crossover_chance = random.uniform(0, 1)\n parents = [parent_1, parent_2]\n if crossover_chance <= self.crossoverRate:\n offspring = self.crossover(parents)\n else:\n return \n # apply mutations on the new offspring (mutation_rate chance).\n mutation_chance = random.uniform(0, 1)\n newoffspring = offspring\n if mutation_chance <= self.mutationRate:\n newoffspring = self.mutation(offspring)\n # replace one of the parents in the new generation, given the loser parent.\n self.replaceLoser(parents, newoffspring)\n\n # now the new generation is available in the self.currentGeneration", "def _generate_rows(self):\n logger.debug(\"Generating pre-genealogical coherence data for %s\", self.w1)\n if not self.rows:\n for w2 in self.all_mss:\n if self.w1 == w2:\n continue\n self._add_row(w2)\n\n self._sort()\n logger.debug(\"Generated pre-genealogical coherence data for %s\", self.w1)", "def _generate(self, input_row, output_row):\n self._fullInput = input_row\n self.power = self.settings.population_count\n self._fullOutput = output_row\n for one_forest in range(self.power):\n self._forests.append(OneForest(self.settings, input_row=self._fullInput, full_output=self._fullOutput))", "def step(self):\n # amt_selected = \\\n # int(self.population_size * self.part_selected) \n\n # spawning_pool = [] # list of dna selected for reproduction\n new_data =[]\n \n sorted_dna = sorted(self.data, \n key=lambda dna: dna.fitness_function(dna),\n reverse=True)\n \n \n \n\n # mutation\n for dna in sorted_dna:\n dna.mute(self.mutation_probability)\n\n # crossover\n while len(new_data) < \\\n self.population_size - (self.population_size % 2):\n\n d1 = copy.copy(self.pick())\n d2 = copy.copy(self.pick())\n times = 2\n for i in range(times):\n d1.crossover(d2)\n\n new_data += [d1, d2]\n\n\n\n\n\n if (self.population_size % 2) == 1:\n new_data.append(copy.deepcopy(self.pick()))\n\n assert(len(self.data) == len(new_data))\n\n for i in range(len(new_data)):\n self.data[i].data = new_data[i]", "def _next_generation(self, ranks):\n replace = ranks[:int(self.population_size * self.culling)]\n for idx in replace:\n self.population[idx] = self._create_offspring()", "def newGeneration(self):\n for i in range(0, len(self.population)):\n [ind1, ind2] = self.randomSelection()\n child = self.crossover(ind1, ind2)\n self.population[i].setGene(child)\n self.mutation(self.population[i])", "def create_next_gen(self, 
parents_sreprs_couple):\n child0, child1 = self.recombine(parents_sreprs_couple[0], parents_sreprs_couple[1])\n if random.random() < self.mutate_prob:\n child0 = self.mutate(child0)\n if random.random() < self.mutate_prob:\n child1 = self.mutate(child1)\n\n return child0, child1", "def stage1(self):\n n = self.min\n while True:\n n, bin_ = self.sort_to_bin(n)\n if n is None:\n n = self.get_new_n(bin_)\n if n is None:\n break\n if self.viz:\n yield", "def generate_next_generation(environment, population, adaptive_mutation):\n\t# generate pairs of parents that can be used for recombination\n\tparent_pairs = parent_selection_ranking(population, num_pairs=len(population)*4)\n\n\t# generate offspring\n\toffspring = []\n\tfor i in range(len(parent_pairs)):\n\t\tchildren = create_offspring(environment, parent_pairs[i][0], parent_pairs[i][1], adaptive_mutation, num_offspring=1)\n\t\toffspring += children # concatenate children to offspring list\t\n\n\tnew_population = survival_selection_top(offspring, len(population))\n\treturn new_population", "def run(self):\n for i in range(self.generations):\n log.info(f'Training population in generation {i + 1}...')\n if i == 0:\n self.create_first_generation()\n else:\n self.create_next_generation()\n log.info(f'best individual: {self.best_individual()[1]}')\n log.info(f'best individual score: {self.best_individual()[0]}')", "def next_generation(self):\n new_population = self.population.copy()\n new_length = self.tour_length.copy()\n for i in range(self.loops):\n order_a = self.pick_one()\n order_b = self.pick_one()\n order = self.crossover(order_a, order_b)\n order_length = self.distance(order)\n new_population[i], new_length[i] = self.mutate(order_length, order)\n if new_length[i] < self.worst:\n self.tour_length[self.worst_pos] = new_length[i]\n self.population[self.worst_pos] = new_population[i]\n self.fitness[self.worst_pos] = 1/new_length[i]\n self.normalise()\n self.worst = 0\n for j in range(self.loops):\n if self.worst < self.tour_length[j]:\n self.worst = self.tour_length[j]\n self.worst_pos = j\n return new_population, new_length", "def __next__(self) -> Tuple[np.array, Tuple[int, int, int], int]:\n cur_edge_type = self.ordered_edge_types[self.iter % len(self.edge_types)]\n # Remove from freebatch_edge_types classes, what are already fully taken.\n self.freebatch_edge_types[cur_edge_type] = [\n edge_class for edge_class in self.freebatch_edge_types[cur_edge_type]\n if self.batch_num[cur_edge_type][edge_class] + 1 <= self.num_training_batches(\n cur_edge_type, edge_class)]\n\n # If we take all edges from current type, but another type is not fully taken,\n # we should to restart getting edges from current type.\n if len(self.freebatch_edge_types[cur_edge_type]) == 0:\n # We take all edges from current type.\n self.took_all_edges[cur_edge_type] = True\n # All edges from current type (from all classes) are ready to taken again.\n self.freebatch_edge_types[cur_edge_type] = list(\n range(self.edge_types[cur_edge_type]))\n # Make zero index of batches.\n self.batch_num[cur_edge_type] = [0] * self.edge_types[cur_edge_type]\n # If all edges are already taken this epoch.\n if np.all(list(self.took_all_edges.values())):\n raise StopIteration\n # Select random class from current edge type to sample batch\n # (e.g. 
select specific side effect for drug-drug edges).\n cur_edge_class = np.random.choice(self.freebatch_edge_types[cur_edge_type])\n self.iter += 1\n start = self.batch_num[cur_edge_type][cur_edge_class] * self.batch_size\n self.batch_num[cur_edge_type][cur_edge_class] += 1\n batch_edges = self.train_edges[cur_edge_type][cur_edge_class][\n start: start + self.batch_size]\n current_edge_type = (*cur_edge_type, cur_edge_class)\n current_edge_type_idx = self.edge_type2idx[current_edge_type]\n return batch_edges, current_edge_type, current_edge_type_idx", "def _step(self):\n self.sort()\n selection = self._select()\n offspring = self._crossover(selection)\n self._mutate(offspring)\n\n self.sort()\n if self.elite_num > 0:\n offspring[:self.elite_num] = self.population[:self.elite_num]\n\n self.population[:] = offspring\n\n self.sort()\n if self.cull_num > 0:\n self.population[-self.cull_num:] = self._initialize(self.cull_num)", "def _choose_sample(self):\n\n \t #periodically generate a new reconstruction for the purposes of sampling", "def generation_next(prev_gen):\n next_gen = []\n\n # Iter through list of graphs\n for original_graph in prev_gen:\n # Select edges to nodes which are at distance 2\n select_edges = dist2_nodepairs(original_graph)\n\n # Go through the list of possible selected edges and add one\n for test_edge in select_edges:\n test_graph = original_graph.copy()\n test_graph.add_edge(*test_edge)\n if (not graph_exists(test_graph, next_gen)) \\\n and check_test_graph(test_graph):\n next_gen.append(test_graph)\n\n return next_gen", "def next(self):\n while not self.is_stable():\n self.step()", "def advance_generation(self):\n self.generation += 1\n next_cells = [[self.cell_state['dead']] * self.cols for x in range(self.lines)]\n for i in range(self.lines):\n for j in range(self.cols):\n neighbors = self.get_neighbors(i, j)\n if self[i][j] == self.cell_state['alive']:\n if neighbors == 2 or neighbors == 3:\n next_cells[i][j] = self.cell_state['alive']\n elif self[i][j] == self.cell_state['dead']:\n if neighbors == 3:\n next_cells[i][j] = self.cell_state['alive']\n super().__init__(next_cells)", "def main():\n for i in range(5):\n check_row()\n put_beeeper_if_not()\n go_next_row()", "def nextGeneration(self):\n\n # Start a timer to calculate the time the render one generation.\n startTime = int(round(time.time() * 100000))\n\n self.generation += 1\n\n self.setNeighbors()\n self.checkAmountOfNeighbors()\n\n # Ends a timer to calculate the time the render one generation.\n endTime = int(round(time.time() * 100000))\n self.timeToCalcGeneration = (endTime - startTime)", "def next_minibatch_feed_dict(self, placeholders):\n while True:\n if self.iter % 4 == 0:\n # gene-gene relation\n self.current_edge_type_idx = self.edge_type2idx[0, 0, 0]\n elif self.iter % 4 == 1:\n # gene-drug relation\n self.current_edge_type_idx = self.edge_type2idx[0, 1, 0]\n elif self.iter % 4 == 2:\n # drug-gene relation\n self.current_edge_type_idx = self.edge_type2idx[1, 0, 0]\n else:\n # random side effect relation\n if len(self.freebatch_edge_types) > 0:\n self.current_edge_type_idx = np.random.choice(self.freebatch_edge_types)\n else:\n self.current_edge_type_idx = self.edge_type2idx[0, 0, 0]\n self.iter = 0\n\n i, j, k = self.idx2edge_type[self.current_edge_type_idx]\n if self.batch_num[self.current_edge_type_idx] * self.batch_size \\\n <= len(self.train_edges[i,j][k]) - self.batch_size + 1:\n break\n else:\n if self.iter % 4 in [0, 1, 2]:\n self.batch_num[self.current_edge_type_idx] = 0\n else:\n 
self.freebatch_edge_types.remove(self.current_edge_type_idx)\n\n self.iter += 1\n start = self.batch_num[self.current_edge_type_idx] * self.batch_size\n self.batch_num[self.current_edge_type_idx] += 1\n batch_edges = self.train_edges[i,j][k][start: start + self.batch_size]\n return self.batch_feed_dict(batch_edges, self.current_edge_type_idx, placeholders)", "def evolve(self, generations=10000):\n\n for gen in range(generations):\n # run the tournament\n self.tournament()\n\n # generate the next generation\n self.p = self.nextGen()", "def stage2(self):\n start = 0\n while True:\n idx = self.first_unsorted(start)\n if idx is None:\n break\n start = self.nearby_sort(idx)\n if self.viz:\n yield", "def nextGen(self):\n\n p = []\n while len(p) < len(self.p):\n #select mates and produce offspring\n p1, p2 = self.select()\n offspring = self.mate(p1, p2)\n\n #put the offspring in the next generation (with mutation)\n for child in offspring:\n child=self.mutate(child)\n p.append(child)\n \n\n # the world belongs to the new generation\n return p", "def join_scaffolds(first_end, new_scaffold, new_end, links_dict, scaffold_list, used_scaffs): \n while new_end not in used_scaffs:\n\n if new_end in links_dict and len(links_dict[new_end]) > 0:\n next_scaff_start = links_dict[new_end]\n if next_scaff_start != other_side(first_end):\n ns = next_scaff_start.split(\"_\")\n next_scaff_number = int(ns[1])\n next_scaff_dir = ns[0]\n next_scaffold = scaffold_list[next_scaff_number]\n if next_scaff_dir == \"right\":\n next_scaffold.reverse()\n if new_scaffold[-2] != other_end(next_scaffold[1]):\n new_scaffold = new_scaffold[:-1] + [other_end(new_scaffold[-2])] + [other_end(next_scaffold[1])] + next_scaffold[1:]\n else:\n new_scaffold = new_scaffold[:-1] + next_scaffold[1:]\n used_scaffs.append(new_end)\n used_scaffs.append(next_scaff_start)\n new_end = other_side(next_scaff_start)\n else: \n new_scaffold[-1] = \"join_circle\"\n used_scaffs.append(new_end)\n \n else:\n new_scaffold[-1] = \"join_not_found\"\n used_scaffs.append(new_end)\n\n return new_scaffold", "def next_generation(self):\r\n self.calculate_stats()\r\n\r\n self.population = []\r\n\r\n # Getting amounts for different types of neural net replacements\r\n random_size = self.random_round(self.population_size * self.settings[\"random_offspring\"])\r\n elitism_size = self.random_round(self.population_size * self.settings[\"elitism_offspring\"])\r\n crossover_size = self.population_size - random_size - elitism_size\r\n\r\n # Keeping best neural nets (elitism)\r\n self.population.extend(self.sorted_population[i].copy() for i in range(elitism_size))\r\n\r\n # Adding neural nets with crossover\r\n\r\n probs = self._get_selection_probabilities()\r\n crossovers = (self._uniform_crossover(*np.random.choice(self.sorted_population, 2, replace=False, p=probs)) for _ in range(crossover_size))\r\n self.population.extend(crossovers)\r\n\r\n # Mutating neural nets\r\n for neural_net in self.population:\r\n if np.random.rand() < self.settings[\"mutation_rate\"]:\r\n neural_net.mutate(self.settings[\"mutation_chance\"], self.settings[\"mutation_amount\"])\r\n\r\n # Adding random nets\r\n self.population.extend(self._random_child() for _ in range(random_size))\r\n\r\n # Shuffling new population\r\n np.random.shuffle(self.population)\r\n\r\n # Increment current generation\r\n self.current_generation += 1", "def train_loop_pre(self, current_step):\r\n pass", "def step(self):\n self.age += 1\n self.move_agent()\n self.sugar -= self.metabolism\n\n # Eat sugar\n 
available_sugar = self.get_sugar(self.pos).amount\n self.sugar += available_sugar\n# self.total_sugar_in_field -= available_sugar\n # Set sugar in current cell to zero\n self.get_sugar(self.pos).eat_sugar() \n \n \n \n if self.sugar == 0:\n self.model.remove_agent(self)\n \n self.gen += 1\n x = self.model.random.randrange(self.model.grid.width)\n y = self.model.random.randrange(self.model.grid.height)\n new_pos = (x,y)\n \n self.model.add_agent(Consumer, new_pos, f\"{self.unique_id.split('-')[0]}-{self.gen}\", self.gen, self.model.vision, self.model.metabolism, self.model.starting_sugar)\n \n \n if self.reproduction_and_death:\n if self.age > self.max_age: # Agent dies\n # Tax inheritance\n self.model.inheritance_tax_agent(self)\n \n if self.model.spawn_at_random:\n self.gen += 1\n x = self.model.random.randrange(self.model.grid.width)\n y = self.model.random.randrange(self.model.grid.height)\n new_pos = (x,y)\n \n self.model.add_agent(Consumer, new_pos, f\"{self.unique_id.split('-')[0]}-{self.gen}\", self.gen, self.model.vision, self.model.metabolism, self.model.starting_sugar)\n self.model.remove_agent(self) #agent dies\n \n \n else:\n #spawn new agent\n self.gen += 1\n if self.sugar != 0:\n self.model.add_agent(Consumer, self.pos, f\"{self.unique_id.split('-')[0]}-{self.gen}\", self.gen, self.vision, self.metabolism, self.sugar)\n else:\n self.model.add_agent(Consumer, self.pos, f\"{self.unique_id.split('-')[0]}-{self.gen}\", self.gen, self.vision, self.metabolism, self.model.starting_sugar)\n \n self.model.remove_agent(self) #agent dies", "def ROOMSELECTION_LOOP():\n pass", "def two_step_generator(classes: list, paths_list: list, imgs_per_class: int, shape: tuple,\n nb_win: int, greys: bool, nb_to_gen: int, img_gen: ImageDataGenerator) -> list:\n \n datawin = list() \n datagen = list()\n \n for class_ in classes:\n print(class_)\n \n # Images paths list\n class_imgs_path = [paths_list[k] for k in range(len(paths_list)) if class_ in paths_list[k]]\n\n # Randomly choose images\n class_imgs_subset = np.random.choice(class_imgs_path, size=imgs_per_class, replace=False)\n\n # Get images\n class_imgs = get_imgs(class_imgs_subset)\n\n # Step 1: resize and crop on sliding windows\n class_new_imgs = create_windows_imgs(class_imgs, shape=shape, nb_win=nb_win, greys=greys)\n class_new_imgs = np.array(flat_list(class_new_imgs))\n datawin.append(class_new_imgs)\n \n # Step 2: DataGenerator\n class_datagen = datagen_class(class_new_imgs, nb_to_gen, img_gen)\n class_datagen = class_datagen.astype(int)\n\n datagen.append(class_datagen)\n \n return datawin, datagen", "def generate_grains(self, cells):\n\t\tfor cell_num in range(cells):\n\t\t\trandom_row = random.randrange(0,self.space.shape[0],1)\n\t\t\tsample_cell = np.random.choice(self.space[random_row],1)\n\t\t\tsample_cell = sample_cell[0]\n\t\t\twhile sample_cell.state != 0:\n\t\t\t\trandom_row = random.randrange(0,self.space.shape[0],1)\n\t\t\t\tsample_cell = np.random.choice(self.space[random_row],1)\n\t\t\t\tsample_cell = sample_cell[0]\n\t\t\tsample_cell.change_state(self.init_time ,cell_num)", "def _on_single(self) -> None:\n self.row_generator.set_single()", "def _next(self, population):\n # split the population for crossover\n selected, the_rest = self._split_population(\n population, self._get_selected_number(population,\n self._selection_crossover))\n\n # crossover\n generated_items_crossover = []\n while len(selected) >= 2:\n male, female = random.sample(selected, 2)\n selected.remove(male)\n selected.remove(female)\n 
generated_items_crossover.extend(\n self._crossover.crossover(male, female))\n\n # if there is a impar number of selected items\n # add it back to the list\n the_rest.extend(selected)\n\n # Make the mutations\n selected, the_rest = self._split_population(\n the_rest, self._get_selected_number(population,\n self._selection_mutation))\n # mutation\n generated_items_mutation = []\n for item in selected:\n generated_items_mutation.append(self._mutation.mutate(item))\n\n # compute the population\n population = []\n population.extend(the_rest)\n population.extend(generated_items_crossover)\n population.extend(generated_items_mutation)\n\n return population", "def my_generator(log_file_list, batch_size):\n\n while 1:\n\n shuffled_list = shuffle(log_file_list)\n\n for list_slice in generate_equal_slices(shuffled_list, int(batch_size)):\n\n img_list = []\n steering_angle_list = []\n\n for row in list_slice:\n \n img_path = get_absolute_imgpath(row['image'])\n image = load_image(img_path, IMAGE_SIZE)\n\n angle = row['steering']\n\n image, angle = augment_image_angle_pair(image, angle)\n\n img_list.append(image)\n steering_angle_list.append(angle)\n\n features_slice = np.array(img_list)#.astype('float32')\n\n labels_slice = np.array(steering_angle_list)\n\n assert features_slice.shape[0] == labels_slice.shape[0]\n\n yield ({'lambda_input_1': features_slice}, {'output': labels_slice})", "def generation_process(self):\n start_time = rospy.get_time()\n end_time = start_time + self.max_time\n index = 0\n while not rospy.is_shutdown() and not self.shutdown and not self.stop_generation:\n # check time or dirt number (if this is a requirement) for termination criteria\n if self.end_after_time:\n current_time = rospy.get_time()\n if current_time > end_time:\n self.shutdown = True\n rospy.loginfo(\n f\"*** WRAPPER MESSAGE ***\\n\\n\\t'{rospy.get_caller_id()}' has reached the maximum generation time:\\n\\t({current_time} s > {end_time} s).\\n\\tNode will stop generating and publishing dirt.\\n\")\n break\n if self.end_after_number:\n # this will only be important in the beginning if max_dirt_number=0\n # (otherwise the number check after publishing will always trigger first)\n if index >= self.max_dirt_number:\n self.shutdown = True\n rospy.loginfo(\n f\"*** WRAPPER MESSAGE ***\\n\\n\\t'{rospy.get_caller_id()}' has reached the maximum dirt number ({self.max_dirt_number}).\\n\\tNode will stop generating and publishing dirt.\\n\")\n break\n\n # Create an (increasing) index, a random trust value and a random position for the new dirt\n header = Header()\n header.stamp = rospy.Time.now()\n index += 1\n trust_value = random.randint(\n self.min_trust, self.max_trust)\n pose = Pose(position=self.__generate_point_based_on_prob(),\n orientation=Quaternion(x=0.0, y=0.0, z=0.0, w=1.0))\n sleep_time = random.randint(\n self.time_interval_min, self.time_interval_max)\n\n # Combine everything\n dirt = DirtObject(header, index, pose, trust_value)\n\n # Publish the dirt\n self.__publish_dirt(dirt)\n\n rospy.loginfo(\"*** WRAPPER MESSAGE ***\\n\\n\\tDirt was generated and publised: [ID: %d, position: (%.2f,%.2f), trust: %d]\\n\\tDirt generation will sleep now for %d seconds.\\n\" % (\n dirt.id, dirt.pose.position.x, dirt.pose.position.y, dirt.trust_value, sleep_time))\n\n # check dirt number (if this is a requirement)\n if self.end_after_number:\n if index >= self.max_dirt_number:\n self.shutdown = True\n rospy.loginfo(\n f\"*** WRAPPER MESSAGE ***\\n\\n\\t'{rospy.get_caller_id()}' has reached the maximum dirt number 
({self.max_dirt_number}).\\n\\tNode will stop generating and publishing dirt.\\n\")\n break\n\n # Sleep rest of the (random defined) time\n rospy.sleep(sleep_time)\n\n # State some final values after stopping generation\n duration = rospy.get_time() - start_time\n duration_string = \"%.2f\" % duration\n rospy.loginfo(\n f\"*** WRAPPER MESSAGE ***\\n\\n\\t'{rospy.get_caller_id()}' has generated {index} dirt in total over {duration_string} s.\\n\")", "def iter_fun(self):\n\n run_id = self._run_id\n etopo_dir = driver_home\n topodir = driver_home\n\n # load input info\n if self._input_info == None:\n scn_fname = os.path.join(self._run_home,'scenario_pts.txt') \n scn = np.loadtxt(scn_fname)\n scn_list = scn.tolist()\n else:\n scn_list = self._input_info\n \n # total number of runs\n M = len(scn_list)\n N = 8*M + 2 # 8*M runs plus two empty bathymetry runs\n\n if run_id == N:\n raise StopIteration()\n\n else:\n \n #=========================\n # set coarse and fine grids\n #\n t_shelf = 0. # time approaching continental slope\n t_harbor = 0. # time approaching harbor\n\n if ((run_id >= 0) and (run_id < 4*M)) or (run_id == 8*M):\n #------------------\n # setrun for coarse\n #\n grid = 'coarse'\n \n self._rundata.amrdata.amr_levels_max = 4\n # coarse grid run = 10\"\n # dx = 30', 5', 1', 10\"\n self._rundata.amrdata.refinement_ratios_x = [6, 5, 6]\n self._rundata.amrdata.refinement_ratios_y = [6, 5, 6]\n self._rundata.amrdata.refinement_ratios_t = [6, 5, 6]\n\n\n # add topography (coarse)\n topofiles = self._rundata.topo_data.topofiles\n # for topography, append lines of the form\n # [topotype, minlevel, maxlevel, t1, t2, fname]\n topofiles = []\n\n topofiles.append([3, 1, 4, 0., 1.e10, \\\n os.path.join(etopo_dir, 'etopo1_-130_-124_38_45_1min.asc')])\n topofiles.append([-3, 3, 4, 0., 1.e10, \\\n os.path.join(topodir, 'cc-1sec.asc')])\n\n # add regions\n regions = self._rundata.regiondata.regions \n # between shelf and CC \n regions = []\n regions.append(\\\n [2, 3, t_shelf, 1e9, -125, -124.05, 40.5, 43]) \n regions.append(\\\n [3, 4, t_harbor, 1e9, -124.26, -124.14, 41.67, 41.79])\n regions.append(\\\n [4, 4, t_harbor, 1e9, -124.218,-124.17, 41.7345, 41.77])\n\n # == fgmax.data values ==\n fgmax_files = self._rundata.fgmax_data.fgmax_files\n fgmax_files = []\n \n # for fixed grids append to this list names of any fgmax input files\n fgmax1_fname = os.path.join(driver_home,'fgmax1_coarse.txt')\n fgmax2_fname = os.path.join(driver_home,'fgmax2_coarse.txt')\n fgmax3_fname = os.path.join(driver_home,'fgmax3_coarse.txt')\n\n fgmax_files.append(fgmax1_fname) \n fgmax_files.append(fgmax2_fname) \n fgmax_files.append(fgmax3_fname) \n \n self._rundata.fgmax_data.num_fgmax_val = 2\n \n \n elif ((run_id >= 4*M) and (run_id < 8*M)) or (run_id == 8*M+1):\n #----------------\n # setrun for fine\n #\n grid = 'fine'\n \n self._rundata.amrdata.amr_levels_max = 6\n\n ## fine grid run = 2/3\"\n ## dx = 30', 5', 1', 10\", 2\", 2/3\"\n self._rundata.amrdata.refinement_ratios_x = [6, 5, 6, 5, 3]\n self._rundata.amrdata.refinement_ratios_y = [6, 5, 6, 5, 3]\n self._rundata.amrdata.refinement_ratios_t = [6, 5, 6, 5, 3]\n\n regions = self._rundata.regiondata.regions \n regions = []\n # between shelf and CC\n regions.append(\\\n [2, 4, t_shelf, 1e9, -125, -124.05, 40.5, 43]) \n regions.append(\\\n [4, 5, t_harbor, 1e9, -124.26, -124.14, 41.67, 41.79])\n regions.append(\\\n [6, 6, t_harbor, 1e9, -124.218,-124.17, 41.7345, 41.77])\n\n # add topography (fine)\n topofiles = self._rundata.topo_data.topofiles\n # for 
topography, append lines of the form\n # [topotype, minlevel, maxlevel, t1, t2, fname]\n topofiles = []\n\n topofiles.append([3, 1, 6, 0., 1.e10, \\\n os.path.join(etopo_dir, 'etopo1_-130_-124_38_45_1min.asc')])\n topofiles.append([-3, 4, 6, 0., 1.e10, \\\n os.path.join(topodir, 'cc-1sec.asc')])\n topofiles.append([3, 6, 6, 0., 1.e10, \\\n os.path.join(topodir,'cc-1_3sec-c_pierless.asc')])\n \n # == fgmax.data values ==\n fgmax_files = self._rundata.fgmax_data.fgmax_files\n fgmax_files = []\n \n # for fixed grids append to this list names of any fgmax input files\n fgmax1_fname = os.path.join(driver_home,'fgmax1_fine.txt')\n fgmax2_fname = os.path.join(driver_home,'fgmax2_fine.txt')\n fgmax3_fname = os.path.join(driver_home,'fgmax3_fine.txt')\n\n fgmax_files.append(fgmax1_fname) \n fgmax_files.append(fgmax2_fname) \n fgmax_files.append(fgmax3_fname) \n \n self._rundata.fgmax_data.num_fgmax_val = 2\n \n\n\n #\n # set desired magnitude\n #\n if ((run_id >= 0) and (run_id < M)) \\\n or ((run_id >= 4*M) and (run_id < 5*M)):\n self.KL_Mw_desired = 8.6\n elif ((run_id >= M) and (run_id < 2*M)) \\\n or ((run_id >= 5*M) and (run_id < 6*M)):\n self.KL_Mw_desired = 8.8\n elif ((run_id >= 2*M) and (run_id < 3*M)) \\\n or ((run_id >= 6*M) and (run_id < 7*M)):\n self.KL_Mw_desired = 9.0\n elif ((run_id >= 3*M) and (run_id < 4*M)) \\\n or ((run_id >= 7*M) and (run_id < 8*M)):\n self.KL_Mw_desired = 9.2\n \n #\n # set slip distribution\n #\n run_id_mod = run_id - 100*(run_id/100)\n m = scn_list[run_id_mod]\n self.set_KL_slip(m)\n \n if run_id < 8*M:\n dir_grid_Mw = '../geoclaw_output/' + str(grid) + '_' + str(self.KL_Mw_desired)\n self._rundir = os.path.join(dir_grid_Mw, 'run_' + str(run_id_mod))\n else:\n # empty runs to obtain bathymetry\n \n dir_grid_Mw = '../geoclaw_output/' + str(grid) + '_B0'\n self._rundir = dir_grid_Mw\n self.KL_Mw_desired = 0.0\n self.set_KL_slip([0.]*len(m)) # set output\n self._rundata.clawdata.output_times = [1.0, 3.0]\n \n self._run_id += 1\n \n return self", "def next_run(self):\n self.load_run(run=self.run+1)", "def step(self):\n\n self.grains += 1\n\n if self.grains > self.spill_size:\n print('spill -> ', self.agent_id)\n self.model.spill(self)", "def new_iteration(self):\n if (\n self.inner_solutions is not None\n and self.inner_solutions.size(0) > self.raw_samples\n ):\n indices = torch.randperm(n=self.inner_solutions.size(0))[: self.raw_samples]\n self.inner_solutions = self.inner_solutions[indices]\n self.inner_values = self.inner_values[indices]", "def step():\n x_rand = sample()\n x_nearest = new_nearest_neighbour(x_rand)\n x_new = steer(x_nearest, x_rand)\n if obstacle_free(x_nearest, x_new):\n X_near = new_neighbourhood(x_new)\n x_min = x_nearest\n c_min = x_nearest.cost + x_nearest.dist_to(x_new)\n for x_near in X_near:\n if obstacle_free(x_near, x_new) and (x_near.cost + x_near.dist_to(x_new) < c_min):\n x_min = x_near\n c_min = (x_near.cost + x_near.dist_to(x_new) < c_min)\n x_new_node = add_node(x_new, x_min, True)\n for x_near in X_near:\n if obstacle_free(x_near, x_new) and (x_new_node.cost + x_near.dist_to(x_new) < x_near.cost):\n x_near.change_parent(x_new_node)\n # Here I check for goal paths and draw the circle\n updated = False\n if shared.root_path:\n updated = goal_path_resolve(shared.root_path[0])\n updated = updated or goal_path_resolve(shared.nodes[-1])\n if updated:\n diameter = shared.root_path_length\n center = ((shared.root_path[0].x + shared.root_path[-1].x) / 2,\n (shared.root_path[0].y + shared.root_path[-1].y) / 2)\n if shared.region:\n 
shared.region.remove_from_batch()\n shared.region = ellipse.Ellipse(center[0], center[1], diameter)\n shared.region.add_to_batch()", "def glue_trajectories(trajectories, best_parameters):\n print('Gluing trajectories...', end = ' ')\n test = True\n while test:\n old_trajectories = trajectories.copy()\n glue_trajectories_once(trajectories, best_parameters)\n test = (old_trajectories != trajectories)\n print('Done.')", "def next_gene(self):\n pass", "def crossover (self, p1, p2, p_pop, c1, c2, c_pop) :\n assert self.crossover_count < self.pop_size\n assert self.get_iteration () == self.last_gen\n self.parents.append (p1)\n self.parents.append (p2)\n self.crossover_count += 2\n if self.crossover_count == self.pop_size :\n assert (self.get_iteration () == self.last_gen)\n print (self.get_iteration ())\n sys.stdout.flush ()\n self.build_model (p_pop)\n self.sample_model (c1, c2, c_pop)\n self.crossover_count = 0\n self.parents = []\n self.children = {}\n self.last_gen += 1\n self.clear_cache ()", "def _periodically_create_records(self):\n # WINNERS holds the members that have 'won' this cycle\n winners = set()\n\n while True:\n now = time()\n start_climb = int(now / CYCLE_SIZE) * CYCLE_SIZE\n start_create = start_climb + CYCLE_SIZE * 0.5\n start_idle = start_climb + CYCLE_SIZE * 0.9\n start_next = start_climb + CYCLE_SIZE\n\n if start_climb <= now < start_create:\n yield start_create - now\n\n elif start_create <= now < start_idle and len(winners) < self._signature_count:\n logger.debug(\"c%d record creation phase. wait %.2f seconds until record creation\", int(now / CYCLE_SIZE), CYCLE_SIZE * 0.4 / self._signature_count)\n yield (CYCLE_SIZE * 0.4 / self._signature_count) * pythonrandlib.random()\n\n # find the best candidate for this cycle\n score = 0\n winner = None\n for member in self._slope.iterkeys():\n book = self.get_book(member)\n if book.score > score and not member in winners:\n winner = member\n\n if winner:\n logger.debug(\"c%d attempt record creation %s\", int(now / CYCLE_SIZE), winner.mid.encode(\"HEX\"))\n record_candidate = self._slope[winner]\n\n # prevent this winner to 'win' again in this cycle\n winners.add(winner)\n\n # # TODO: this may be and invalid assumption\n # # assume that the peer is online\n # record_candidate.history.set(now)\n\n self._dispersy.callback.unregister(record_candidate.callback_id)\n self.create_barter_record(record_candidate.candidate, winner)\n\n else:\n logger.debug(\"c%d no peers available for record creation (%d peers on slope)\", int(now / CYCLE_SIZE), len(self._slope))\n\n else:\n logger.debug(\"c%d second climbing phase. 
wait %d seconds until the next phase\", int(now / CYCLE_SIZE), start_next - now)\n assert now >= start_idle or len(winners) >= self._signature_count\n for record_candidate in self._slope.itervalues():\n self._dispersy.callback.unregister(record_candidate.callback_id)\n self._slope = {}\n winners = set()\n yield start_next - now", "def semigroup_generators(self):", "def _next(self) -> Tuple[np.ndarray, np.ndarray, ModelGeneratorBase]:\n pass", "def GAStep(self):\n\n self.updateMatingPool()\n self.newGeneration()", "def _insertAllSteps(self): \n self.uMics = self.inputCoordinatesTiltedPairs.get().getUntilted().getMicrographs()\n self.tMics = self.inputCoordinatesTiltedPairs.get().getTilted().getMicrographs()\n\n self.inputMics = self._createSetOfParticles('auxMics')\n self.inputMics.copyInfo(self.uMics)\n self.inputMics.setStore(False)\n \n for micU, micT in izip(self.uMics, self.tMics):\n micU.cleanObjId()\n micT.cleanObjId()\n self.inputMics.append(micU)\n self.inputMics.append(micT)\n\n self.samplingInput = self.uMics.getSamplingRate()\n \n\n if self.downsampleType.get() != OTHER:\n # If 'same as picking' or 'original' get sampling rate from input micrographs\n #TODO: Review this when downsampling before picking is possible\n self.samplingFinal = self.samplingInput\n else:\n # If 'other' multiply the input sampling rate by the factor provided\n self.samplingFinal = self.samplingInput*self.downFactor.get()\n \n # Write pos files for each micrograph\n firstStepId = self._insertFunctionStep('writePosFilesStep')\n \n # For each micrograph insert the steps\n #run in parallel\n \n deps = []\n for mic in self.inputMics:\n localDeps = [firstStepId]\n micrographToExtract = mic.getFileName()\n micName = removeBaseExt(mic.getFileName())\n micId = mic.getObjId()\n\n # If downsample type is 'other' perform a downsample\n if self.downsampleType == OTHER:\n fnDownsampled = self._getTmpPath(micName+\"_downsampled.xmp\")\n downFactor = self.downFactor.get()\n args = \"-i %(micrographToExtract)s -o %(fnDownsampled)s --step %(downFactor)f --method fourier\"\n localDeps=[self._insertRunJobStep(\"xmipp_transform_downsample\", args % locals(),prerequisites=localDeps)]\n micrographToExtract = fnDownsampled\n \n # If remove dust \n if self.doRemoveDust:\n fnNoDust = self._getTmpPath(micName+\"_noDust.xmp\")\n \n thresholdDust = self.thresholdDust.get() #TODO: remove this extra variable\n args=\" -i %(micrographToExtract)s -o %(fnNoDust)s --bad_pixels outliers %(thresholdDust)f\"\n localDeps=[self._insertRunJobStep(\"xmipp_transform_filter\", args % locals(),prerequisites=localDeps)]\n micrographToExtract = fnNoDust\n \n #self._insertFunctionStep('getCTF', micId, micName, micrographToExtract)\n micName = removeBaseExt(mic.getFileName())\n \n # Actually extract\n deps.append(self._insertFunctionStep('extractParticlesStep', micId, micName, \n None, micrographToExtract, prerequisites=localDeps))\n # TODO: Delete temporary files\n \n # Insert step to create output objects \n self._insertFunctionStep('createOutputStep', prerequisites=deps)", "def generational_step(self, population):\n offspring = self.variation(population, self._number_offspring)\n self.evaluation(population)\n self.evaluation(offspring)\n if self._target_populations_size is None:\n new_pop_size = len(population)\n else:\n new_pop_size = self._target_populations_size\n self.update_diagnostics(population, offspring)\n return self.selection(population + offspring, new_pop_size)", "def __build_iteration(self) -> None:\n trees = [t for t in 
self.__trees.keys()]\n for tree in trees:\n heads = []\n branches = self.__trees[tree]\n for i in range(len(branches) - 1, -1, -1):\n if self.__trees.get(tree) and np.random.rand(1)[0] < self.__rate:\n heads += self.__branch_out(branches.pop(i), tree)\n self.__trees[self.__mappings[tree]] += heads\n\n # NB: this can cause errors when seeds spawn near the edge\n if len(self.__trees[self.__mappings[tree]]) == 0:\n logging.info(\"deleting tree with id {}\".format(tree))\n del self.__trees[self.__mappings[tree]]", "def train__iter__(self):\n\n # create worker-specific random number generator\n rng = create_rng_for_worker(self.model.current_epoch)\n\n while True:\n\n # select one file at random (with probability proportional to its annotated duration)\n file, *_ = rng.choices(\n self._train,\n weights=[f[\"duration\"] for f in self._train],\n k=1,\n )\n\n # select one annotated region at random (with probability proportional to its duration)\n segment, *_ = rng.choices(\n file[\"annotated\"],\n weights=[s.duration for s in file[\"annotated\"]],\n k=1,\n )\n\n # select one chunk at random (with uniform distribution)\n start_time = rng.uniform(segment.start, segment.end - self.duration)\n chunk = Segment(start_time, start_time + self.duration)\n\n X, one_hot_y, _ = self.prepare_chunk(file, chunk, duration=self.duration)\n\n y = self.prepare_y(one_hot_y)\n\n yield {\"X\": X, \"y\": y}", "def stepGenerator(self, current, target):\n\n while True:\n target = self.cfg[\"GOAL\"]\n if self.gotscript:\n if self.pathsteps in self.tc:\n terrain, topleft, botright = self.tc.get(self.pathsteps)\n pointlist = p4.getBlock(topleft, botright)\n # change logical map\n self.lmap.setPoints(terrain, pointlist)\n # change in gui, if running\n try:\n self.gui.clearPoints(pointlist)\n except:\n pass\n if self.pathsteps in self.gc:\n target = self.lmap.nearestPassable(self.gc.get(self.pathsteps))\n self.setGoal(target)\n if self.pathsteps in self.ac:\n newpos = p4.addVectors(current, self.ac.get(self.pathsteps))\n current = self.lmap.nearestPassable(newpos)\n yield newpos # scripted move is not costed or counted\n try:\n clockstart = timer() # start timer\n nextreturn = self.agent.getNext(self.lmap, current, target, self.timeremaining)\n logging.debug(nextreturn)\n clockend = timer()\n except:\n raise p4.BadAgentException()\n\n # Only time first step unless operating in 'realtime' mode. If this is realtime, and the step involved no reasoning (took less than FREE_TIME) do not count its time\n if ((not self.cfg.get(\"REALTIME\") and self.pathtime) or (\n (clockend - clockstart) < self.cfg.get(\"FREE_TIME\"))):\n steptime = 0\n else:\n steptime = (clockend - clockstart)\n previous = current\n\n # Agent may have returned single step or step plus sets of coords and colors.\n # Try/except distinguishes between them\n try:\n x = nextreturn[1][0] # fails if nextreturn is coord only\n current, configsets = nextreturn\n except TypeError:\n current = nextreturn\n finally:\n self.pathsteps += 1\n self.pathtime += steptime\n self.timeremaining -= steptime\n\n # We now consider every door open. In fact, we are just computing the final path cost, we are not\n # searching for it. 
So is reasonable to assume that I have all the keys along the path.\n allkeys = [k for k in self.lmap.key_and_doors.keys()]\n cost = self.lmap.getCost(current, previous, allkeys)\n # self.pathcost += self.lmap.getCost(current, previous, allkeys)\n if not self.lmap.isAdjacent(current, previous):\n cost = float('inf')\n # agent has made illegal move:\n if cost == float('inf'):\n self.updateStatus(\"Illegal move at \" + str(current) + \":\" + str(self.lmap.getCost(current)), False)\n if self.cfg[\"STRICT\"]:\n current = previous\n nextreturn = previous\n self.pathsteps -= 1\n cost = 0\n self.pathcost += cost\n yield nextreturn", "def generator(self):\n\n # generates speech turns long enough to contain at least one segment\n speech_turns = super(SpeechTurnSubSegmentGenerator, self).generator()\n\n # number of speech turns per \"speech turn batch\"\n if self.per_fold is not None:\n n_speech_turns = self.per_label * self.per_fold\n else:\n n_speech_turns = self.per_label * len(self.data_)\n\n endOfBatch = EndOfBatch()\n while True:\n\n # for each speech turn in batch\n for z in range(n_speech_turns):\n speech_turn = next(speech_turns)\n\n # for each segment in speech turn\n for X in self.iter_segments_(speech_turn['X']):\n\n # all but 'X' fields are left unchanged\n segment = dict(speech_turn)\n segment['X'] = X\n\n # remember that this segment belongs to this speech turn\n segment['z'] = z\n\n yield segment\n\n # let `batchify` know that the \"segment batch\" is complete\n yield endOfBatch", "def keep_first_iteration(self):\n self.keep_first_iteration_flag = True", "def new_iteration(self):\n if (\n self.previous_solutions is not None\n and self.previous_solutions.size(0) > self.raw_samples * self.new_iter_frac\n ):\n indices = torch.randperm(n=self.previous_solutions.size(0))[\n : int(self.raw_samples * self.new_iter_frac)\n ]\n self.previous_solutions = self.previous_solutions[indices]", "def sim_grasp_set_row(scene):\n gripper_model = burg.gripper.Robotiq2F85()\n\n target_object = None\n\n found = False\n while found == False:\n print([obj.object_type.identifier for obj in scene.objects])\n buffer_target_object = input(\"Wich object in the previous list do you want to grab ? 
\")\n\n i = 0\n while i < len(scene.objects):\n if (scene.objects[i].object_type.identifier == buffer_target_object):\n target_object = scene.objects[i]\n found = True\n break\n i += 1\n if found == False :\n print(\"the selected object is not in the list\")\n\n #test a grasp set and create a list of successful grasp\n grasp_set, contact_points, normals, approach_vectors = create_antipodal_grasp_set(target_object)\n sim = burg.sim.SceneGraspSimulator(target_object = target_object, gripper= gripper_model, scene=scene, verbose=False)\n scores = sim.simulate_grasp_set(grasp_set)\n successful_grasps = burg.grasp.GraspSet()\n index_successfull = []\n params_successfull = []\n for index in range(len(scores)):\n if scores[index] == 5:\n successful_grasps.add(grasp_set[index].as_grasp_set())\n index_successfull += [index]\n params_successfull+=[[contact_points[index], normals[index], approach_vectors[index]]]\n sim.dismiss()\n\n print(len(successful_grasps))\n print(\"hellllo\")\n\n time.sleep(10)\n\n #Evaluate each successfull grasp\n sim2 = burg.sim.SceneGraspSimulator(target_object = target_object, gripper= gripper_model, scene=scene, verbose=False)\n results = []\n for i, grasp in enumerate(successful_grasps):\n n_graspset, n_contact_points, n_normals, n_approach_vectors = create_antipodal_noisy_grasp_set(target_object, grasp, params_successfull[i][0][0])\n #n_graspset = perturbations.generate_perturb_grasp_set(grasp = grasp, nb_grasps = 50)\n #burg.visualization.show_grasp_set(objects = [target_object.object_type.mesh], gs = n_graspset, gripper = gripper_model, with_plane = True)\n params_noised = []\n for k in range(len(n_contact_points)):\n params_noised += [[n_contact_points[i], n_normals[i], n_approach_vectors[i]]]\n scores = sim2.simulate_grasp_set(n_graspset)\n robust = sum(scores)/len(scores)\n #metric = quality.probability_force_closure(n_graspset, params_noised)\n metric = quality.epsilon_quality(grasp, params_successfull[i] )\n results += [[grasp, robust, metric]]\n print([grasp, robust, metric])\n #results+= [[grasp, metric]]\n #results += [metric]\n\n print(results)\n sim2.dismiss()\n return results", "def __iter__(self):\n while (self.pointsleft > 0):\n current = min(self.pointsleft, self.settings.LOCALSKIPNUM)\n for i in range(current):\n self.add(self.fabric.getcoordinate())\n self.pointsleft -= self.settings.LOCALSKIPNUM\n self.pointscontroller.set(self.graph)\n yield self.graph", "def _next_minibatch(self, tile_one=False, batch_size=None, **kwargs):\n if batch_size is None:\n batch_size = self.batch_size\n if tile_one:\n batch = [self.data[self.ix]] * batch_size\n self.ix += 1\n if self.ix >= len(self.data):\n random.shuffle(self.data)\n self.ix -= len(self.data)\n else:\n batch = self.data[self.ix: self.ix+batch_size]\n if len(batch) < batch_size:\n random.shuffle(self.data)\n self.ix = batch_size - len(batch)\n batch += self.data[:self.ix]\n else:\n self.ix += batch_size\n self.batch = deepcopy(batch)\n \n self.start_list = []\n self.dest_list = []\n self.fake_start_list = []\n self.fake_dest_list = []\n\n \n for i,item in enumerate(self.batch):\n self.start_list.append(item['path'][0])\n self.dest_list.append(item['path'][-1])\n path_length = len(item['path'])\n scan = item['scan']\n fake_flag = True\n fail_flag = False\n goal_list = [goal for goal in self.paths[scan][self.start_list[-1]]]\n random.shuffle(goal_list)\n\n for goal in goal_list:\n if abs(path_length - len(self.paths[scan][self.start_list[-1]][goal])) < 1 and self.distances[scan][self.dest_list[-1]][goal] > 
3:\n self.fake_dest_list.append(goal)\n # print('fake_dest',i)\n fake_flag = False\n break\n\n if fake_flag:\n fail_flag = True\n # print('fake dest error')\n self.fake_dest_list.append(item['path'][-1])\n\n fake_flag = True\n goal_list = [goal for goal in self.paths[scan][self.dest_list[-1]]]\n random.shuffle(goal_list)\n\n for goal in goal_list:\n if abs(path_length - len(self.paths[scan][self.dest_list[-1]][goal])) < 1 and self.distances[scan][self.start_list[-1]][goal] > 3:\n self.fake_start_list.append(goal)\n fake_flag = False\n break\n \n \n if fake_flag:\n fail_flag = True\n # print('fake start error')\n self.fake_start_list.append(item['path'][0])\n\n # print('scan',scan)\n\n if i != 0 and fail_flag:\n self.batch[i] = deepcopy(self.batch[i-1])\n self.start_list[-1] = self.start_list[-2]\n self.dest_list[-1] = self.dest_list[-2]\n self.fake_start_list[-1] = self.fake_start_list[-2]\n self.fake_dest_list[-1] = self.fake_dest_list[-2]\n \n\n # cnt_dest = 0\n # cnt_star = 0\n # scan = self.batch[i]['scan']\n # item = self.batch[i]\n # # print('scan after',scan)\n # fake_dest_path = self.paths[scan][self.start_list[-1]][self.fake_dest_list[-1]]\n # fake_star_path = self.paths[scan][self.fake_start_list[-1]][self.dest_list[-1]]\n # for p in item['path']:\n # if p in fake_dest_path:\n # cnt_dest += 1\n # if p in fake_star_path:\n # cnt_star += 1\n # dis_dest = self.distances[scan][item['path'][-1]][self.fake_dest_list[-1]]\n # dis_star = self.distances[scan][item['path'][0]][self.fake_start_list[-1]]\n # print('length',path_length,'fake dest',cnt_dest, 'fake start',cnt_star,'dis:','dest',dis_dest,'start',dis_star)\n\n # print('ori',item['path'])\n # print('des',self.paths[scan][self.start_list[-1]][self.fake_dest_list[-1]])\n # print('sta',self.paths[scan][self.fake_start_list[-1]][self.dest_list[-1]])\n # print('')", "def run_all(rows):\n\n print \"start row1\"\n result = generate_1st_column(rows)\n print \"start row 2 - 10\"\n second_10th = generate_2nd_10th_column(rows)\n print \"add row 2 - 10\"\n result = generate_column(second_10th, result)\n print \"start row 11 - 19\"\n eleventh_19th = generate_11th_19th_column(rows)\n print \"add row 11 - 19\"\n result = generate_column(eleventh_19th, result)\n print \"start row 20\"\n twentiesth = generate_20th_column(rows)\n print \"add row 20\"\n result = generate_column(twentiesth, result)\n print \"adding row 20 completed.\"\n\n return result", "def hedge_maker(tail_width, head_width, node_ls, self_loops):\n tail_set = set()\n head_set = set()\n\n tail_full = False\n head_full = False\n \n while tail_full == False: \n selection = random.choice(node_ls)\n tail_set.add(selection)\n if len(tail_set) == tail_width:\n tail_full = True\n\n\n\n \n while head_full == False:\n selection = random.choice(node_ls)\n\n if self_loops == True: #if we don't want self loops, this checks to make sure the selection isn't one of the tail nodes\n head_set.add(selection)\n else: \n if selection not in tail_set:\n head_set.add(selection)\n\n if len(head_set) == head_width:\n head_full = True\n\n frozen_head = frozenset(head_set)\n frozen_tail = frozenset(tail_set)\n \n return (frozen_tail, frozen_head) #returns as a tuple of frozensets so that it will only be added to the edge_set if it's a unique edge", "def _batcher(self, rows):\n row_count = 0\n batch = []\n batch_count = 1\n\n total_rows_modified = 0\n throttle_count = 0\n\n i = 0\n for row in rows:\n if row_count > self.batch_size - 1:\n logger.debug(f\"row_count={row_count} batch_size={self.batch_size} and 
batch={len(batch)}\")\n # Yield the previous batch\n yield batch\n\n # Start the new batch\n batch = []\n batch.append(row)\n row_count = 1\n\n batch_count += 1\n # break # toggle to load one batch only\n else:\n row_count += 1\n batch.append(row)\n\n # Put in a sleep timer to throttle how hard we hit the database\n if self.throttle_time and self.throttle_size and (throttle_count > self.throttle_size - 1):\n logger.info(f\"Sleeping for {self.throttle_time} seconds... row: {i}\")\n time.sleep(int(self.throttle_time))\n throttle_count = 0\n elif self.throttle_time and self.throttle_size:\n throttle_count += 1\n i += 1\n\n yield batch", "def _generate_instances(self, single_traj):\n return [single_traj[:2].values]", "def post_backward_generator(self):\n pass", "def _next_minibatch(self, tile_one=False, batch_size=None, **kwargs):\n if batch_size is None:\n batch_size = self.batch_size\n if tile_one:\n batch = [self.data[self.ix]] * batch_size\n self.ix += 1\n if self.ix >= len(self.data):\n random.shuffle(self.data)\n self.ix -= len(self.data)\n else:\n batch = self.data[self.ix: self.ix+batch_size]\n if len(batch) < batch_size:\n random.shuffle(self.data)\n self.ix = batch_size - len(batch)\n batch += self.data[:self.ix]\n else:\n self.ix += batch_size\n self.batch = deepcopy(batch)\n \n self.start_list = []\n self.dest_list = []\n self.fake_start_list = []\n self.fake_dest_list = []\n\n \n for i,item in enumerate(self.batch):\n self.start_list.append(item['path'][0])\n self.dest_list.append(item['path'][-1])\n path_length = len(item['path'])\n scan = item['scan']\n fake_flag = True\n fail_flag = False\n goal_list = [goal for goal in self.paths[scan][self.start_list[-1]]]\n random.shuffle(goal_list)\n\n for goal in goal_list:\n if abs(path_length - len(self.paths[scan][self.start_list[-1]][goal])) < 1 and self.distances[scan][self.dest_list[-1]][goal] > 3:\n self.fake_dest_list.append(self.paths[scan][self.start_list[-1]][goal])\n # print('fake_dest',i)\n fake_flag = False\n break\n\n if fake_flag:\n fail_flag = True\n # print('fake dest error')\n self.fake_dest_list.append(item['path'])\n\n fake_flag = True\n goal_list = [goal for goal in self.paths[scan][self.dest_list[-1]]]\n random.shuffle(goal_list)\n\n for goal in goal_list:\n if abs(path_length - len(self.paths[scan][self.dest_list[-1]][goal])) < 1 and self.distances[scan][self.start_list[-1]][goal] > 3:\n self.fake_start_list.append(self.paths[scan][self.dest_list[-1]][goal])\n fake_flag = False\n break\n \n \n if fake_flag:\n fail_flag = True\n # print('fake start error')\n self.fake_start_list.append(item['path'])\n\n # print('scan',scan)\n\n if i != 0 and fail_flag:\n self.batch[i] = deepcopy(self.batch[i-1])\n self.start_list[-1] = self.start_list[-2]\n self.dest_list[-1] = self.dest_list[-2]\n self.fake_start_list[-1] = self.fake_start_list[-2]\n self.fake_dest_list[-1] = self.fake_dest_list[-2]\n \n\n # cnt_dest = 0\n # cnt_star = 0\n # scan = self.batch[i]['scan']\n # item = self.batch[i]\n # # print('scan after',scan)\n # fake_dest_path = self.paths[scan][self.start_list[-1]][self.fake_dest_list[-1]]\n # fake_star_path = self.paths[scan][self.fake_start_list[-1]][self.dest_list[-1]]\n # for p in item['path']:\n # if p in fake_dest_path:\n # cnt_dest += 1\n # if p in fake_star_path:\n # cnt_star += 1\n # dis_dest = self.distances[scan][item['path'][-1]][self.fake_dest_list[-1]]\n # dis_star = self.distances[scan][item['path'][0]][self.fake_start_list[-1]]\n # print('length',path_length,'fake dest',cnt_dest, 'fake 
start',cnt_star,'dis:','dest',dis_dest,'start',dis_star)\n\n # print('ori',item['path'])\n # print('des',self.paths[scan][self.start_list[-1]][self.fake_dest_list[-1]])\n # print('sta',self.paths[scan][self.fake_start_list[-1]][self.dest_list[-1]])\n # print('')", "def _core(self, generation_start: int, generation_end: int):\n\n # Evaluate fitness for each objective\n self._population = self._evaluate_fitness(population=self._population)\n\n #  Get population fitness\n population_fitness = self._population.fitness\n\n # Sort Pareto front\n fast_non_dominated_sort(population_fitness)\n\n # Calculate crowding\n calculate_crowding(population_fitness)\n\n info = \"(NSGAII) Generations (form %d to %d)\" % (generation_start, generation_end)\n for generation in tqdm(range(generation_start, generation_end), desc=info):\n\n # Annotate algorithm performance\n self._annotate(generation=generation)\n\n # Apply selection\n offspring = self.selection.select(population=self._population,\n new_pop_length=self._population.size)\n\n # Apply cross-over\n offspring = self.crossover.cross_population(offspring)\n\n # Introduces mutations\n if self.mutation is not None:\n offspring = self.mutation.mutate(population=offspring, mutation_rate=self.mutation_rate)\n\n # Evaluate offspring\n offspring = self._evaluate_fitness(population=offspring)\n\n # Restart parent solutions\n restart_solutions(self._population.fitness)\n\n # Merge parents and offspring\n parents_offspring = self._population.merge_population(self._population, offspring)\n\n #  Get parents_offspring fitness\n parents_offspring_fitness = parents_offspring.fitness\n\n # Sort Pareto front\n fast_non_dominated_sort(parents_offspring_fitness)\n\n # Calculate crowding\n calculate_crowding(parents_offspring_fitness)\n\n # Set new population\n self._set_new_population(parents_offspring)", "def batch_generator(samples, batch_size=32, is_training=True):\n num_samples = len(samples)\n while True: # Loop forever so the generator never terminates\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples.iloc[offset:offset+batch_size]\n\n images = []\n angles = []\n for batch_sample in batch_samples.iterrows():\n batch_sample = batch_sample[1]\n name = DATA_PATH + '/IMG/'+batch_sample['center'].split('/')[-1]\n center_image = cv2.imread(name)\n center_image = cv2.cvtColor(center_image, cv2.COLOR_BGR2RGB)\n center_angle = float(batch_sample['steering'])\n images.append(center_image)\n angles.append(np.clip(center_angle,-1,1))\n if is_training:\n # Center Flip\n images.append(cv2.flip(center_image,1))\n angles.append(np.clip(center_angle*-1.0,-1,1))\n # Left\n name = DATA_PATH + '/IMG/'+batch_sample['left'].split('/')[-1]\n correction = 0.2\n center_image = cv2.imread(name)\n center_image = cv2.cvtColor(center_image, cv2.COLOR_BGR2RGB)\n images.append(center_image)\n angles.append(np.clip(center_angle+correction,-1,1))\n # Left Flip\n images.append(cv2.flip(center_image,1))\n angles.append(np.clip((center_angle+correction)*-1.0,-1,1))\n # Right\n name = DATA_PATH + '/IMG/'+batch_sample['right'].split('/')[-1]\n correction = -0.2\n center_image = cv2.imread(name)\n center_image = cv2.cvtColor(center_image, cv2.COLOR_BGR2RGB)\n images.append(center_image)\n angles.append(np.clip(center_angle+correction,-1,1))\n # Right Flip\n images.append(cv2.flip(center_image,1))\n angles.append(np.clip((center_angle+correction)*-1.0,-1,1))\n \n X_train = np.array(images)\n y_train = np.array(angles)\n yield shuffle(X_train, y_train)", "def take_leader(self):", 
"def next_generation(self):\n new_board = self.array.copy()\n for cell in self.cells:\n cell.update(new_board)\n \n if np.array_equal(self.prev_array, new_board):\n self.game.stable = True\n else:\n self.prev_array = self.array\n self.array = new_board", "def generate(self):\n self.generate_points()\n self.generate_edges()", "def simulationTwoDrugsDelayedTreatment():\n\n # TODO", "def _selection(self) -> None:\n # The size of the new population must be the same as the prev. one\n max_size_of_pop = self._pop_size\n\n # Copy 50% of best chromosomes to the next generation\n num_of_pop_to_next_gen = round(self._pop_size / 2)\n max_size_of_pop -= num_of_pop_to_next_gen\n self._population = self._population[0:num_of_pop_to_next_gen]\n\n # Mutate 25% of the prev. population and add to the next generation\n num_of_mutated_to_next_gen = round(max_size_of_pop / 2)\n max_size_of_pop -= num_of_mutated_to_next_gen\n for i in range(num_of_mutated_to_next_gen):\n # Mutate one member from the prev. generation\n img, _ = self._population[i]\n new_mutated_member = self._mutate(img)\n\n # Apply more mutation to one chromosome(from 0 to 100)\n for i in range(rand.randint(0, 100)):\n new_mutated_member = self._mutate(new_mutated_member)\n\n # Evaluate the goodness of obtained chromosome\n fitval = self._fit_test(new_mutated_member)\n # Add the mutated chromosome to the next generation\n self._population.append((new_mutated_member, fitval))\n\n # For remaining 25% of the prev. population do crossing overs\n num_of_crossing_overs_to_next_gen = max_size_of_pop\n max_size_of_pop -= num_of_crossing_overs_to_next_gen\n\n for i in range(num_of_crossing_overs_to_next_gen):\n # Choose 2 chromosomes, then do one crossing over\n img_ext_1, _ = self._population[i]\n img_ext_2, _ = self._population[rand.randint(0, num_of_pop_to_next_gen)]\n\n new_mutated_member = self._crossing_over(img_ext_1, img_ext_2)\n # Evaluate the goodness of obtained chromosome\n fitval = self._fit_test(new_mutated_member)\n # Add the derived chromosome to the next generation.\n # Form of 1 element of the population: (member, fitness value)\n self._population.append((new_mutated_member, fitval))\n\n # Sort the new generation in increasing order based on the fitness value of each chromosome\n self._population.sort(key=lambda x: x[1])\n print(f'Best chromosome fit value: {self._population[0][1]}')", "def repeated_iteration(self) -> global___Statement.Iteration.RepeatedIteration:", "def run(self):\n values_to_set = self._load().get_initial_values()\n\n best_data = []\n worst_data = []\n found = False\n overall_nb_generations_done = 0\n restart_counter = 0\n\n while overall_nb_generations_done < self._max_nb_generations and not found:\n new_population = ga_utils.create_generation(self._population_size, values_to_set)\n\n nb_generations_done = 0\n remember_the_best = 0\n nb_generations_without_improvement = 0\n\n # Loop until max allowed generations is reached or a solution is found\n while nb_generations_done < self._max_nb_generations and not found:\n # Rank the solutions\n ranked_population = ga_utils.rank_population(new_population)\n best_solution = ranked_population[0]\n best_score = best_solution.fitness()\n worst_score = ranked_population[-1].fitness()\n best_data.append(best_score)\n worst_data.append(worst_score)\n\n # Manage best value and improvements among new generations over time\n if remember_the_best == best_score:\n nb_generations_without_improvement += 1\n else:\n remember_the_best = best_score\n if 0 < 
self._restart_after_n_generations_without_improvement < nb_generations_without_improvement:\n print(\"No improvement since {} generations, restarting the program\".\n format(self._restart_after_n_generations_without_improvement))\n restart_counter += 1\n break\n\n # Check if problem is solved and print best and worst results\n if best_score > 0:\n print(\"Problem not solved on generation {} (restarted {} times). Best solution score is {} and \"\n \"worst is {}\".format(nb_generations_done, restart_counter, best_score, worst_score))\n # Not solved => select a new generation among this ranked population\n # Retain only the percentage specified by selection rate\n next_breeders = ga_utils.pick_from_population(ranked_population, self._selection_rate,\n self._random_selection_rate)\n\n children = ga_utils.create_children_random_parents(next_breeders, self._nb_children)\n new_population = ga_utils.mutate_population(children, self._mutation_rate)\n\n nb_generations_done += 1\n overall_nb_generations_done += 1\n else:\n print(\"Problem solved after {} generations ({} overall generations)!!! Solution found is:\".\n format(nb_generations_done, overall_nb_generations_done))\n best_solution.display()\n found = True\n print(\"It took {} to solve it\".format(tools.get_human_readable_time(self._start_time, time())))\n\n if not found:\n print(\"Problem not solved after {} generations. Printing best and worst results below\".\n format(overall_nb_generations_done))\n ranked_population = ga_utils.rank_population(new_population)\n best_solution = ranked_population[0]\n worst_solution = ranked_population[-1]\n print(\"Best is:\")\n best_solution.display()\n print(\"Worst is:\")\n worst_solution.display()\n\n graphics.draw_best_worst_fitness_scores(best_data, worst_data)", "def _cross_parents(self):\n while len(self.children_population) < self.children_count:\n parent_1, parent_2 = random.sample(self.population, k=2)\n self.children_population.extend(self.crossover.cross(parent_1, parent_2))", "def _generate_pileups(self):\n pass", "def start_next_row(self) -> None:\n\n if self._is_ringing_rounds:\n self._row = self._rounds\n else:\n self._row = self.row_generator.next_row(self.stroke)\n if len(self._row) < len(self._rounds):\n # Add cover bells if needed\n self._row.extend(self._rounds[len(self._row):])\n\n for (index, bell) in enumerate(self._row):\n self.expect_bell(index, bell)", "def regenerateTable():\n deleteAll()\n\n # Start generating records from start nodes, and continue generating\n # records for their children until either the bottom of the ANAD_PART_OF\n # tree is reached, or stop nodes are reached.\n\n for perspective in Perspectives.Iterator():\n perspectiveName = perspective.getName()\n starts = PerspectiveAmbits.getStartAmbitForPerspective(perspectiveName)\n stops = PerspectiveAmbits.getStopAmbitForPerspective(perspectiveName)\n startNodeOids = sets.Set(starts.keys())\n stopNodeOids = sets.Set(stops.keys())\n \n #print perspectiveName\n #print startNodeOids\n #print stopNodeOids\n \n startApos = [PartOfs.getPrimaryPathApoForNodeOid(nodeOid)\n for nodeOid in startNodeOids]\n apoList = startApos[:]\n\n while len(apoList) > 0:\n partOf = apoList.pop()\n\n # create POP record for this part of.\n \n pop = AnadPartOfPerspectiveDb.AnadPartOfPerspectiveDbRecord()\n pop.setPerspectiveName(perspectiveName)\n pop.setApoOid(partOf.getOid())\n pop.setIsAncestor(False)\n pop.setNodeOid(partOf.getNodeOid())\n pop.insert()\n \n #if partOf.getOid() == 68470:\n # print \n # print pop.getPerspectiveName()\n # 
print pop.getApoOid()\n # print pop.isAncestor()\n # print pop.getNodeOid()\n # print\n # print partOf.getOid()\n # print partOf.getSpecies()\n # print partOf.getNodeStartStageOid()\n # print partOf.getNodeEndStageOid()\n # print partOf.getPathStartStageOid()\n # print partOf.getPathEndStageOid()\n # print partOf.getNodeOid()\n # print partOf.getSequence()\n # print partOf.getDepth()\n # print partOf.getFullPathEmapas()\n # print partOf.getFullPath()\n # print partOf.isPrimaryPath()\n # print partOf.getParentApoOid()\n\n _addToKnowledge(pop)\n\n # if this is not a stop node, then add all its part-of kids\n # to the list of APOs to generate POP records for.\n if partOf.getNodeOid() not in stopNodeOids:\n apoList.extend(PartOfs.getByParentOid(partOf.getOid()))\n\n # for each start node, add any ancestor APOs that were not added\n # by the above process.\n ancesApos = sets.Set()\n for apo in startApos:\n parentApoOid = apo.getParentApoOid()\n if parentApoOid != None:\n parentApo = PartOfs.getByOid(parentApoOid)\n if (_byApoOid.get(parentApoOid) == None or\n _byApoOid[parentApoOid].get(perspectiveName) == None):\n ancesApos.add(parentApo)\n\n while len(ancesApos) > 0:\n ancesApo = ancesApos.pop()\n # create POP record for this ancestor\n pop = AnadPartOfPerspectiveDb.AnadPartOfPerspectiveDbRecord()\n pop.setPerspectiveName(perspectiveName)\n pop.setApoOid(ancesApo.getOid())\n pop.setIsAncestor(True)\n pop.setNodeOid(ancesApo.getNodeOid())\n pop.insert()\n _addToKnowledge(pop)\n\n # if this APO has a parent that hasn't yet been processed then\n # add it to list of ancestor APOs to generate records for.\n parentApoOid = ancesApo.getParentApoOid()\n if (parentApoOid != None and\n (_byApoOid.get(parentApoOid) == None or\n _byApoOid[parentApoOid].get(perspectiveName) == None)):\n parentApo = PartOfs.getByOid(parentApoOid)\n ancesApos.add(parentApo)\n \n \n \n return", "def step(self, generation_idx, log_writer):\n # Sort the population by fitness and select the top\n sorted_fit_idxs = list(reversed(sorted(zip(self.fitnesses, itools.count()))))\n sorted_pop = [self.population[ix] for _, ix in sorted_fit_idxs]\n\n # recalculate the fitness of the elite subset and find the best individual\n max_fitness, max_idx = sorted_fit_idxs[0]\n for cp_from, cp_to in zip(sorted_pop, self.selected):\n cp_to.model.load_state_dict(cp_from.model.state_dict())\n\n log_writer.add_scalar(\"Best/fitness\", sorted_fit_idxs[0][0], generation_idx)\n log_writer.add_scalar(\"Best/learning rate\", self.population[max_idx].learning_rate, generation_idx)\n log_writer.add_scalar(\"Best/avg instinct activation\", self.instinct_average_list[max_idx], generation_idx)\n log_writer.add_scalar(\"Worst/fitness\", sorted_fit_idxs[-1][0], generation_idx)\n log_writer.add_scalar(\"Worst/elite fitness\", sorted_fit_idxs[self.to_select - 1][0], generation_idx)\n log_writer.add_scalar(\"Average fitness\", sum(self.fitnesses) / len(self.fitnesses), generation_idx)\n\n # next generation\n for i in range(self.pop_size):\n if i == max_idx:\n continue\n\n dart = int(torch.rand(1) * self.to_select)\n # Select parent and child\n parent = self.selected[dart]\n child = self.population[i]\n # copy the parent genes to the child genes\n child.model.load_state_dict(parent.model.state_dict())\n child.learning_rate = parent.learning_rate\n # apply mutation to model parameters\n for p in child.model.get_evolvable_params():\n mutation = torch.randn_like(p.data) * self.sigma\n p.data += mutation\n # apply mutation to learning rate\n child.learning_rate += 
torch.randn((1, 1)).item() * 0.001\n if child.learning_rate < 0:\n child.learning_rate *= -1\n\n if self.sigma > self.min_sigma:\n self.sigma *= self.sigma_decay\n elif self.sigma < self.min_sigma:\n self.sigma = self.min_sigma\n\n return (self.population[max_idx], max_fitness)", "def step(individuals, grammar, replacement, selection, fitness_function, best_ever):\n #Select parents\n parents = selection(individuals)\n #Crossover parents and add to the new population\n new_pop = []\n while len(new_pop) < GENERATION_SIZE:\n new_pop.extend(onepoint_crossover(*random.sample(parents, 2)))\n #Mutate the new population\n new_pop = list(map(int_flip_mutation, new_pop))\n #Evaluate the fitness of the new population\n evaluate_fitness(new_pop, grammar, fitness_function)\n #Replace the sorted individuals with the new populations\n individuals = replacement(new_pop, individuals)\n best_ever = max(best_ever, max(individuals))\n return individuals, best_ever", "def generate(\n seeds=10,\n param_num_nodes=7,\n mode='train',\n param_dim=10,\n param_sel=100,\n param_mu=10,\n param_br=0.05,\n param_activity_wt=None,\n A=None,\n sp_to_id=None,\n min_coord=None,\n max_coord=None,\n org_pts=None,\n ):\n global dim, sel, mu, br, activity_wt, tree_lc, tree_rc, num_nodes\n\n dim=param_dim\n sel=param_sel\n mu=param_mu\n br=param_br\n activity_wt=param_activity_wt\n num_nodes = param_num_nodes\n\n sp_root = 0\n tree = None\n\n if mode == 'train':\n tree, tree_lc, tree_rc = generate_tree(sp_root, num_nodes)\n if param_activity_wt is None:\n # weights for the linear activity function\n num_wts = int(((dim * (dim + 1))/2) + 1)\n activity_wt = np.random.normal(0, 1, num_wts)\n\n if org_pts is None:\n org_pts = []\n # simulate data points\n # format: exampleID, species, values\n # region, species, coord1, coord2, ...., activity_value\n\n for i in tqdm(range(int(seeds))):\n pt_id = i\n\n # pick a random point of d-dimension\n rand_pt = np.random.uniform(min_coord, max_coord, dim)\n curr_pt = np.append([pt_id, sp_root], rand_pt)\n curr_activity = get_activity(modify_pt(rand_pt), activity_wt)\n # print('curr_pt:', curr_pt, 'curr_activity:', curr_activity); exit(0)\n org_pts.append(np.append(curr_pt, curr_activity))\n\n generated_points = []\n full_org_pts = []\n\n if mode == 'train':\n pool = Pool(16)\n sample_bag = pool.map(generate_bag, org_pts)\n for item in sample_bag:\n for val in item:\n val = list(val)\n full_org_pts.append(val)\n generated_points.append(val[:2]+modify_pt(val[2:-1])+[val[-1]])\n else:\n for val in org_pts:\n val = list(val)\n generated_points.append(val[:2]+modify_pt(val[2:-1])+[val[-1]])\n\n return generated_points, activity_wt, org_pts, full_org_pts, tree", "def registration_iteration_ended(self):", "def advance_generation(self):\n # Todo: implement\n for particle in self.particles:\n if particle.value > particle.best_value:\n particle.best_position = particle.x\n particle.best_value = particle.value\n rp = random.uniform(0.0, 1.0)\n rg = random.uniform(0.0, 1.0)\n particle.v = self.w * particle.v + self.phip * rp * (particle.best_position - particle.x) + self.phig * rg * (self.get_best_position() - particle.x)\n particle.x = particle.x + particle.v\n particle.evaluated = False", "def start_one_step(self):\r\n new_infected_list = []\r\n old_infected_list = copy.deepcopy(self.infected_list)\r\n new_recovered_list = []\r\n old_recovered_list = copy.deepcopy(self.recovered_list)\r\n # For each infected node\r\n for infected_nid in old_infected_list:\r\n infected_node = 
self.node_dict[infected_nid]\r\n # For each neighbor\r\n for dst_nid in infected_node.get_dst_nid_list(self.graph):\r\n dst_node = self.node_dict[dst_nid]\r\n # Infect susceptible nodes with probability [p]\r\n if dst_node.state is NodeState.SUSCEPTIBLE and random.random() < self.p:\r\n dst_node.infected(self.i)\r\n new_infected_list.append(dst_nid)\r\n\r\n # Minus 1 turn of (remaining) infected days for all infected nodes\r\n infected_node.minus_one_state_day()\r\n # If infected node is recovered\r\n if infected_node.check_finish_infection():\r\n # Infected node get recovered\r\n infected_node.recovered(self.r)\r\n # Remove from infected list\r\n self.infected_list.remove(infected_nid)\r\n # Append to recovered list\r\n new_recovered_list.append(infected_nid)\r\n\r\n # Add newly infected nodes into infected list\r\n self.infected_list += new_infected_list\r\n\r\n # For each recovered node\r\n for recovered_nid in old_recovered_list:\r\n recovered_node = self.node_dict[recovered_nid]\r\n # Minus 1 turn of (remaining) recovered days for all recovered nodes\r\n recovered_node.minus_one_state_day()\r\n # If infected node is recovered\r\n if recovered_node.check_finish_recovery():\r\n # Recovered node get recovered\r\n recovered_node.susceptible()\r\n # Remove from recovered list\r\n self.recovered_list.remove(recovered_nid)\r\n\r\n # Add newly recovered nodes into recovered list\r\n self.recovered_list += new_recovered_list", "def createTrainingSet(id_row, full_id_row, usedUri, r, amount):\n \n nbIdRequired = amount * len(id_row)\n \n result = []\n while len(result) < nbIdRequired/2:\n newRow = dict()\n row = getRandomRow(id_row, r) \n \n if row[\"uri\"] in usedUri:\n continue\n #id_row.pop(row[\"uri\"])\n \n usedUri.add(row[\"uri\"])\n newRow[\"uri\"] = row[\"uri\"]\n newRow[\"question\"] = row[\"subject\"]+\" \"+row[\"content\"]\n newRow[\"bestanswer\"] = row[\"bestanswer\"]\n newRow[\"label\"] = \"0\"\n result.append(newRow)\n \n print str(len(result))+\"\\r\",\n \n while len(result) < nbIdRequired:\n \n newRow = dict()\n row = getRandomRow(id_row, r)\n\n if row[\"uri\"] in usedUri:\n continue\n id_row.pop(row[\"uri\"])\n \n usedUri.add(row[\"uri\"])\n newRow[\"uri\"] = row[\"uri\"]\n newRow[\"question\"] = row[\"subject\"]+\" \"+row[\"content\"]\n rRow = getRandomRow(full_id_row, r)\n while rRow == row:\n rRow = getRandomRow(full_id_row, r)\n \n newRow[\"bestanswer\"] = rRow[\"bestanswer\"]\n newRow[\"label\"] = \"1\" \n result.append(newRow)\n \n print str(len(result))+\"\\r\",\n \n header = [\"uri\", \"question\", \"bestanswer\", \"label\"]\n return (header,result)", "def flow(self, batch_size=32, output='both', crops=0):\n while True:\n for dataset in self.input_sets:\n X = self.training_set['input/'+dataset]\n y = self.training_set['target/'+dataset]\n y_seg = self.training_set['seg_map/'+dataset]\n\n for i in range(int(math.ceil(X.shape[0]/2000))):\n index = list(range(0,X.shape[0]))\n sample = random.sample(index, batch_size)\n sample.sort()\n X_batch = X[sample, ...]\n y_batch = y[sample, ...]\n y_seg_batch = y_seg[sample, ...]\n X_batch = self.augment(X_batch)\n\n if crops > 0:\n (X_batch, y_batch,\n y_seg_batch) = _augmentors.random_crops(\n X_batch, y_batch, y_seg_batch, n_crops=crops, crop_dim=20)\n\n if output=='both':\n yield (X_batch, [y_batch, y_seg_batch])\n elif output=='seg':\n yield (X_batch, y_seg)\n elif output=='density':\n yield (X_batch, y_batch)\n else:\n raise Exception('output must be \"density\", \"seg\" or \"both\"')", "def __nextRun(self, t1, t2):\n if 
self.t1==t1:\n # rerun from t1\n if self.t2!=t2:\n raise Exception(\"bad t2 (%f!=%f)\" % (t2, self.t2)) \n \n loader = fac.FacManager(self.metafor)\n nt = loader.lookForFile(self.nbFacs) #(0)\n loader.eraseAllFrom(nt)\n self.runOK = self.metafor.getTimeIntegration().restart(nt)\n else:\n # new time step\n tsm = self.metafor.getTimeStepManager()\n dt=t2-t1\n dtmax=dt\n tsm.setNextTime(t2, 1, dtmax) \n \n loader = fac.FacManager(self.metafor)\n nt1 = loader.lookForFile(self.nbFacs) #(0)\n nt2 = loader.lookForFile(self.nbFacs+1) #(1)\n if not self.saveAllFacs:\n loader.erase(nt1) # delete first fac\n self.runOK = self.metafor.getTimeIntegration().restart(nt2)\n if self.saveAllFacs:\n self.nbFacs+=1", "def batch_generator(training_data, sequence_length=15, window_size = 15):\n engine_ids = list(training_data[\"engine_id\"].unique())\n temp = training_data.copy()\n for id_ in engine_ids:\n indexes = temp[temp[\"engine_id\"] == id_].index\n traj_data = temp.loc[indexes]\n cutoff_cycle = max(traj_data['cycle']) - sequence_length - window_size + 1\n \n if cutoff_cycle<0:\n drop_range = indexes\n print(\"sequence_length + window_size is too large\")\n else:\n cutoff_cycle_index = traj_data['cycle'][traj_data['cycle'] == cutoff_cycle+2].index\n drop_range = list(range(cutoff_cycle_index[0], indexes[-1] + 1))\n \n temp.drop(drop_range, inplace=True)\n indexes = list(temp.index)\n del temp\n \n feature_number = training_data.shape[1]-3\n\n x_shape = (len(indexes), sequence_length, window_size, feature_number)\n x_batch = np.zeros(shape=x_shape, dtype=np.float32)\n y_shape = (len(indexes))\n y_batch = np.zeros(shape=y_shape, dtype=np.float32)\n\n alt_index = indexes[0]\n for batch_index, index in enumerate(indexes):\n y_batch[batch_index] = training_data.iloc[index+window_size-2+sequence_length,-1]\n \n\n \n if index-alt_index==1 and batch_index!=0:\n temp_window = training_data.iloc[index+sequence_length-1:index+sequence_length-1 + window_size, 2:-1].values.reshape(1,window_size,-1)\n x_batch[batch_index] = np.concatenate((x_batch[batch_index-1][1:],temp_window))\n else:\n for seq in range(sequence_length):\n x_batch[batch_index][seq] = training_data.iloc[index+seq:index+seq + window_size, 2:-1].values\n alt_index = index\n\n \n return x_batch, y_batch", "def source_review(env, \r\n number, \r\n counter,\r\n generation,\r\n generation_list_come,\r\n generation_list_wait,\r\n generation_list_begin,\r\n generation_list_finish,\r\n df_simtime,\r\n generation_list_name,g2_list_name):\r\n# global g2_list_name\r\n for i in range(number):\r\n if i == 0:\r\n t = generation_list_come[i]#到达时间服从指数分布,此处的t为间隔时间\r\n else:\r\n t = generation_list_come[i] - generation_list_come[i-1]\r\n yield env.timeout(t)\r\n serve_time = np.random.choice(df_simtime['sim_time'])#得到模拟数据\r\n # print(serve_time)\r\n c = document(env, g2_list_name[i], generation, counter, time_in_fac,generation_list_begin,generation_list_wait,generation_list_finish,serve_time,generation_list_name)\r\n env.process(c)", "def startGeneration(variant, resolution, loops):\n # Check for valid resolution\n if resolution % 2 != 0:\n print (\"Resolution should be an even integer.\")\n return\n\n # Set high score:\n if variant == 20:\n high_score = 11365950\n if variant == 40:\n high_score = 17858670\n if variant == 60:\n high_score = 24239310\n\n # House distirbution:\n familyHome_count = 0.6 * variant\n bungalow_count = 0.25 * variant\n maison_count = 0.15 * variant\n\n for loops in range(loops):\n\n # Initialize Classlist\n placed_houses = []\n placed_water = 
[]\n\n # Initialize values\n gr = generic.genMap(180 * resolution, 160 * resolution)\n\n # Set length and width based on resultion.\n fam_length = int(resolution * 8)\n fam_width = int(resolution * 8)\n fam_freespace = int(resolution * 2)\n\n bung_length = int(resolution * 7.5)\n bung_width = int(resolution * 10)\n bung_freespace = int(resolution * 3)\n\n mais_length = int(resolution * 10.5)\n mais_width = int(resolution * 11)\n mais_freespace = int(resolution * 6)\n\n # Water\n # Generate water parts\n water_parts = genWater(gr, resolution)\n\n # Place water parts in grid:\n for part in range(len(water_parts)):\n W = 0\n\n # Loop until correctly placed.\n while W != 1:\n\n # Define class instance\n Water = class_house.House(water_parts[part][1], water_parts[part][0],\n 1, 0, 0, 4, \"W\", resolution)\n\n ngrid = genHome(gr, Water)\n\n # Check for success:\n if ngrid == False:\n print (\"No succesfull placement Water\")\n else:\n print (\"Water {0} placed!\".format(W))\n gr = list(ngrid)\n\n # Add water to list\n placed_houses.append(Water)\n\n W = 1\n\n # Maisons\n M = 0\n while M != maison_count:\n\n # Define class instance\n Maison = class_house.House(mais_length, mais_width,\n mais_freespace, 610000, 6, 1, \"M\", resolution)\n\n ngrid = genHome(gr, Maison)\n\n # Check if house succsfully placed:\n if ngrid == False:\n print (\"No succesfull placement Maison\")\n else:\n print (\"Maison {0} placed!\".format(M))\n gr = list(ngrid)\n\n # Add maison to list\n placed_houses.append(Maison)\n\n M += 1\n\n # Then bungalows\n B = 0\n while B != bungalow_count:\n\n # Define class instance\n Bungalow = class_house.House(bung_length, bung_width,\n bung_freespace, 399000, 4, 2, \"B\", resolution)\n\n ngrid = genHome(gr, Bungalow)\n\n # Check for succes:\n if ngrid == False:\n print (\"No succesfull placement Bungalow\")\n else:\n print (\"Bungalow {0} placed!\".format(B))\n gr = list(ngrid)\n\n # Add maison to list\n placed_houses.append(Bungalow)\n\n B += 1\n\n # Then Family homes\n F = 0\n while F != familyHome_count:\n\n # Define class instance\n Familyhome = class_house.House(fam_length, fam_width,\n fam_freespace, 285000, 3, 3, \"F\", resolution)\n\n ngrid = genHome(gr, Familyhome)\n\n # Check for succes:\n if ngrid == False:\n print (\"No succesfull placement Family Home\")\n else:\n print (\"Family home {0} placed!\".format(F))\n gr = list(ngrid)\n\n # Add maison to list\n placed_houses.append(Familyhome)\n\n F += 1\n\n # Calculate score using Placed houses\n sc = generic.calculateScore(gr, placed_houses)\n name = (\"Score: \" + str(sc))\n\n # Only save to file when new record.\n fname = \"Type{0} - {1}\".format(variant, sc)\n\n\n if sc > high_score:\n #read_write.write(fname, placed_houses)\n high_score = sc\n print (\"New high score ({0}) in loop: {1}\".format(sc, loops))\n print (\"Writing to file..\")\n\n return gr, placed_houses, sc", "def single_epoch(g,rows,cols,midpoint):\n\n num_top = 10 \n #3 for 8x8\n one_to_select = 0 \n top_nodes = g.top_n_nodes(num_top)\n '''\n for k in range(num_top):\n node_num = top_nodes[k]\n trip_list = g.node2trip_ids[node_num]\n print \"Next Midpoint: %d\" % k\n print node_num\n print g.node_to_coords(node_num)\n print \"Num trips: %d\" % len(trip_list)\n for i in range(len(trip_list)):\n trip_id = trip_list[i]\n line_num = g.trip_id2line_num[trip_id]\n p = Path(trip_id,g,line_num)\n \"\"\"\n print i\n print trip_id\n p.print_path()\n for i in range(p.graph.num_edges):\n if p.edges[i]:\n sys.stdout.write(\"%d, \" % (i + 1))\n sys.stdout.write(\"\\n\")\n 
sys.stdout.write(\"1s: \")\n for key in p.partials.keys():\n if p.partials[key]:\n sys.stdout.write(\"%d, \" % (key + 1))\n sys.stdout.write(\"\\n0s: \")\n for key in p.partials.keys():\n if not p.partials[key]:\n sys.stdout.write(\"%d, \" % (key + 1))\n sys.stdout.write(\"\\n\")\n #\"\"\"\n '''\n\n #trip_list = g.node2trip_ids[g.best_node]\n #midpoint = top_nodes[one_to_select]\n trip_list = g.node2trip_ids[midpoint]\n print \"Selected midpoint: %d\" % midpoint \n print g.node_to_coords(midpoint)\n out_file = open(\"datasets/full_data_%d_%d_%d.txt\" % (rows,cols,midpoint),'w')\n partial_file = open(\"datasets/partials_%d_%d_%d.txt\" % (rows,cols,midpoint), 'w')\n for i in range(len(trip_list)):\n trip_id = trip_list[i]\n line_num = g.trip_id2line_num[trip_id]\n p = Path(trip_id,g,line_num=line_num,midpoint=midpoint)\n \"\"\"\n print i\n print trip_id\n p.print_path()\n for i in range(p.graph.num_edges):\n if p.edges[i]:\n sys.stdout.write(\"%d, \" % (i + 1))\n sys.stdout.write(\"\\n\")\n sys.stdout.write(\"1s: \")\n for key in p.partials.keys():\n if p.partials[key]:\n sys.stdout.write(\"%d, \" % (key + 1))\n sys.stdout.write(\"\\n0s: \")\n for key in p.partials.keys():\n if not p.partials[key]:\n sys.stdout.write(\"%d, \" % (key + 1))\n sys.stdout.write(\"\\n\")\n \"\"\"\n out_string = str(p.edges)[1:-1]\n out_file.write(\"%s\\n\" % out_string)\n for i in range(p.graph.num_edges):\n if i in p.partials.keys():\n partial_file.write(\"%d\" % p.partials[i])\n else:\n partial_file.write(\"-1\")\n if i < p.graph.num_edges-1:\n partial_file.write(\",\")\n partial_file.write(\"\\n\")\n\n out_file.close()", "def _gen(self, datapoint_params) -> RecordThunkIter:\n\n def entrance(datapoints, params_list) -> RecordThunkIter:\n \"\"\"\n Return a generator iter througth an effectful generator\n iter through data will cause a network connection.\n \"\"\"\n size = len(params_list)\n effectful = zip(datapoints, params_list)\n\n def g():\n \"\"\" SIDE EFFECTFUL \"\"\"\n data, param = next(effectful)\n return ((MakeDict.make_spot_record(record, param)\n for record in data)\n if data is not None\n else iter([]))\n\n for _ in range(size):\n yield g\n\n datapoints = map(self._datapoint, datapoint_params)\n return entrance(datapoints, datapoint_params)", "def sequences(self):\n # i am one\n yield self\n # nothing further\n return", "def run_generations(init_len):\n num_graphs = 0\n current_gen = [nx.path_graph(init_len)]\n complete_graph_list = current_gen.copy()\n while len(current_gen) and current_gen[0].size() < (3*init_len - 7):\n current_gen = generation_next(current_gen)\n num_graphs += show_graph_list(current_gen)\n complete_graph_list.extend(filter_bridge_case(current_gen))\n print(num_graphs)\n return complete_graph_list", "def next():\n iraw = 0\n run = 0\n seqno = 0\n with db_connection() as conn:\n with conn.cursor() as curs:\n try:\n curs.execute(\"\"\"SELECT rawdata.id,rawdata.run,rawdata.seqno,\n slices.block1,slices.block2,\n jobs.cluster,jobs.process\n FROM rawdata\n LEFT JOIN bindings\n ON bindings.iraw = rawdata.id\n INNER JOIN slices\n ON slices.iraw = rawdata.id\n LEFT JOIN jobs\n ON slices.ijob = jobs.id\n AND jobs.exitcode = 0\n WHERE bindings.id IS NULL\n ORDER BY rawdata.id,slices.block1\n LIMIT 2000;\n \"\"\")\n slices_missing = 1\n for row in curs.fetchall():\n i = int(row[0])\n if i != iraw:\n if slices_missing == 0:\n break\n else:\n run = int(row[1])\n seqno = int(row[2])\n slices_missing = 0\n slices = []\n iraw = i\n block1 = int(row[3])\n block2 = int(row[4])\n if row[5] is not 
None and row[6] is not None:\n cluster = int(row[5])\n process = int(row[6])\n slices.append((block1,block2,cluster,process))\n else:\n print(\"slices missing on\", row[5], row[6])\n slices_missing += 1\n if slices_missing:\n return 0\n else:\n curs.execute(\"SELECT TIMEZONE('GMT', NOW());\")\n now = curs.fetchone()[0]\n curs.execute(\"\"\"INSERT INTO bindings\n (iraw,starttime)\n VALUES (%s,%s)\n RETURNING id;\n \"\"\", (iraw, now))\n row = curs.fetchone()\n if row:\n ibind = int(row[0])\n else:\n return 0\n except:\n iraw = 0\n if iraw == 0:\n time.sleep(random.randint(1,30))\n return -9 # collision\n\n workdir = str(iraw)\n os.mkdir(workdir)\n os.chdir(workdir)\n badslices = []\n for sl in slices:\n sdir = str(sl[0]) + \",\" + str(sl[1])\n os.mkdir(sdir)\n tarfile = \"job_{0}_{1}.tar.gz\".format(sl[2], sl[3])\n tarpath = input_area + \"/\" + tarfile\n try:\n subprocess.check_output([\"gfal-copy\", src_url + tarpath,\n \"file://\" + os.getcwd() + \"/\" + tarfile])\n except:\n sys.stderr.write(\"Error -999 on rawdata id {0}\".format(iraw) +\n \" - job output \" + tarfile + \" is missing!\\n\")\n sys.stderr.flush()\n badslices.append(sdir)\n continue\n try:\n subprocess.check_output([\"tar\", \"zxf\", tarfile, \"-C\", sdir])\n except:\n sys.stderr.write(\"Error -999 on rawdata id {0}\".format(iraw) +\n \" - job output \" + tarfile + \" is not readable!\\n\")\n sys.stderr.flush()\n badslices.append(sdir)\n finally:\n os.remove(tarfile)\n if len(badslices) > 0:\n with db_connection() as conn:\n with conn.cursor() as curs:\n curs.execute(\"SELECT TIMEZONE('GMT', NOW());\")\n now = curs.fetchone()[0]\n curs.execute(\"\"\"UPDATE bindings\n SET endtime=%s, \n exitcode=%s,\n details=%s\n WHERE id = %s;\n \"\"\", (now, -999, \":\".join(badslices), ibind))\n os.chdir(\"..\")\n shutil.rmtree(workdir)\n return 1\n\n badslices += merge_evio_skims(run, seqno, slices)\n badslices += merge_hddm_output(run, seqno, slices)\n badslices += merge_job_info(run, seqno, slices)\n badslices += merge_root_histos(run, seqno, slices)\n exitcode = -len(badslices)\n with db_connection() as comm:\n with conn.cursor() as curs:\n curs.execute(\"SELECT TIMEZONE('GMT', NOW());\")\n now = curs.fetchone()[0]\n curs.execute(\"\"\"UPDATE bindings\n SET endtime=%s,\n exitcode=%s,\n details=%s\n WHERE id = %s;\n \"\"\", (now, exitcode, \":\".join(badslices), ibind))\n os.chdir(\"..\")\n shutil.rmtree(workdir)\n return 1", "def start_method(self) -> None:\n if self._check_number_of_bells():\n self.row_generator.reset()\n self.start_next_row()", "def grow_scaffold(scaffold, o_dict, u_cont, r_cont):\n free_end = True\n current_end = scaffold.pop()\n \n \n while free_end:\n \n next_contig_dict = o_dict[current_end]\n right_contig = \"link_null\"\n \n if len(next_contig_dict) == 1:\n next_contig = list(next_contig_dict.keys())[0]\n next_end = other_end(next_contig)\n scaffold.extend([current_end, next_contig])\n \n if len(scaffold) > 1 and next_end == scaffold[1]:\n right_contig = \"link_to_start_(2)\"\n free_end = False\n elif next_end in u_cont:\n\n if next_end in scaffold:\n right_contig = \"link_loop\"\n free_end = False\n else:\n current_end = next_end\n right_contig = other_end(next_end)\n\n else: \n if next_end in r_cont and next_end in o_dict: \n right_contig = trio_hits(current_end, next_end, hit_list, o_dict, contigs_dict) \n if right_contig in unique_contigs_master and right_contig in scaffold: \n right_contig = \"link_duplicate\"\n else: \n right_contig = \"link_not_found\"\n\n if right_contig[0:4] != \"link\":\n 
candidate = other_end(right_contig)\n if not candidate in scaffold:\n scaffold.extend([next_end, right_contig])\n\n if candidate in o_dict and len(o_dict[candidate]) > 0:\n current_end = candidate\n else: \n right_contig = \"link_not_in_overlaps_(2)\"\n else:\n right_contig = \"link_in_scaffold\"\n \n if right_contig[0:4] == \"link\":\n scaffold.append(right_contig)\n free_end = False\n \n else:\n if len(next_contig_dict) == 0:\n scaffold.append(\"link_not_in_overlaps\")\n elif len(next_contig_dict) >1:\n scaffold.append(\"link_ambiguous_(2)\")\n else: scaffold.append(\"link_other\") #should never happen! \n free_end = False \n \n return scaffold", "def cycle_loader(loader, device):\n while True:\n for batch in loader:\n # NOTE this is an adhoc solution\n batch.src = (batch.src[0].to(device), batch.src[1].to(device))\n batch.tgt = batch.tgt.to(device)\n logit, indices = batch.bert_topk\n batch.bert_topk = (logit.to(device), indices.to(device))\n yield batch", "def step_through_generations(self, num_steps):\n for island in self._islands:\n for _ in range(num_steps):\n island.execute_generational_step()\n self.archipelago_age += num_steps", "def is_generate_per_split(self):\n return True" ]
[ "0.66333145", "0.62098485", "0.61860573", "0.59795815", "0.5897099", "0.58752435", "0.57888967", "0.56387043", "0.56354517", "0.5580544", "0.55045325", "0.5491345", "0.5488172", "0.5473442", "0.5453452", "0.5444366", "0.5409794", "0.5389145", "0.5388282", "0.5374205", "0.53662616", "0.5350202", "0.5286014", "0.52452254", "0.52422255", "0.5235022", "0.52184397", "0.5186023", "0.5172175", "0.5166774", "0.5157133", "0.5154998", "0.51374197", "0.5112792", "0.5100738", "0.5097566", "0.5088883", "0.5088309", "0.50824475", "0.50602335", "0.5053064", "0.5052954", "0.5034485", "0.5028705", "0.50252694", "0.50229806", "0.5010326", "0.5007821", "0.4995043", "0.4994627", "0.49897194", "0.4987335", "0.49817812", "0.49784565", "0.4970886", "0.49660543", "0.49632606", "0.4959547", "0.4955111", "0.49541983", "0.49469912", "0.4946398", "0.4941565", "0.49394006", "0.4938736", "0.49382603", "0.49354756", "0.49255472", "0.49215558", "0.49211678", "0.4918371", "0.49181655", "0.49171445", "0.49146554", "0.491334", "0.49111915", "0.49107116", "0.49087593", "0.4908623", "0.49071443", "0.48956537", "0.48888317", "0.48865154", "0.48864365", "0.488558", "0.4877284", "0.4874521", "0.4871078", "0.4860098", "0.4858776", "0.48530775", "0.48511475", "0.4849492", "0.48478612", "0.48438582", "0.48341963", "0.48323756", "0.4830541", "0.48198643", "0.48192677", "0.48149708" ]
0.0
-1
Generating a number of forests (it's random within some range).
def _generate(self, input_row, output_row): self._fullInput = input_row self.power = self.settings.population_count self._fullOutput = output_row for one_forest in range(self.power): self._forests.append(OneForest(self.settings, input_row=self._fullInput, full_output=self._fullOutput))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def numberOfNodes( gen ):\n return int( scipy.sum( [ 3.**i for i in range( 1, gen + 2 ) ] ) )", "def set_rf_samples(n):\n forest._generate_sample_indices = (lambda rs, n_samples:\n forest.check_random_state(rs).randint(0, n_samples, n))", "def reset_rf_samples():\n forest._generate_sample_indices = (lambda rs, n_samples:\n forest.check_random_state(rs).randint(0, n_samples, n_samples))", "def __init__(self, n_samples=1000, n_features=4):\n self.n_samples = 1000\n self.n_features = 4\n self.forest = []", "def num_trials(self):", "def grow_forest( n, records ):\n dataset = Dataset( records )\n record_number = dataset.size\n\n dts = []\n for i in xrange(n):\n print \"Training\", i\n # pick randomly as many records as the number in the dataset.\n picked_records = []\n for j in xrange( record_number ):\n ind_picked = randint(0, record_number-1)\n picked_records.append( dataset[ ind_picked ] )\n picked_records = Dataset( picked_records )\n # train a tree with these records and add it to the forest\n tree = train(picked_records)\n dts.append( tree )\n return dts", "def sample_count(self):", "def backtrack_steps():\n\n # Initialize position and number of steps\n x = 0\n n_steps = 0\n\n # Walk until we get to positive 1\n while x < 1:\n x += 2 * np.random.randint(0, 2) - 1\n n_steps += 1\n\n return n_steps", "def n_file_elements(cls):\n \n return randint(1, (3 * Root.size))", "def count_gold(pyramid):\n\n #replace this for solution\n return 0", "def random(self, n=1):\n # self.num_generated += n", "def n_folder_elements(cls):\n \n return randint(1, (2 * Root.size))", "def reproduce(self):\n\n def compute_seeds(fitness):\n \"\"\" Computes the number of seeds given a fitness value. \"\"\"\n\n seeds = (fitness-min_fitness) / (max_fitness-min_fitness) * \\\n (self.max_seeds-self.min_seeds) + self.min_seeds\n\n return round(seeds)\n\n # evaluates max and min fitness for current year\n max_fitness = max(tree[0] for tree in self.population)\n min_fitness = min(tree[0] for tree in self.population)\n\n # computes the number of seeds produced per tree\n for tree in self.population:\n tree[1].seeds = int(compute_seeds(tree[0]))", "def num_training_examples(self):", "def part_1() -> int:\n initial_input = _load_input()\n rows = len(initial_input)\n cols = len(initial_input[0])\n\n input = initial_input.copy()\n total_glow_count = 0\n\n for _ in range(100):\n flashed = list()\n for row in range(rows):\n for col in range(cols):\n coords = [[col, row]]\n new_input, glow_count = _get_glow_counts(coords, input, flashed)\n input = new_input\n total_glow_count += glow_count\n\n return total_glow_count", "def number_of_iterations(self) -> int:\n pass", "def nb_triples(self) -> int:\n return 0", "def count_frames(f):\n def counted(n):\n counted.open_count += 1\n counted.max_count = max(counted.max_count, counted.open_count)\n result = f(n)\n counted.open_count -= 1\n return result\n counted.open_count = 0\n counted.max_count = 0\n return counted", "def foxGrowth():\n # you need these lines for modifying global variables\n global CURRENTRABBITPOP\n global CURRENTFOXPOP\n \n for fox in range(CURRENTFOXPOP):\n fox_eat_prob = float(CURRENTRABBITPOP) / MAXRABBITPOP\n if random.random() < fox_eat_prob and CURRENTRABBITPOP > 10:\n CURRENTRABBITPOP -= 1\n if random.random() < (1.0 / 3.0):\n CURRENTFOXPOP += 1\n else: \n if random.random() < (1.0 / 10.0) and CURRENTFOXPOP > 10:\n CURRENTFOXPOP -= 1", "def num_wires(self):", "def _getNumcam( self, bSeed ):\n\n\t\treturn ( ( bSeed >> 20 ) & 0xF ) + 1", "def 
number_of_sample_loops(self) -> int:\n return self.__number_of_sample_loops", "def build_random_forest(X_train, y_train):", "def count():", "def initial_forest(self):\n return TamariIntervalPoset(self.size(), self.increasing_cover_relations())", "def test_rand_100_depth_remains_less_than_8():\n from bbst import Bst\n from random import shuffle\n max_depth = 0\n for x in range(10):\n rando = [x for x in range(100)]\n shuffle(rando)\n tree = Bst(rando)\n tree_depth = tree.depth()\n if tree_depth > max_depth:\n max_depth = tree_depth\n assert max_depth == 8", "def n_train(self):\n return self.factors[0].shape[0]", "def foxGrowth():\r\n # you need these lines for modifying global variables\r\n global CURRENTRABBITPOP\r\n global CURRENTFOXPOP\r\n\r\n \r\n \r\n for fox in range(CURRENTFOXPOP):\r\n if CURRENTRABBITPOP > 10:\r\n prob_fox_eats_rabbit = float(CURRENTRABBITPOP)/MAXRABBITPOP\r\n if prob_fox_eats_rabbit > random.random():\r\n CURRENTRABBITPOP -= 1\r\n if 1.0/3.0 > random.random():\r\n CURRENTFOXPOP += 1\r\n elif CURRENTFOXPOP > 10:\r\n if 0.9 > random.random():\r\n CURRENTFOXPOP -= 1", "def n_times_comeback(self):\n # OR Poisson or Gaussian\n loc = self.get_mean_n_times_comeback()\n scale = self.dev_n_times_comeback\n return int(np.around(np.random.normal(loc=loc, scale=scale, size=1)))", "def n_rays(self):\n try: \n return self._n_rays\n except AttributeError:\n self._n_rays = 0\n for r in self.rays(): self._n_rays += 1\n return self._n_rays", "def get_forest(self, verbose):\n _antecessors = []\n for key, cluster in self.clusters.items():\n if cluster.leaf_cluster is True:\n _antecessors.append(cluster.antecessor)\n _antecessors = remdup_preserve_order(_antecessors)\n _antecessors = sorted(_antecessors, key=get_cluster_idx, reverse=True)\n\n _tree_idx = 0\n\n print('Generating forest...')\n print('')\n count= 0.0\n if verbose:\n progress_bar = progress_bar = AnimatedProgressBar(end=len(_antecessors), width=50, \\\n fill='=', blank='.')\n for antecessor in _antecessors:\n if verbose and (count % 1 == 0):\n progress_bar + 1\n progress_bar.show_progress()\n tree = Tree(antecessor, idx = _tree_idx, acorns=self)\n self.forest[_tree_idx] = tree\n _tree_idx += 1\n\n if verbose:\n progress_bar.progress = 100 # Done\n progress_bar.show_progress()\n print('')\n print('')\n\n return", "def N_stages(self):\n return 5", "def fs_n():\n return TEST_PREF + \"fs\" + random_string()", "def game_counts(n_back=20):\n all_models = gfile.Glob(os.path.join(MODELS_DIR, '*.meta'))\n model_filenames = sorted([os.path.basename(m).split('.')[0]\n for m in all_models], reverse=True)\n for m in model_filenames[:n_back]:\n games = gfile.Glob(os.path.join(SELFPLAY_DIR, m, '*.zz'))\n print(m, len(games))", "def foxGrowth():\r\n # you need these lines for modifying global variables\r\n global CURRENTRABBITPOP\r\n global CURRENTFOXPOP\r\n\r\n # TO DO\r\n #pass\r\n for i in range(CURRENTFOXPOP):\r\n if CURRENTRABBITPOP > 10:\r\n if random.random() <= (CURRENTRABBITPOP/MAXRABBITPOP):\r\n CURRENTRABBITPOP -= 1\r\n # fox reproducing\r\n if random.random() <= (1/3):\r\n CURRENTFOXPOP += 1\r\n else:\r\n # fox dying\r\n if random.random() <= 0.1:\r\n CURRENTFOXPOP -= 1", "def branching_factor(self, num_samples=0):\n\t\tif num_samples == 0: num_samples = self.default_num_samples\n\t\tsuccesful_samples = 0\n\t\tbranches = 0\n\t\tfor i in range(num_samples):\n\t\t\ttitle = choice(index.keys()) # get random article title\n\t\t\ttry:\n\t\t\t\tart = Page(title)\n\t\t\t\tbranches += len(art.links())\n\t\t\t\tsuccesful_samples += 
1\n\t\t\texcept Exception, msg:\n\t\t\t\tif debug:print \"bad article: \" + str(msg)\n\t\treturn (float(branches)/succesful_samples, succesful_samples)", "def fisher_factor_inner_shape(self) -> Sequence[int]:\n pass", "def n_trees(self):\n return len(self.data_kd)", "def get_num_classes(self):", "def n_rounds(self) -> int:\n return self.y.shape[0]", "def foxGrowth():\r\n # you need these lines for modifying global variables\r\n global CURRENTRABBITPOP\r\n global CURRENTFOXPOP\r\n\r\n # TO DO\r\n #pass\r\n for i in range(CURRENTFOXPOP):\r\n if CURRENTRABBITPOP > 10:\r\n if random.random() <= (CURRENTRABBITPOP/MAXRABBITPOP):\r\n CURRENTRABBITPOP -= 1\r\n # fox reproducing\r\n if random.random() <= (1/3):\r\n CURRENTFOXPOP += 1\r\n else:\r\n # fox dying\r\n if random.random() <= 0.9:\r\n CURRENTFOXPOP -= 1", "def fill_count(nid):\n n_edges = G.subgraph(G.neighbors(nid)).number_of_edges()\n deg = G.degree[nid]\n n_fill = deg*(deg-1)//2 - n_edges\n return n_fill", "def generate_feature_counts(traj, mdp):\n #count each time a state was visited \n counts = Counter({feature:0 for feature in mdp.features})\n for state,action in traj:\n counts[mdp.observe_features(state)] += 1\n \n return [counts[feature] for feature in mdp.features]", "def final_forest(self):\n return TamariIntervalPoset(self.size(), self.decreasing_cover_relations())", "def num_depth(self):\n if self._index == 0:\n return len(self._ratios)\n else:\n return len(self._sizes) + len(self._ratios) - 1", "def part_2() -> int:\n initial_input = _load_input()\n rows = len(initial_input)\n cols = len(initial_input[0])\n\n input = initial_input.copy()\n step_num = 0\n\n while True:\n flashed = list()\n step_glow_count = 0\n\n step_num += 1\n\n for row in range(rows):\n for col in range(cols):\n coords = [[col, row]]\n new_input, glow_count = _get_glow_counts(coords, input, flashed)\n input = new_input\n step_glow_count += glow_count\n\n if step_glow_count == 100:\n break\n\n return step_num", "def get_number_of_models():\n return 8", "def get_faces_nr(self):\r\n\r\n logger.debug('Getting number of faces in each frame')\r\n\r\n if len(self.tracked_faces) == 0:\r\n\r\n # Try to load YAML file\r\n\r\n if os.path.exists(self.track_path):\r\n\r\n print 'Loading YAML file with tracking results'\r\n logger.debug('Loading YAML file with tracking results')\r\n\r\n with open(self.track_path) as f:\r\n\r\n self.tracked_faces = yaml.load(f)\r\n\r\n print 'YAML file with tracking results loaded'\r\n logger.debug('YAML file with tracking results loaded')\r\n\r\n else:\r\n\r\n print 'Warning! 
No tracking results found!'\r\n logger.warning('No tracking results found!')\r\n\r\n return\r\n\r\n self.faces_nr = {}\r\n\r\n for segment_dict in self.tracked_faces:\r\n\r\n frame_list = segment_dict[c.FRAMES_KEY]\r\n\r\n for frame_dict in frame_list:\r\n\r\n frame_name = frame_dict[c.SAVED_FRAME_NAME_KEY]\r\n\r\n if frame_name in self.faces_nr:\r\n\r\n self.faces_nr[frame_name] += 1\r\n\r\n else:\r\n\r\n self.faces_nr[frame_name] = 1\r\n\r\n # Save YAML file\r\n\r\n utils.save_YAML_file(self.faces_nr_path, self.faces_nr)", "def _number_of_intervals(self):\n return self._number_of_levels - 1", "def draw_250():\r\n red = 0\r\n grn = 0\r\n arr = urn_setup()\r\n for v in range(250):\r\n n = np.random.randint(0,len(arr))\r\n x, arr = draw_remove_element(arr, np.random.randint(0,len(arr)))\r\n if x == 'R':\r\n red += 1\r\n elif x == 'G':\r\n grn += 1\r\n print(\"Red: %d :: Grn: %d\" % (red, grn))\r\n return red, grn", "def __dice_generator(self):\n self.current_dice = np.random.randint(1, 6 + 1)", "def generate_samples(self, n_samples):", "def generate_samples(self, n_samples):", "def ticktock(self): # controller\n for tractor in self.tractors:\n try:\n next(tractor) # state changer\n except StopIteration:\n pass # harmless stuck tractor signal\n\n self.framenumber += 1\n return self.framenumber", "def __len__(self):\n return 9 # logsfr_ratios has 6 bins", "def get_num_of_images(self):", "def __len__(self):\n i = 0\n for S in self.states():\n i += 1\n return i", "def _good_turing_new_c(self, count: int) -> float:\n next_count_index = count + 1\n next_count: Optional[float] = None\n if next_count_index not in self.count_map:\n # this happens when N_{c+1} is 0\n # this can make the total probability not equal to 1\n next_count = 0.\n else:\n next_count = float(self.count_map[next_count_index])\n\n new_count: Optional[float] = None\n new_count = (count + 1) * next_count / self.count_map[count]\n return new_count", "def count_unique_features(self):\n return N_UNIQUE_FEATS", "def num_faces(self):\n return self._top_exp.number_of_faces()", "def rand_order_size():\n return poisson(2.0) + 1", "def num_classes():\n return NUM_CLASSES", "def test_random_forest_max_depth_parameter(params, X_train, X_test, y_train, y_test):", "def randomWins(RNA, wsize, step, trials):\n counter = 0\n rnatrialscores = []\n while counter < trials:\n newRNA = randSeq(RNA)\n rnatrialscores.append(rnaWin(newRNA, wsize, step)[0])\n counter += 1 \n \n return rnatrialscores", "def num_run_trajs(self, run_idx):\n return len(self._h5['{}/{}/{}'.format(RUNS, run_idx, TRAJECTORIES)])", "def num_eval_instances(self):\n return self.num_train_instances // 4", "def determine_size(self):\n size = np.inf\n while size >= self.n:\n size = np.random.pareto(0.2)\n size = int(math.ceil(size))\n return size", "def totem_random():\n random_head()\n random_head()\n random_head()", "def variable_ranking(self):\n self.grow_trees()\n dist_classes = self.dist_classes\n oob = self.forest.oob_set_generator()\n oob_length, First, elt_vals, var_vals = len(oob), True, {}, {}\n succ_rate, dist_succ_rate, dist_order = 0, 0, 0\n for var in self.variables:\n var_range = list(variable_range(self.data, var))\n range_len = len(var_range)\n print var\n permution = None\n permuted_succ, perm_dist_succ = 0, 0\n for elts in oob:\n if First:\n actual = self.data[elts][self.prediction_index]\n elt_vals[elts] = actual\n predicted = self.forest.test_predict(self.data[elts], elts)\n if actual in dist_classes:\n dist_order += 1\n if actual == predicted:\n succ_rate += 1\n 
if actual in dist_classes:\n dist_succ_rate += 1\n if var[1] == 'd':\n permution = int(math.floor(uniform(0, 1)*range_len))\n permution = var_range[permution]\n else:\n permution = uniform(0, 1)*(var_range[1] - var_range[0])\n perm_tuple = self.data[elts][:var[0]] + [permution] + self.data[elts][var[0]+1:]\n permuted_prediction = self.forest.predict(perm_tuple)\n actual = elt_vals[elts]\n if actual == permuted_prediction:\n permuted_succ += 1\n if actual in dist_classes:\n perm_dist_succ += 1\n if First:\n succ_rate = float(succ_rate)/oob_length\n dist_succ_rate = float(dist_succ_rate)/dist_order\n First = False\n permuted_succ = float(permuted_succ)/oob_length\n perm_dist_succ = float(perm_dist_succ)/dist_order\n print \"Originally a \", succ_rate, \" success rate, with permution to \", permuted_succ\n print \"A difference of \", succ_rate - permuted_succ\n print \"WRT Distinguised classes, a success rate of:\", dist_succ_rate, 'with permution to ', perm_dist_succ\n print \"A difference of \", dist_succ_rate - perm_dist_succ\n var_vals[var] = succ_rate - permuted_succ\n var_vals[(var, 'd')] = dist_succ_rate - perm_dist_succ\n var_vals = sorted(var_vals.items(), key=lambda x: x[1], reverse=True)\n for x in var_vals:\n print x[0], x[1]", "def replace_random_passes(gm: torch.fx.GraphModule):\n if config.fallback_random:\n return 0\n\n count = patterns.apply(gm)\n count += fuse_seed_creation_pass(gm.graph)\n\n return count", "def getNumFactorys(self, row: int) -> int:\n ...", "def run_generations(init_len):\n num_graphs = 0\n current_gen = [nx.path_graph(init_len)]\n complete_graph_list = current_gen.copy()\n while len(current_gen) and current_gen[0].size() < (3*init_len - 7):\n current_gen = generation_next(current_gen)\n num_graphs += show_graph_list(current_gen)\n complete_graph_list.extend(filter_bridge_case(current_gen))\n print(num_graphs)\n return complete_graph_list", "def get_n_features(self):\n # +1 due to dummy bit\n return self.model.n_latent_features + 1", "def __len__(self):\n return 1 + len(self.features)", "def make_transition_probs(self):\n n = len(self.speakers) # TODO why this line ???\n transitions = np.random.randint(5, size=(n, n)) + 1\n transitions += transitions.transpose()\n for i in range(0, math.floor(n / 2)):\n s1 = np.random.randint(n)\n s2 = np.random.randint(n)\n transitions[s1][s2] += 10\n transitions[s2][s1] += 8\n return(transitions)", "def generate(self, num_leafs):\n leafs = self.get_leafs()\n for _ in range(num_leafs):\n box = leafs[np.random.choice(len(leafs))]\n leafs.remove(box)\n ch0, ch1 = box.split()\n self.add_edge(box, ch0)\n self.add_edge(box, ch1)\n leafs.append(ch0)\n leafs.append(ch1)", "def generate_close_count(self):\n return 0", "def monkey_count(n):\n return [i for i in range(1, n + 1)]", "def num_frames(self):\n return self._first_rgb.shape[1]", "def get_number_of_training(self):\n return self.n_train", "def growForest(config, load_exp_file=True):\n\n silent = config.get('silent', False)\n experiment_Path = r\"C:\\Users\\user\\Desktop\\Prediction_model\\experiment\\flood.exp\"\n\n if load_exp_file:\n #loadExperimentFile(config, filename=config.exp)\n loadExperimentFile(config, filename=experiment_Path, experiment_name=\"flood\")\n\n forests = []\n results = []\n\n\n # do multiple runs if needed. 
note that we start at config.run, not zero\n for run in range(config.num_runs):\n training_graphs, testing_graphs = splitDict(config.graphs, int(len(config.graphs) * .8), random=True)\n\n \"\"\"\n # perform under-sampling if needed\n if hasattr(config, 'underlabel'):\n under_graphs = {}\n skip_count = 0\n for k in training_graphs.keys():\n if training_graphs[k].class_label == config.underlabel and random.random() <= config.underval:\n skip_count += 1\n else:\n under_graphs[k] = training_graphs[k]\n print('Undersampled ' + str(skip_count) + ' graphs')\n training_graphs = under_graphs\n \"\"\"\n # print out some useful info on the class distribution\n counts = defaultdict(int)\n for graph in training_graphs.values():\n counts[graph.class_label] += 1\n print('training:', len(training_graphs), counts)\n\n counts = defaultdict(int)\n for graph in testing_graphs.values():\n counts[graph.class_label] += 1\n print('testing:', len(testing_graphs), counts)\n\n for graph in training_graphs.values():\n counts[graph.class_label] += 1\n print('total:', len(config.graphs), counts)\n\n print('\\nrun:', run)\n config.run = run\n\n srrf = SRRForest(config)\n #srrf.growForest(training_graphs)\n srrf.growForest(config.graphs)\n forests.append(srrf)\n #srrf.training_graph_ids = list(training_graphs.keys())\n #training_labeling = srrf.labelGraphs(training_graphs,config.time_list)\n #outOfBagLabels=srrf.getOutOfBagLabels()\n #print(\"outOfBagLabels\")\n #print(outOfBagLabels)\n #c=srrf.compute_oob_score(training_graphs, outOfBagLabels)\n #print(\"concordance index:\")\n #print(c)\n config.saveTrees(srrf)\n\n #results.append(c)\n\n\n\n\n \"\"\"\n\n df = pd.DataFrame(columns=['lon', 'lat', 'survival_probability', 'time'])\n\n\n srrf.testing_graph_ids = testing_graphs.keys()\n testing_labeling = srrf.labelGraphs(testing_graphs,config.time_list)\n\n\n\n\n\n\n\n for i,h in testing_labeling.items():\n\n lat = i.graph.attributes_by_type.get(('cell', 'lat'))[0].value\n lon = i.graph.attributes_by_type.get(('cell', 'lon'))[0].value\n for t, label in h.items():\n df = df.append(\n {'lon': lon, 'lat': lat, 'survival_probability': label[1], 'time': t},\n ignore_index=True)\n\n sort_by_time = df.sort_values('time')\n print(sort_by_time.head())\n import plotly.express as px\n fig = px.scatter_mapbox(sort_by_time, lat=\"lat\", lon=\"lon\", hover_data=[\"survival_probability\"],\n color=\"survival_probability\", animation_frame=\"time\", animation_group=\"time\",\n color_continuous_scale=px.colors.cyclical.IceFire, size_max=15, zoom=10, height=500)\n fig.update_layout(mapbox_style=\"open-street-map\")\n fig.update_layout(margin={\"r\": 0, \"t\": 0, \"l\": 0, \"b\": 0})\n fig.show()\n \"\"\"\n\n\n\n #config.saveTrees((srrf,)) ###config.saveTree is giving us an eror type error: unable to pickle dict keys.\n\n #print('numruns: %s' % (config.num_runs))\n #print(results)\n\n\n #return results", "def num_trajs(self):\n return len(list(self.run_traj_idx_tuples()))", "def act(self):\n return np.random.randint(self.k)", "def generate_flowsize(self):\n random_flowsize = round(random.uniform(0.0, 200.0), 3)\n assert isinstance(random_flowsize, float)\n return random_flowsize", "def sample_count(self):\n assert len(self.decay_x) == len(self.decay_y)\n return len(self.decay_x)", "def get_marble_count(self):", "def seed():", "def nbr_nodes(tree_depth):\n return 2**(tree_depth+1)-1", "def MakeAllScenarioTreeNodes(model, bf):\n TreeNodes = dict()\n TreeNodes[\"ROOT\"] = scenario_tree.ScenarioNode(\"ROOT\",\n 1.0,\n 1,\n model.StageCost[1],\n 
None,\n [model.Pgt[1],\n model.Pgh[1],\n model.PDns[1],\n model.Vol[1]],\n model)\n for b in range(bf):\n ndn = \"ROOT_\"+str(b)\n TreeNodes[ndn] = scenario_tree.ScenarioNode(ndn,\n 1.0/bf,\n 2,\n model.StageCost[2],\n None,\n [model.Pgt[2],\n model.Pgh[2],\n model.PDns[2],\n model.Vol[2]],\n model,\n parent_name=\"ROOT\")", "def num_trees(self):\n return self._ll_tree_sequence.get_num_trees()", "def cumula():\n f = open('cumul_depths.tmp','w+')\n model = np.loadtxt('start_model.dat',dtype={'names': ('H'),'formats': \\\n ('f4')}, skiprows=1,usecols=[0])\n \n cumul = 0\n for i, d in enumerate(model['H'][:-1]): \n # le dernier est souvent le semi espace donc pas besoin (tres prof)\n cumul +=d\n print (\"layer \", i, \" - depth: \", cumul)\n f.write(\"%s\\n\" % cumul)\n f.close()", "def _rep(self, num_repeats):\n return int(np.ceil(self.depth_mul * num_repeats))", "def ggn_factor_inner_shape(self) -> Sequence[int]:\n pass", "def count_waters(self):\n n = 0\n for frag in self.iter_waters():\n n += 1\n return n", "def num_classes(self):\n\t\treturn 10", "def num_layers(self): # -> int:\n ...", "def number_of_electrodes(self):\n return self._pre_kernel.shape[1]", "def toboggan_trees(input_file: str, steps_across: int, steps_down: int) -> int:\n position = {\"x\": 0, \"y\": 0}\n number_of_trees = 0\n with open(f\"inputs/{input_file}\") as f:\n for line in f.readlines():\n if position[\"y\"] % steps_down:\n position[\"y\"] += 1\n continue\n\n if line[position[\"x\"] % len(line.strip())] == \"#\":\n number_of_trees += 1\n position[\"x\"] += steps_across\n position[\"y\"] += 1\n\n return number_of_trees", "def max_depth_forest(self):\n return max(x.tree_.max_depth for x in self.result.estimators_)", "def rollout(self, env, cur_depth):\n total_r = 0\n for i in range(self.max_rollout - cur_depth):\n next_act = np.random.randint(self.actions)\n _, _, r, _ = env.step(next_act)\n total_r += r\n return total_r", "def sample(self):\n L = e ** (-self.lamb)\n k, p = 1, rand()\n while p > L:\n k += 1\n p *= rand()\n return k - 1" ]
[ "0.60863966", "0.60740316", "0.6011651", "0.59940726", "0.5975096", "0.59473723", "0.5862948", "0.583307", "0.5804934", "0.5777276", "0.5736017", "0.57318246", "0.57276255", "0.5683988", "0.5664078", "0.56328785", "0.56160444", "0.56090194", "0.55513465", "0.5541817", "0.55397755", "0.55363965", "0.5535588", "0.55245054", "0.55223686", "0.55165756", "0.5514933", "0.5513617", "0.5508632", "0.5501778", "0.5486266", "0.54854536", "0.5435573", "0.5429444", "0.5423009", "0.5419211", "0.5417288", "0.54107744", "0.54007345", "0.5399992", "0.5392781", "0.5384895", "0.53677493", "0.53588444", "0.5353876", "0.5352053", "0.5351663", "0.5350437", "0.5347887", "0.5346748", "0.53366745", "0.5331971", "0.5331971", "0.5331454", "0.5328818", "0.5328395", "0.5327088", "0.5326336", "0.5320581", "0.53203475", "0.5318675", "0.53143114", "0.53078115", "0.5306083", "0.5303565", "0.530326", "0.5290313", "0.52893025", "0.5277634", "0.5276897", "0.5276443", "0.52759314", "0.527495", "0.5274573", "0.5273598", "0.5270311", "0.5266479", "0.5255773", "0.5252314", "0.5251867", "0.5246975", "0.52437323", "0.52377206", "0.523614", "0.5232421", "0.5224644", "0.52222574", "0.5220132", "0.5217755", "0.52154785", "0.5215161", "0.52122146", "0.5203994", "0.51947635", "0.5194311", "0.5192398", "0.5187865", "0.5181692", "0.51806664", "0.5178423", "0.517588" ]
0.0
-1
Spawning next generation of collection by selecting n pairs of distinct forests from previous generation and crossing them over.
def _next_generation(self, previous_generation): self._fullInput, self._fullOutput = previous_generation.get_data() self.power = self.settings.population_count for forest_iteration in range(self.power): first, second = previous_generation.selection() print 'selected for crossover ->', first.fitness, second.fitness self._forests.append(OneForest(self.settings, first_forest=first, second_forest=second))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def step(self):\n # amt_selected = \\\n # int(self.population_size * self.part_selected) \n\n # spawning_pool = [] # list of dna selected for reproduction\n new_data =[]\n \n sorted_dna = sorted(self.data, \n key=lambda dna: dna.fitness_function(dna),\n reverse=True)\n \n \n \n\n # mutation\n for dna in sorted_dna:\n dna.mute(self.mutation_probability)\n\n # crossover\n while len(new_data) < \\\n self.population_size - (self.population_size % 2):\n\n d1 = copy.copy(self.pick())\n d2 = copy.copy(self.pick())\n times = 2\n for i in range(times):\n d1.crossover(d2)\n\n new_data += [d1, d2]\n\n\n\n\n\n if (self.population_size % 2) == 1:\n new_data.append(copy.deepcopy(self.pick()))\n\n assert(len(self.data) == len(new_data))\n\n for i in range(len(new_data)):\n self.data[i].data = new_data[i]", "def _next_gen(self):\n\n selected = self.select()\n offspring = self.population.mate(mating_individuals=selected)\n self.population.delete(np.arange(len(self.population.individuals)))\n self.population.add(offspring)\n self._current_gen_count += 1\n self._gen_count_in_curr_iteration += 1\n self._function_evaluation_count += offspring.shape[0]", "def next_generation(self, population):\n pass", "def evolve(self, generations=10000):\n\n for gen in range(generations):\n # run the tournament\n self.tournament()\n\n # generate the next generation\n self.p = self.nextGen()", "def nextGeneration(self):\n # select two parents from the current generation.\n parent_1 = self.selection()\n parent_2 = self.selection()\n # to not get the same parents.\n _ = 0\n while _ < 30 and parent_2 == parent_1:\n parent_2 = self.selection()\n _ += 1\n # apply crossover on those parents (crossover_rate chance).\n crossover_chance = random.uniform(0, 1)\n parents = [parent_1, parent_2]\n if crossover_chance <= self.crossoverRate:\n offspring = self.crossover(parents)\n else:\n return \n # apply mutations on the new offspring (mutation_rate chance).\n mutation_chance = random.uniform(0, 1)\n newoffspring = offspring\n if mutation_chance <= self.mutationRate:\n newoffspring = self.mutation(offspring)\n # replace one of the parents in the new generation, given the loser parent.\n self.replaceLoser(parents, newoffspring)\n\n # now the new generation is available in the self.currentGeneration", "def newGeneration(self):\n for i in range(0, len(self.population)):\n [ind1, ind2] = self.randomSelection()\n child = self.crossover(ind1, ind2)\n self.population[i].setGene(child)\n self.mutation(self.population[i])", "def next_generation(self):\r\n self.calculate_stats()\r\n\r\n self.population = []\r\n\r\n # Getting amounts for different types of neural net replacements\r\n random_size = self.random_round(self.population_size * self.settings[\"random_offspring\"])\r\n elitism_size = self.random_round(self.population_size * self.settings[\"elitism_offspring\"])\r\n crossover_size = self.population_size - random_size - elitism_size\r\n\r\n # Keeping best neural nets (elitism)\r\n self.population.extend(self.sorted_population[i].copy() for i in range(elitism_size))\r\n\r\n # Adding neural nets with crossover\r\n\r\n probs = self._get_selection_probabilities()\r\n crossovers = (self._uniform_crossover(*np.random.choice(self.sorted_population, 2, replace=False, p=probs)) for _ in range(crossover_size))\r\n self.population.extend(crossovers)\r\n\r\n # Mutating neural nets\r\n for neural_net in self.population:\r\n if np.random.rand() < self.settings[\"mutation_rate\"]:\r\n neural_net.mutate(self.settings[\"mutation_chance\"], 
self.settings[\"mutation_amount\"])\r\n\r\n # Adding random nets\r\n self.population.extend(self._random_child() for _ in range(random_size))\r\n\r\n # Shuffling new population\r\n np.random.shuffle(self.population)\r\n\r\n # Increment current generation\r\n self.current_generation += 1", "def _next_generation(self, ranks):\n replace = ranks[:int(self.population_size * self.culling)]\n for idx in replace:\n self.population[idx] = self._create_offspring()", "def grow_forest( n, records ):\n dataset = Dataset( records )\n record_number = dataset.size\n\n dts = []\n for i in xrange(n):\n print \"Training\", i\n # pick randomly as many records as the number in the dataset.\n picked_records = []\n for j in xrange( record_number ):\n ind_picked = randint(0, record_number-1)\n picked_records.append( dataset[ ind_picked ] )\n picked_records = Dataset( picked_records )\n # train a tree with these records and add it to the forest\n tree = train(picked_records)\n dts.append( tree )\n return dts", "def survivors_selection(self):\n q = 5\n new_population = []\n for i in range(self._population_size):\n batch = []\n for j in range(q):\n r = random.randint(0, (self._child2population_ratio + 1) * self._population_size - 1)\n if r < self._population_size:\n batch.append(self._population[r])\n else:\n batch.append(self._children[r - self._population_size])\n new_population.append(self.select_best(batch))\n\n self._population = new_population", "def next_generation(self):\n new_population = self.population.copy()\n new_length = self.tour_length.copy()\n for i in range(self.loops):\n order_a = self.pick_one()\n order_b = self.pick_one()\n order = self.crossover(order_a, order_b)\n order_length = self.distance(order)\n new_population[i], new_length[i] = self.mutate(order_length, order)\n if new_length[i] < self.worst:\n self.tour_length[self.worst_pos] = new_length[i]\n self.population[self.worst_pos] = new_population[i]\n self.fitness[self.worst_pos] = 1/new_length[i]\n self.normalise()\n self.worst = 0\n for j in range(self.loops):\n if self.worst < self.tour_length[j]:\n self.worst = self.tour_length[j]\n self.worst_pos = j\n return new_population, new_length", "def generate_next_generation(environment, population, adaptive_mutation):\n\t# generate pairs of parents that can be used for recombination\n\tparent_pairs = parent_selection_ranking(population, num_pairs=len(population)*4)\n\n\t# generate offspring\n\toffspring = []\n\tfor i in range(len(parent_pairs)):\n\t\tchildren = create_offspring(environment, parent_pairs[i][0], parent_pairs[i][1], adaptive_mutation, num_offspring=1)\n\t\toffspring += children # concatenate children to offspring list\t\n\n\tnew_population = survival_selection_top(offspring, len(population))\n\treturn new_population", "def generation_next(prev_gen):\n next_gen = []\n\n # Iter through list of graphs\n for original_graph in prev_gen:\n # Select edges to nodes which are at distance 2\n select_edges = dist2_nodepairs(original_graph)\n\n # Go through the list of possible selected edges and add one\n for test_edge in select_edges:\n test_graph = original_graph.copy()\n test_graph.add_edge(*test_edge)\n if (not graph_exists(test_graph, next_gen)) \\\n and check_test_graph(test_graph):\n next_gen.append(test_graph)\n\n return next_gen", "def next_population():\n result = [best]\n while len(result) < population_size:\n chromosomes = crossover(tournament(), tournament()) if random() < crossover_rate else [tournament()]\n for chromosome in chromosomes:\n for i in range(box_count):\n if 
random() < mutation_rate:\n j = randrange(box_count)\n (chromosome[i], chromosome[j]) = (chromosome[j], chromosome[i])\n result.append(Individual(evaluate(chromosome), chromosome))\n return result[:population_size]", "def star_topology(random, population, args):\r\n for _ in range(len(population)):\r\n yield population[:]", "def create_next_gen(self, parents_sreprs_couple):\n child0, child1 = self.recombine(parents_sreprs_couple[0], parents_sreprs_couple[1])\n if random.random() < self.mutate_prob:\n child0 = self.mutate(child0)\n if random.random() < self.mutate_prob:\n child1 = self.mutate(child1)\n\n return child0, child1", "def run(self, generations=1000):\n gcount = 0\n \n while gcount<=generations:\n try:\n print \"Gen: \"+str(gcount),\n self.population = zip (self.population, [self.target]*len(self.population))\n self.population = self.pool.map(f, self.population)\n except:\n pass\n for i in self.population:\n print i[0],i[1]\n self.population = [organism.Organism(x[0], x[1]) for x in self.population]\n self.population.sort()\n print \" Max fitness: \"+str(self.population[::-1][1].fitness)\n try:\n if self.population[0] <= self.ppop[0]:\n self.ppop = self.population[::-1][0:10] # The top ten organisms\n else:\n self.population = self.ppop # We got worse! go back!\n except:\n self.ppop = self.population\n self.population = self.population[::-1][0:10]\n try:\n self.breed()\n except:\n print \"Breeding error\"\n gcount+=1", "def _choose_sample(self):\n\n \t #periodically generate a new reconstruction for the purposes of sampling", "def _pop_random_n(entities: np.array, weights: np.array, count: int = 3):\n for _ in range(count):\n if not len(entities):\n return\n\n choice, entities, weights = _pop_random(entities, weights)\n yield choice", "def step(individuals, grammar, replacement, selection, fitness_function, best_ever):\n #Select parents\n parents = selection(individuals)\n #Crossover parents and add to the new population\n new_pop = []\n while len(new_pop) < GENERATION_SIZE:\n new_pop.extend(onepoint_crossover(*random.sample(parents, 2)))\n #Mutate the new population\n new_pop = list(map(int_flip_mutation, new_pop))\n #Evaluate the fitness of the new population\n evaluate_fitness(new_pop, grammar, fitness_function)\n #Replace the sorted individuals with the new populations\n individuals = replacement(new_pop, individuals)\n best_ever = max(best_ever, max(individuals))\n return individuals, best_ever", "def generate(self, num_leafs):\n leafs = self.get_leafs()\n for _ in range(num_leafs):\n box = leafs[np.random.choice(len(leafs))]\n leafs.remove(box)\n ch0, ch1 = box.split()\n self.add_edge(box, ch0)\n self.add_edge(box, ch1)\n leafs.append(ch0)\n leafs.append(ch1)", "def nth_iteration(Iterations, Moves_ahead, GA_iterations, n_samples,\n current_gen_spectra, next_gen_conc, x_test,\n conc_array_actual, spectra_array_actual, seed,\n median_fitness_list, max_fitness_list,\n iteration, mutation_rate_list, fitness_multiplier_list):\n set_seed(seed)\n mutation_rate, fitness_multiplier, best_move, best_move_turn, \\\n max_fitness, surrogate_score, desired_1, current_gen_spectra_1, \\\n best_conc_array, \\\n dictionary_of_moves = MCTS(Iterations, Moves_ahead,\n GA_iterations, current_gen_spectra,\n next_gen_conc, x_test, conc_array_actual,\n spectra_array_actual, seed, n_samples)\n print('The best move has a fitness value of', max_fitness)\n print('The best move occurs in', best_move_turn, 'turns.')\n print()\n print('The surrogate model has a score of:', surrogate_score)\n print()\n 
mutation_rate_list.append(mutation_rate)\n fitness_multiplier_list.append(fitness_multiplier)\n current_gen_spectra = current_gen_spectra.T\n current_gen_spectra = MinMaxScaler().fit(current_gen_spectra). \\\n transform(current_gen_spectra).T\n next_gen_conc, median_fitness, max_fitness = perform_iteration(\n current_gen_spectra, next_gen_conc, x_test, 20,\n n_samples, mutation_rate, fitness_multiplier)\n best_conc_array = \\\n best_conc_array[np.argsort(best_conc_array[:, -1])][-1, :]\n print(next_gen_conc)\n return mutation_rate, fitness_multiplier, mutation_rate_list, \\\n fitness_multiplier_list, best_move, best_move_turn, \\\n max_fitness, surrogate_score, next_gen_conc, \\\n best_conc_array, dictionary_of_moves", "def nextGen(self):\n\n p = []\n while len(p) < len(self.p):\n #select mates and produce offspring\n p1, p2 = self.select()\n offspring = self.mate(p1, p2)\n\n #put the offspring in the next generation (with mutation)\n for child in offspring:\n child=self.mutate(child)\n p.append(child)\n \n\n # the world belongs to the new generation\n return p", "def g_iter(n):\n \"*** YOUR CODE HERE ***\"\n g1, g2, g3, cur, ind = 1, 2, 3, 0, 3\n if n < 3:\n return n\n else:\n while ind < n:\n cur = g3 + 2 * g2 + 3 * g1\n ind += 1\n g1, g2, g3 = g2, g3, cur\n return g3", "def run(self, n):\n new_trajectories = self.enumerate_trajectories(self.gpm.Graph, n, self.source, self.target, max_iter=self.max_iter)\n self._trajectories += new_trajectories", "def next_population(population, w, h, N):\r\n t = lambda entity: entity[1]\r\n population.sort(key=t, reverse=True)\r\n upper_half = population[:len(population)//2]\r\n mutated_half = []\r\n for entity in upper_half:\r\n new_entity = mutate(w, h, entity[0][:], 0.1) #0.1 as p seems to be pretty good in this config.\r\n new_entity = (new_entity[:], count_numbers(gen_board(w, h, new_entity), N))\r\n mutated_half.append(new_entity)\r\n return upper_half+mutated_half", "def run_generations(init_len):\n num_graphs = 0\n current_gen = [nx.path_graph(init_len)]\n complete_graph_list = current_gen.copy()\n while len(current_gen) and current_gen[0].size() < (3*init_len - 7):\n current_gen = generation_next(current_gen)\n num_graphs += show_graph_list(current_gen)\n complete_graph_list.extend(filter_bridge_case(current_gen))\n print(num_graphs)\n return complete_graph_list", "def cycles(n, support, randomize=False):\n support = np.array(support)\n\n def gen(p):\n g = combinations(support, n)\n if randomize:\n g = list(g)\n random.shuffle(g)\n\n for local_support in g:\n for output_p in all_permutations(local_support)(p):\n yield output_p\n\n return gen", "def getNextGeneration(self, chromosomes: ChromList) -> ChromList:\n parents = self.select(chromosomes)\n offspring = self.crossover(parents)\n offspring = self.mutate(offspring)\n return parents + offspring", "def _selection(self) -> None:\n # The size of the new population must be the same as the prev. one\n max_size_of_pop = self._pop_size\n\n # Copy 50% of best chromosomes to the next generation\n num_of_pop_to_next_gen = round(self._pop_size / 2)\n max_size_of_pop -= num_of_pop_to_next_gen\n self._population = self._population[0:num_of_pop_to_next_gen]\n\n # Mutate 25% of the prev. population and add to the next generation\n num_of_mutated_to_next_gen = round(max_size_of_pop / 2)\n max_size_of_pop -= num_of_mutated_to_next_gen\n for i in range(num_of_mutated_to_next_gen):\n # Mutate one member from the prev. 
generation\n img, _ = self._population[i]\n new_mutated_member = self._mutate(img)\n\n # Apply more mutation to one chromosome(from 0 to 100)\n for i in range(rand.randint(0, 100)):\n new_mutated_member = self._mutate(new_mutated_member)\n\n # Evaluate the goodness of obtained chromosome\n fitval = self._fit_test(new_mutated_member)\n # Add the mutated chromosome to the next generation\n self._population.append((new_mutated_member, fitval))\n\n # For remaining 25% of the prev. population do crossing overs\n num_of_crossing_overs_to_next_gen = max_size_of_pop\n max_size_of_pop -= num_of_crossing_overs_to_next_gen\n\n for i in range(num_of_crossing_overs_to_next_gen):\n # Choose 2 chromosomes, then do one crossing over\n img_ext_1, _ = self._population[i]\n img_ext_2, _ = self._population[rand.randint(0, num_of_pop_to_next_gen)]\n\n new_mutated_member = self._crossing_over(img_ext_1, img_ext_2)\n # Evaluate the goodness of obtained chromosome\n fitval = self._fit_test(new_mutated_member)\n # Add the derived chromosome to the next generation.\n # Form of 1 element of the population: (member, fitness value)\n self._population.append((new_mutated_member, fitval))\n\n # Sort the new generation in increasing order based on the fitness value of each chromosome\n self._population.sort(key=lambda x: x[1])\n print(f'Best chromosome fit value: {self._population[0][1]}')", "def spawn_visitors(self, n):\n spawnable_positions = self.get_all_spawnable_cells()\n for _ in range(n):\n\n visitor = Visitor(self.next_id(), self, female_ratio=self.female_ratio, adult_ratio=self.adult_ratio,\n familiarity=self.familiarity)\n\n pos = random.choice(spawnable_positions)\n\n self.grid.place_agent(agent=visitor, pos=pos)\n self.schedule.add(visitor)", "def _ppsc_gossip(self):\n for i in range(self.s):\n for edges in self.all_edges:\n # for each connected component\n selected_edge = random.choice(edges)\n self._ppsc_gossip_edge(selected_edge)", "def run(self):\n population_p = self.create_population()\n population_p = self.sort_population(population_p)\n best_x = population_p[0]\n for k in range(self.iteration):\n population_r = []\n # random.shuffle(population_p)\n for i in range(0, self.population_length, 2):\n mother = 0\n father = 1\n children = [self.random_chromosome(), self.random_chromosome()]\n while (mother == father) or (children[0] in population_p) or (children[1] in\n population_p):\n mother = random.randint(0, self.population_length - 1)\n father = random.randint(0, self.population_length - 1)\n children = self.cross(population_p[mother], population_p[father])\n children[0] = self.mutate(children[0])\n children[1] = self.mutate(children[1])\n\n population_r.append(children[0])\n population_r.append(children[1])\n\n population_p = self.new_population(population_p, population_r)\n if self.fitness(population_p[0]) < self.fitness(best_x):\n best_x = population_p[0]\n\n # print(population_p)\n return best_x", "def select(individuals, n):\r\n # return selBest(individuals, n)\r\n return individuals[:n]", "def _next(self, population):\n # split the population for crossover\n selected, the_rest = self._split_population(\n population, self._get_selected_number(population,\n self._selection_crossover))\n\n # crossover\n generated_items_crossover = []\n while len(selected) >= 2:\n male, female = random.sample(selected, 2)\n selected.remove(male)\n selected.remove(female)\n generated_items_crossover.extend(\n self._crossover.crossover(male, female))\n\n # if there is a impar number of selected items\n # add it back to 
the list\n the_rest.extend(selected)\n\n # Make the mutations\n selected, the_rest = self._split_population(\n the_rest, self._get_selected_number(population,\n self._selection_mutation))\n # mutation\n generated_items_mutation = []\n for item in selected:\n generated_items_mutation.append(self._mutation.mutate(item))\n\n # compute the population\n population = []\n population.extend(the_rest)\n population.extend(generated_items_crossover)\n population.extend(generated_items_mutation)\n\n return population", "def g_iter(n):\n \"*** YOUR CODE HERE ***\"\n vals = [1, 2, 3]\n if n <= 3:\n return vals[n-1]\n for i in range(n - 3):\n new_val = 3 * vals[0] + 2 * vals[1] + 1 * vals[2]\n vals = vals[1:] + [new_val]\n return vals[-1]", "def evolve_generation(pop, probs, best_member, p_c, p_m):\n if best_member is None:\n new_pop = []\n else:\n new_pop = [best_member]\n while len(new_pop) < len(pop):\n NN1, NN2 = np.random.choice(pop, size=2, p=probs)\n new_pop.append(crossover(NN1, NN2, p_c, p_m))\n return new_pop", "def puzzle_generator():\r\n print(\"Generating puzzles...\")\r\n puzzle_container = []\r\n while len(puzzle_container) < 25:\r\n next_state_tuple = ()\r\n check_dict = {}\r\n \r\n initial_state_tuple = ([[0], [1, 2, 3], [4, 5, 6], [7, 8, 9]], (0, 0))\r\n for i in range(20):\r\n state_container = next_possible_states([initial_state_tuple], check_dict, True)\r\n try:\r\n next_state_tuple = random.choice(state_container)\r\n initial_state_tuple = next_state_tuple\r\n except IndexError:\r\n if initial_state_tuple not in puzzle_container:\r\n puzzle_container.append(initial_state_tuple)\r\n break\r\n if initial_state_tuple not in puzzle_container:\r\n puzzle_container.append(initial_state_tuple)\r\n \r\n if len(puzzle_container) == 25:\r\n print(\"25 distinct puzzles are succesfully generated!\")\r\n return puzzle_container\r\n else:\r\n print(\"Puzzle generation failed!\")", "def run(self):\n for i in range(self.generations):\n log.info(f'Training population in generation {i + 1}...')\n if i == 0:\n self.create_first_generation()\n else:\n self.create_next_generation()\n log.info(f'best individual: {self.best_individual()[1]}')\n log.info(f'best individual score: {self.best_individual()[0]}')", "def tournament_selector(population, size = 5):\n while True:\n sample_ix = nprand.random_integers(0, len(population) - 1, size)\n # because of sorted-ness, best ind is in smallest ix\n yield population[sample_ix.min()]", "def generate(group, number, n):\n return [get_group(group, number) for i in xrange(n)]", "def evolve(self):\n self.generation = 0\n start_time = time.time()\n\n # while the termination criteria is not satisfied, makes another generation\n while not self.termination_criteria.satisfied(self.generation, time.time()-start_time, self.population):\n self.generation += 1\n #print str(self.generation)\n next_generation = []\n\n if self.elitism:\n # Keeps the 10% best individuals\n best_individuals = heapq.nsmallest(int(0.1*self.population_size), self.population, lambda individual: individual.get_fitness())\n next_generation += copy.deepcopy(best_individuals)\n\n # select genetic operation probabilistically\n # this is a roulette wheel selection\n operations = numpy.random.choice(['reproduction', 'crossover', 'mutation'], size=self.population_size, p=[self.reproduction, self.crossover, self.mutation]).tolist()\n individuals = numpy.random.choice(self.population, p=self.normalized_fitness, size=2*self.population_size, replace=True).tolist()\n\n while len(next_generation) < 
self.population_size:\n operation = operations.pop()\n individual = individuals.pop()\n individual.get_fitness() # enforce fitness calculation\n\n if operation == 'reproduction':\n next_generation.append(individual)\n elif operation == 'crossover':\n individual2 = individuals.pop()\n individual2.get_fitness() # enforce fitness calculation\n individual1, individual2 = individual.crossover(individual2)\n next_generation.append(individual1)\n next_generation.append(individual2)\n elif operation == 'mutation':\n individual1 = individual.mutate()\n next_generation.append(individual1)\n\n self.population = next_generation\n self.population_fitness = numpy.asarray(map(lambda individual: individual.get_fitness(), self.population))\n most_negative = self.population_fitness.min()\n self.normalized_fitness = numpy.asarray(map(lambda fitness: 1/math.pow(fitness+numpy.absolute(most_negative)+1, 1), self.population_fitness))\n s = float(self.normalized_fitness.sum())\n self.normalized_fitness = numpy.asarray(map(lambda fitness: fitness/s, self.normalized_fitness))\n\n mean = numpy.mean(self.population_fitness)\n std = numpy.std(self.population_fitness)\n min = self.population_fitness.min()\n\n info_mean = pandas.DataFrame([[self.generation, mean, min, std]], columns=[\"generation\", \"mean\", \"min\", \"std\"])\n self.generation_info = self.generation_info.append(info_mean, ignore_index=True)", "def step(self, generation_idx, log_writer):\n # Sort the population by fitness and select the top\n sorted_fit_idxs = list(reversed(sorted(zip(self.fitnesses, itools.count()))))\n sorted_pop = [self.population[ix] for _, ix in sorted_fit_idxs]\n\n # recalculate the fitness of the elite subset and find the best individual\n max_fitness, max_idx = sorted_fit_idxs[0]\n for cp_from, cp_to in zip(sorted_pop, self.selected):\n cp_to.model.load_state_dict(cp_from.model.state_dict())\n\n log_writer.add_scalar(\"Best/fitness\", sorted_fit_idxs[0][0], generation_idx)\n log_writer.add_scalar(\"Best/learning rate\", self.population[max_idx].learning_rate, generation_idx)\n log_writer.add_scalar(\"Best/avg instinct activation\", self.instinct_average_list[max_idx], generation_idx)\n log_writer.add_scalar(\"Worst/fitness\", sorted_fit_idxs[-1][0], generation_idx)\n log_writer.add_scalar(\"Worst/elite fitness\", sorted_fit_idxs[self.to_select - 1][0], generation_idx)\n log_writer.add_scalar(\"Average fitness\", sum(self.fitnesses) / len(self.fitnesses), generation_idx)\n\n # next generation\n for i in range(self.pop_size):\n if i == max_idx:\n continue\n\n dart = int(torch.rand(1) * self.to_select)\n # Select parent and child\n parent = self.selected[dart]\n child = self.population[i]\n # copy the parent genes to the child genes\n child.model.load_state_dict(parent.model.state_dict())\n child.learning_rate = parent.learning_rate\n # apply mutation to model parameters\n for p in child.model.get_evolvable_params():\n mutation = torch.randn_like(p.data) * self.sigma\n p.data += mutation\n # apply mutation to learning rate\n child.learning_rate += torch.randn((1, 1)).item() * 0.001\n if child.learning_rate < 0:\n child.learning_rate *= -1\n\n if self.sigma > self.min_sigma:\n self.sigma *= self.sigma_decay\n elif self.sigma < self.min_sigma:\n self.sigma = self.min_sigma\n\n return (self.population[max_idx], max_fitness)", "def next_img_pair_to_grow_reconstruction(n_imgs, init_pair, resected_imgs, unresected_imgs, img_adjacency):\n\n if len(unresected_imgs) == 0: raise ValueError('Should not check next image to resect if all have 
been resected already!')\n straddle = False\n if init_pair[1] - init_pair[0] > n_imgs/2 : straddle = True #initial pair straddles \"end\" of the circle (ie if init pair is idxs (0, 49) for 50 images)\n\n init_arc = init_pair[1] - init_pair[0] + 1 # Number of images between and including initial pair\n\n #fill in images between initial pair\n if len(resected_imgs) < init_arc:\n if straddle == False: idx = resected_imgs[-2] + 1\n else: idx = resected_imgs[-1] + 1\n while True:\n if idx not in resected_imgs:\n prepend = True\n unresected_idx = idx\n resected_idx = random.choice(resected_imgs)\n return resected_idx, unresected_idx, prepend\n idx = idx + 1 % n_imgs\n\n extensions = len(resected_imgs) - init_arc # How many images have been resected after the initial arc\n if straddle == True: #smaller init_idx should be increased and larger decreased\n if extensions % 2 == 0:\n unresected_idx = (init_pair[0] + int(extensions/2) + 1) % n_imgs\n resected_idx = (unresected_idx - 1) % n_imgs\n else:\n unresected_idx = (init_pair[1] - int(extensions/2) - 1) % n_imgs\n resected_idx = (unresected_idx + 1) % n_imgs\n else:\n if extensions % 2 == 0:\n unresected_idx = (init_pair[1] + int(extensions/2) + 1) % n_imgs\n resected_idx = (unresected_idx - 1) % n_imgs\n else:\n unresected_idx = (init_pair[0] - int(extensions/2) - 1) % n_imgs\n resected_idx = (unresected_idx + 1) % n_imgs\n\n prepend = False\n return resected_idx, unresected_idx, prepend", "def create_next_generation(pop, pop_num, fit_val, mut_prob, kd_min, kd_max, kp_min, kp_max, ki_min, ki_max):\n #Saves top 3 performing genomes\n pop_top = []\n for m in range(1) :\n pop_top.append(pop[m])\n\n #Crossover performed in top 20\n pop_cross = []\n for n in range(25):\n new_pop1 = crossover(pop[n], pop[n+1])\n pop_cross.append(new_pop1)\n\n #Adds all currently available members\n #Then mutates them.\n pop_new = []\n pop_premut = []\n pop_premut = pop_top + pop_cross\n pop_new = mutate(pop_premut, mut_prob, kd_min, kd_max, kp_min, kp_max, ki_min, ki_max)\n\n #Create random members and saves them \n for s in range(pop_num - len(pop_new)):\n #Creating the random PID values\n kd_cur = round(random.uniform(kd_min, kd_max), 2)\n kp_cur = round(random.uniform(kp_min, kp_max), 2)\n ki_cur = round(random.uniform(ki_min, ki_max), 2)\n #Into 2-D List. 
Access via pop[i][j]\n pop_new.append([kd_cur, kp_cur, ki_cur])\n return pop_new", "def generate_candidate_grasps(object_name, dataset, stable_pose,\n num_grasps, gripper, config):\n grasp_set = []\n grasp_set_ids = []\n grasp_set_metrics = []\n\n # read params\n approach_dist = config['approach_dist']\n delta_approach = config['delta_approach']\n rotate_threshold = config['rotate_threshold']\n table_clearance = config['table_clearance']\n dist_thresh = config['grasp_dist_thresh']\n\n # get sorted list of grasps to ensure that we get the top grasp\n graspable = dataset.graspable(object_name)\n graspable.model_name_ = dataset.obj_mesh_filename(object_name)\n grasps = dataset.grasps(object_name, gripper=gripper.name)\n all_grasp_metrics = dataset.grasp_metrics(object_name, grasps, gripper=gripper.name)\n mn, mx = graspable.mesh.bounding_box()\n alpha = 1.0 / np.max(mx-mn)\n print alpha\n\n # prune by collisions\n rave.raveSetDebugLevel(rave.DebugLevel.Error)\n collision_checker = gcc.OpenRaveGraspChecker(gripper, view=False)\n collision_checker.set_object(graspable)\n\n # add the top quality grasps for each metric\n metrics = config['candidate_grasp_metrics']\n for metric in metrics:\n # generate metric tag\n if metric == 'efc':\n metric = db.generate_metric_tag('efcny_L1', config)\n elif metric == 'pfc':\n metric = db.generate_metric_tag('pfc', config)\n elif metric == 'ppc':\n metric = db.generate_metric_tag('ppc_%s' %(stable_pose.id), config)\n\n # sort grasps by the current metric\n grasp_metrics = [all_grasp_metrics[g.grasp_id][metric] for g in grasps]\n grasps_and_metrics = zip(grasps, grasp_metrics)\n grasps_and_metrics.sort(key = lambda x: x[1])\n grasps = [gm[0] for gm in grasps_and_metrics]\n grasp_metrics = [gm[1] for gm in grasps_and_metrics]\n\n # add grasps by quantile\n logging.info('Adding best grasp for metric %s' %(metric))\n i = len(grasps) - 1\n grasp_candidate = grasps[i].grasp_aligned_with_stable_pose(stable_pose)\n\n # check wrist rotation\n psi = grasp_candidate.angle_with_table(stable_pose)\n rotated_from_table = (psi > rotate_threshold)\n\n # check distances\n min_dist = np.inf\n for g in grasp_set:\n dist = grasp_module.ParallelJawPtGrasp3D.distance(g, grasp_candidate)\n if dist < min_dist:\n min_dist = dist\n\n # check collisions\n while gripper.collides_with_table(grasp_candidate, stable_pose, table_clearance) \\\n or collides_along_approach(grasp_candidate, gripper, collision_checker, approach_dist, delta_approach) \\\n or rotated_from_table or grasp_candidate.grasp_id in grasp_set_ids \\\n or min_dist < dist_thresh:\n # get the next grasp\n i -= 1\n if i < 0:\n break\n grasp_candidate = grasps[i].grasp_aligned_with_stable_pose(stable_pose)\n\n # check wrist rotation\n psi = grasp_candidate.angle_with_table(stable_pose)\n rotated_from_table = (psi > rotate_threshold)\n\n # check distances\n min_dist = np.inf\n for g in grasp_set:\n dist = grasp_module.ParallelJawPtGrasp3D.distance(g, grasp_candidate)\n if dist < min_dist:\n min_dist = dist\n\n # add to sequence\n if i >= 0:\n grasp_set.append(grasp_candidate)\n grasp_set_ids.append(grasp_candidate.grasp_id)\n grasp_set_metrics.append(all_grasp_metrics[grasp_candidate.grasp_id])\n\n # sample the remaining grasps uniformly at random\n i = 0\n random.shuffle(grasps)\n while len(grasp_set) < num_grasps and i < len(grasps):\n # random grasp candidate\n logging.info('Adding grasp %d' %(len(grasp_set)))\n grasp_candidate = grasps[i].grasp_aligned_with_stable_pose(stable_pose)\n\n # check wrist rotation\n psi = 
grasp_candidate.angle_with_table(stable_pose)\n rotated_from_table = (psi > rotate_threshold)\n\n # check distances\n min_dist = np.inf\n for g in grasp_set:\n dist = grasp_module.ParallelJawPtGrasp3D.distance(g, grasp_candidate)\n if dist < min_dist:\n min_dist = dist\n\n # check collisions\n while gripper.collides_with_table(grasp_candidate, stable_pose) \\\n or collides_along_approach(grasp_candidate, gripper, collision_checker, approach_dist, delta_approach) \\\n or rotated_from_table or grasp_candidate.grasp_id in grasp_set_ids \\\n or min_dist < dist_thresh:\n # get the next grasp\n i += 1\n if i >= len(grasps):\n break\n grasp_candidate = grasps[i].grasp_aligned_with_stable_pose(stable_pose)\n\n # check wrist rotation\n psi = grasp_candidate.angle_with_table(stable_pose)\n rotated_from_table = (psi > rotate_threshold)\n\n # check distances\n min_dist = np.inf\n for g in grasp_set:\n dist = grasp_module.ParallelJawPtGrasp3D.distance(g, grasp_candidate)\n if dist < min_dist:\n min_dist = dist\n\n # add to sequence\n if i < len(grasps):\n grasp_set.append(grasp_candidate)\n grasp_set_ids.append(grasp_candidate.grasp_id)\n grasp_set_metrics.append(all_grasp_metrics[grasp_candidate.grasp_id])\n\n return grasp_set, grasp_set_ids, grasp_set_metrics", "def ssf(n, N_max = 6, E_max = 4, time_total = 100 ):\n\n G = nx.DiGraph()\n G.add_edge( 0, 1, time_stamp = 1)\n\n # List of existing nodes, with nodes repeated once for each adjacent edge\n repeated_nodes = [0, 1]\n\n # compute discrete bins for nodes \n DistNodes = _get_distribution( N_max, 0 )\n # compute discrete bins for edges\n DistLinks = _get_distribution( E_max, 1 )\n \n # Start adding the other n - 2 nodes.\n N_current = time_current = 2\n while N_current < n and time_current < time_total + 1:\n # preferential attachment\n N_current = _preferential_attachment( DistNodes, DistLinks, N_current,\n time_total, time_current, n, repeated_nodes, G )\n time_current += 1\n \n# print(time_current, N_current)\n return G", "def run(self):\n values_to_set = self._load().get_initial_values()\n\n best_data = []\n worst_data = []\n found = False\n overall_nb_generations_done = 0\n restart_counter = 0\n\n while overall_nb_generations_done < self._max_nb_generations and not found:\n new_population = ga_utils.create_generation(self._population_size, values_to_set)\n\n nb_generations_done = 0\n remember_the_best = 0\n nb_generations_without_improvement = 0\n\n # Loop until max allowed generations is reached or a solution is found\n while nb_generations_done < self._max_nb_generations and not found:\n # Rank the solutions\n ranked_population = ga_utils.rank_population(new_population)\n best_solution = ranked_population[0]\n best_score = best_solution.fitness()\n worst_score = ranked_population[-1].fitness()\n best_data.append(best_score)\n worst_data.append(worst_score)\n\n # Manage best value and improvements among new generations over time\n if remember_the_best == best_score:\n nb_generations_without_improvement += 1\n else:\n remember_the_best = best_score\n if 0 < self._restart_after_n_generations_without_improvement < nb_generations_without_improvement:\n print(\"No improvement since {} generations, restarting the program\".\n format(self._restart_after_n_generations_without_improvement))\n restart_counter += 1\n break\n\n # Check if problem is solved and print best and worst results\n if best_score > 0:\n print(\"Problem not solved on generation {} (restarted {} times). 
Best solution score is {} and \"\n \"worst is {}\".format(nb_generations_done, restart_counter, best_score, worst_score))\n # Not solved => select a new generation among this ranked population\n # Retain only the percentage specified by selection rate\n next_breeders = ga_utils.pick_from_population(ranked_population, self._selection_rate,\n self._random_selection_rate)\n\n children = ga_utils.create_children_random_parents(next_breeders, self._nb_children)\n new_population = ga_utils.mutate_population(children, self._mutation_rate)\n\n nb_generations_done += 1\n overall_nb_generations_done += 1\n else:\n print(\"Problem solved after {} generations ({} overall generations)!!! Solution found is:\".\n format(nb_generations_done, overall_nb_generations_done))\n best_solution.display()\n found = True\n print(\"It took {} to solve it\".format(tools.get_human_readable_time(self._start_time, time())))\n\n if not found:\n print(\"Problem not solved after {} generations. Printing best and worst results below\".\n format(overall_nb_generations_done))\n ranked_population = ga_utils.rank_population(new_population)\n best_solution = ranked_population[0]\n worst_solution = ranked_population[-1]\n print(\"Best is:\")\n best_solution.display()\n print(\"Worst is:\")\n worst_solution.display()\n\n graphics.draw_best_worst_fitness_scores(best_data, worst_data)", "def get_n_random_itrees(self, n, subs_size):\n random_itrees = np.empty(n, dtype=object) # Allocate list for storing the trees.\n # TODO: parallelize!\n for k in np.arange(n):\n # Get a random sample of training examples to build next random itree.\n data_sub = self.data[np.random.choice(self.data.shape[0], subs_size, replace=False), :]\n random_itrees[k] = self.get_random_itree(data_sub) # Get next random itree \n self.random_itrees = random_itrees\n self.subs_size = subs_size", "def generate(bat_size, s=\"train\"):\n while True:\n pairs, targets = get_batch(bat_size, s)\n yield (pairs, targets)", "def generate(generations, population, nn_param_choices, X, Y):\r\n\toptimizer = Optimizer(nn_param_choices)\r\n\tnetworks = optimizer.create_population(population)\r\n\r\n\t# Evolve the generation.\r\n\tfor i in range(generations):\r\n\t\tlogging.info(\"***Doing generation %d of %d***\" %\r\n\t\t\t\t\t (i + 1, generations))\r\n\r\n\t\t# Train and get accuracy for networks.\r\n\t\tthreads = train_networks(networks, X, Y)\r\n\r\n\t\t# Get the average accuracy for this generation.\r\n\t\taverage_accuracy = get_average_accuracy(networks)\r\n\r\n\t\t# Print out the average accuracy each generation.\r\n\t\tlogging.info(\"Generation average: %.2f%%\" % (average_accuracy * 100))\r\n\t\tlogging.info('-'*80)\r\n\r\n\t\t# Evolve, except on the last iteration.\r\n\t\tif i != generations - 1:\r\n\t\t\t# Do the evolution.\r\n\t\t\tnetworks = optimizer.evolve(networks, threads)\r\n\r\n\t# Sort our final population.\r\n\tnetworks = sorted(networks, key=lambda x: 1-x.squared_err, reverse=True)\r\n\r\n\t# Print out the top 5 networks.\r\n\tprint_networks(networks[:5])", "def genetic_algorithm(population, lamda):\n maxGenerations = 5000\n generations_count = 0\n while generations_count <= maxGenerations:\n new_population = []\n generations_count += 1\n for i in range(0, len(population)):\n x = random_select(population, lamda)\n y = random_select(population, lamda)\n child = cross_over(x, y)\n child = mutate(child)\n new_population.append(child)\n population = new_population\n # Test for result\n conflicts = find_conflicts(population[i])\n if conflicts == 0:\n return True, 
population[i], generations_count\n return False, None, maxGenerations", "def _step(self):\n self.sort()\n selection = self._select()\n offspring = self._crossover(selection)\n self._mutate(offspring)\n\n self.sort()\n if self.elite_num > 0:\n offspring[:self.elite_num] = self.population[:self.elite_num]\n\n self.population[:] = offspring\n\n self.sort()\n if self.cull_num > 0:\n self.population[-self.cull_num:] = self._initialize(self.cull_num)", "def __call__(self, s, n=1000):\n\n root = StateNode(None, s, self.game)\n \n if root.parent is not None:\n raise ValueError(\"Root's parent must be None.\")\n \n for _ in range(n):\n #selection\n node = _get_next_node(root, self.tree_policy)\n #simulation\n node.reward = self.default_policy(node)\n #print(node.reward)\n #back\n self.backup(node)\n \n root.reset(copy.deepcopy(self.game_bak))\n \n #for i in root.children:\n # print(root.children[i].__dict__)\n # for j in root.children[i].children:\n # print(root.children[i].children[j].__dict__)\n # print(\"=======\")\n return rand_max(root.children.values(), key=lambda x: x.q).action, rand_max(root.children.values(), key=lambda x: x.q).q", "def next_epoch(self, fitness_func):\n self.gen_innovations = []\n self.gen_num += 1\n\n self.update_pop_champ()\n self.update_overall_pop_champ()\n\n print('GEN:', self.gen_num)\n\n if self.overall_pop_champ is not None:\n print('BEST', self.overall_pop_champ.fitness)\n print('GenBest', self.pop_champ.fitness)\n print('Most Nodes', len(self.get_most_nodes().node_genes))\n print('Most Links', len(self.get_most_links().link_genes))\n\n\n new_genomes = []\n # Calculate adjusted fitness for species\n for spec in self.species.values():\n spec.adjust_fitness()\n self.remove_unimproved_species()\n\n tot_adj_fit = self.get_total_adj_fitness()\n print('tot_adj_fit', tot_adj_fit)\n self.ave_fitness = self.get_average_fitness()\n print('AverageFitness', self.ave_fitness)\n\n self.verify_genomes()\n\n total_offspring = 0\n offspring_counts = []\n percents = []\n sums = []\n\n for spec in self.species.values():\n spec_fitness = spec.get_total_adj_fitness()\n percent_offspring = spec_fitness / tot_adj_fit\n if len(self.species)==1:\n percent_offspring = 1\n n_offspring = int(self.pop_size * percent_offspring)\n total_offspring += n_offspring\n offspring_counts.append(n_offspring)\n percents.append(percent_offspring)\n sums.append(spec_fitness)\n\n print('Reproducing')\n for spec in self.species.values():\n percent_offspring = sum(g.adj_fitness/tot_adj_fit for g in spec.genomes)\n\n if len(self.species)==1:\n percent_offspring = 1\n n_offspring = int(self.pop_size * percent_offspring)\n new_genomes += spec.reproduce(n_offspring)\n\n self.all_genomes = new_genomes\n print(f'{len(self.all_genomes)} new genomes')\n self.all_genomes.append(self.pop_champ.copy())\n\n before_n_species = len(self.species)\n print('Speciating')\n self.speciate()\n after_n_species = len(self.species)\n print(f'{before_n_species}->{after_n_species} species')\n\n\n print('Computing fitness')\n self.compute_pop_fitness(fitness_func)\n print()", "def run_trial(self, num_nodes): \n #compute the neighbors for the newly-created node\n new_node_neighbors = set()\n for dummy_idx in range(num_nodes):\n new_node_neighbors.add(random.choice(self._node_numbers))\n # update the list of node numbers so that each node number \n # appears in the correct ratio\n self._node_numbers.append(self._num_nodes)\n self._node_numbers.extend(list(new_node_neighbors)) \n #update the number of nodes\n self._num_nodes += 1\n return 
list(new_node_neighbors)", "def selection(self):\n\n for i in range(self.pop_num*3): # It is important. Next, we will rank the array of parents and children in ascending order of survivability (sum (fit)).\n self.par_and_sons[i].fit = SimpleSegmentationGA.fitness_function(self.gray, self.delta_x, self.length, self.par_and_sons[i].A)\n\n # Sort.\n self.par_and_sons = sorted(self.par_and_sons, key=lambda individ: individ.fit) \n self.population=self.par_and_sons[:self.pop_num].copy()", "def generate_graph(size, number_of_clusters, minimal_size):\n base_list = list(range(size))\n result_list = []\n random.shuffle(base_list)\n for i in range(number_of_clusters - 1):\n size = random.randint(minimal_size, len(base_list) - (number_of_clusters - i - 1) * minimal_size)\n cluster = []\n for n in range(size):\n actual = random.choice(base_list)\n base_list.remove(actual)\n cluster.append(actual)\n result_list.append(strongly_connect(cluster))\n result_list.append(strongly_connect(base_list))\n\n while len(result_list) < 5:\n result_list.append([])\n\n print(sorted([len(i) for i in result_list], reverse=True)[:5])\n\n return weak_connect_graph(result_list)", "def run_genetic_algorithm(self, generations=5000, population_size=100):\n\n population_subset_size = int(population_size / 10.)\n generations_10pct = int(generations / 10.)\n\n # Create a random population of `population_size` number of solutions.\n population = self._generate_random_population(population_size)\n\n # For `generations` number of repetitions...\n for generation in range(generations):\n\n # Compute the fitness of the entire current population\n population_fitness = {}\n\n for agent_genome in population:\n if agent_genome in population_fitness:\n continue\n\n population_fitness[agent_genome] = self._compute_fitness(agent_genome)\n\n # Take the top 10% shortest road trips and produce offspring each from them\n new_population = []\n for rank, agent_genome in enumerate(sorted(population_fitness,\n key=population_fitness.get)[:population_subset_size]):\n\n if (generation % generations_10pct == 0 or generation == generations - 1) and rank == 0:\n print(\"Generation %d best: %d | Unique genomes: %d\" % (generation,\n population_fitness[agent_genome],\n len(population_fitness)))\n print(agent_genome)\n print(\"\")\n\n # Create 1 exact copy of each of the top road trips\n new_population.append(agent_genome)\n\n # Create 2 offspring with 1-3 point mutations\n for offspring in range(2):\n new_population.append(self._mutate_agent(agent_genome, 3))\n\n # Create 7 offspring with a single shuffle mutation\n for offspring in range(7):\n new_population.append(self._shuffle_mutation(agent_genome))\n\n # Replace the old population with the new population of offspring\n for i in range(len(population))[::-1]:\n del population[i]\n\n population = new_population\n\n return population", "def _crossover(self, best_population, crossover, n_parents=2, method=\"uniform_swap\"):\n if crossover:\n # randomly select parents\n parents_indexes = torch.randint(0, len(best_population), (self.population_size, n_parents),\n device=self.device)\n new_population = torch.zeros(self.population.shape, device=self.device)\n i = 0\n for p_idx in parents_indexes:\n new_population[i] = self._produce_child(best_population[p_idx], method=method)\n i += 1\n else:\n # randomly repeat best individuals\n new_pop_indexes = torch.randint(0, len(best_population), (self.population_size,), device=self.device)\n new_population = best_population[new_pop_indexes]\n return new_population", 
"def generate_N_doping(path, N_graphitic, N_pyridinic, N_pyrrolic, filename1):\n global bond_list\n bond_list = bond_list_1 + bond_list_3\n atom_list = read_in_graphene(path)\n rings = find_rings(atom_list)\n bond_list = bond_list_1 + bond_list_3\n map_3, map_2, map_2n = filter_carbon_atoms(atom_list, rings)\n graphitic = N_graphitic \n pyridinic = N_pyridinic\n pyrrolic = N_pyrrolic\n attempt = len(atom_list) / 10\n choices = [1, 2, 3]\n while (((N_graphitic > 0) or (N_pyridinic > 0) or (N_pyrrolic > 0)) and (attempt > 0)):\n print(\"Left to add: \", \"N_graphitic \", N_graphitic, \"N_pyridinic \", N_pyridinic, \"N_pyrrolic \", N_pyrrolic)\n if (N_graphitic == 0):\n try:\n choices.remove(1)\n except:\n pass\n if (N_pyridinic == 0):\n try:\n choices.remove(2)\n except:\n pass\n if (N_pyrrolic == 0):\n try:\n choices.remove(3)\n except:\n pass\n choice = random.choice(choices)\n if (choice == 1):\n while ((N_graphitic > 0) and (len(map_3) > 0)):\n random_atom = random.choice(map_3)\n N_graphitic -= 1\n N = Atom(random_atom.atom_number, \"N3\", \"N3A\", str(graphitic - N_graphitic), float(\"{0:.3f}\".format(random_atom.x)), float(\"{0:.3f}\".format(random_atom.y)), float(\"{0:.3f}\".format(random_atom.z)))\n if ((len(identify_bonds(random_atom, atom_list)) == 3) and ((identify_bonds(random_atom, atom_list)[0][0].atom_name == \"CX\") or (identify_bonds(random_atom, atom_list)[0][0].atom_name == \"CY\")) and ((identify_bonds(random_atom, atom_list)[1][0].atom_name == \"CX\") or identify_bonds(random_atom, atom_list)[1][0].atom_name == \"CY\") and ((identify_bonds(random_atom, atom_list)[2][0].atom_name == \"CX\") or (identify_bonds(random_atom, atom_list)[2][0].atom_name == \"CY\"))):\n for ring in rings:\n if (random_atom in ring):\n for atom in ring:\n try:\n map_3.remove(atom)\n except:\n pass\n try:\n map_2.remove(atom)\n except:\n pass\n try:\n map_2n.remove(atom)\n except:\n pass\n try:\n atom_list.remove(random_atom)\n except:\n pass\n atom_list.append(N)\n else:\n attempt -= 1\n elif (choice == 2):\n while ((N_pyridinic > 0) and (len(map_2) > 0)): \n random_atom = random.choice(map_2)\n N_pyridinic -= 1\n N = Atom(random_atom.atom_number, \"N2\", \"N2A\", str(pyridinic - N_pyridinic), float(\"{0:.3f}\".format(random_atom.x)), float(\"{0:.3f}\".format(random_atom.y)), float(\"{0:.3f}\".format(random_atom.z)))\n if ((len(identify_bonds(random_atom, atom_list)) == 2) and ((identify_bonds(random_atom, atom_list)[0][0].atom_name == \"CX\") or (identify_bonds(random_atom, atom_list)[0][0].atom_name == \"CY\")) and ((identify_bonds(random_atom, atom_list)[1][0].atom_name == \"CX\") or identify_bonds(random_atom, atom_list)[1][0].atom_name == \"CY\") ):\n found = False\n for ring in rings:\n if (random_atom in ring):\n found = True\n for atom in ring:\n try:\n map_3.remove(atom)\n except:\n pass\n try:\n map_2.remove(atom)\n except:\n pass\n try:\n map_2n.remove(atom)\n except:\n pass\n if (found == False):\n try:\n map_3.remove(random_atom)\n except:\n pass\n try:\n map_2.remove(random_atom)\n except:\n pass\n try:\n map_2n.remove(random_atom)\n except:\n pass\n atom_list.remove(random_atom)\n atom_list.append(N)\n else:\n attempt -= 1\n else: \n attempt -= 1\n elif (choice == 3):\n while ((N_pyrrolic > 0) and (len(map_2n) > 0)):\n random_atom_1 = random.choice(map_2n)\n for neighbour in identify_bonds(random_atom_1, atom_list):\n if (len(identify_bonds(neighbour[0], atom_list)) == 2):\n random_atom_2 = neighbour[0]\n break\n for ring in rings:\n if (random_atom_1 in ring):\n center_6 = {}\n 
center_6['x'] = 0\n center_6['y'] = 0\n center_6['z'] = 0\n center_4 = {}\n center_4['x'] = 0\n center_4['y'] = 0\n center_4['z'] = 0\n for atom in ring:\n center_6['x'] += atom.x\n center_6['y'] += atom.y\n center_6['z'] += atom.z\n if ((atom != random_atom_1) and (atom != random_atom_2)):\n center_4['x'] += atom.x\n center_4['y'] += atom.y\n center_4['z'] += atom.z\n center_6['x'] /= 6\n center_6['y'] /= 6\n center_6['z'] /= 6\n center_4['x'] /= 4\n center_4['y'] /= 4\n center_4['z'] /= 4\n N_pyrrolic -= 1\n p = 0.6\n limit = 0.3\n if ((-limit < center_4['x'] - center_6['x'] < limit) and (-limit < center_4['y'] - center_6['y'] < limit)): \n N = Atom(random_atom_1.atom_number, \"N1\", \"N2N\", str(pyrrolic - N_pyrrolic), float(\"{0:.3f}\".format(center_6['x'])), float(\"{0:.3f}\".format(center_6['y'])), float(\"{0:.3f}\".format(center_6['z']))) \n elif ((-limit < center_4['x'] - center_6['x'] < limit) and (center_4['y'] - center_6['y'] < -limit)):\n N = Atom(random_atom_1.atom_number, \"N1\", \"N2N\", str(pyrrolic - N_pyrrolic), float(\"{0:.3f}\".format(center_6['x'])), float(\"{0:.3f}\".format(center_6['y'] + p/2)), float(\"{0:.3f}\".format(center_6['z']))) \n elif ((-limit < center_4['x'] - center_6['x'] < limit) and (center_4['y'] - center_6['y'] > limit)):\n N = Atom(random_atom_1.atom_number, \"N1\", \"N2N\", str(pyrrolic - N_pyrrolic), float(\"{0:.3f}\".format(center_6['x'])), float(\"{0:.3f}\".format(center_6['y'] - p/2)), float(\"{0:.3f}\".format(center_6['z']))) \n elif ((center_4['x'] - center_6['x'] < -limit) and (-limit < center_4['y'] - center_6['y'] < limit)):\n N = Atom(random_atom_1.atom_number, \"N1\", \"N2N\", str(pyrrolic - N_pyrrolic), float(\"{0:.3f}\".format(center_6['x'] + p)), float(\"{0:.3f}\".format(center_6['y'])), float(\"{0:.3f}\".format(center_6['z']))) \n elif ((center_4['x'] - center_6['x'] < -limit) and (center_4['y'] - center_6['y'] < -limit)):\n N = Atom(random_atom_1.atom_number, \"N1\", \"N2N\", str(pyrrolic - N_pyrrolic), float(\"{0:.3f}\".format(center_6['x'] + p)), float(\"{0:.3f}\".format(center_6['y'] + p/2)), float(\"{0:.3f}\".format(center_6['z']))) \n elif ((center_4['x'] - center_6['x'] < -limit) and (center_4['y'] - center_6['y'] > limit)):\n N = Atom(random_atom_1.atom_number, \"N1\", \"N2N\", str(pyrrolic - N_pyrrolic), float(\"{0:.3f}\".format(center_6['x'] + p)), float(\"{0:.3f}\".format(center_6['y'] - p/2)), float(\"{0:.3f}\".format(center_6['z']))) \n elif ((center_4['x'] - center_6['x'] > limit) and (-limit < center_4['y'] - center_6['y'] < limit)):\n N = Atom(random_atom_1.atom_number, \"N1\", \"N2N\", str(pyrrolic - N_pyrrolic), float(\"{0:.3f}\".format(center_6['x'] - p)), float(\"{0:.3f}\".format(center_6['y'])), float(\"{0:.3f}\".format(center_6['z']))) \n elif ((center_4['x'] - center_6['x'] > limit) and (center_4['y'] - center_6['y'] < -limit)):\n N = Atom(random_atom_1.atom_number, \"N1\", \"N2N\", str(pyrrolic - N_pyrrolic), float(\"{0:.3f}\".format(center_6['x'] - p)), float(\"{0:.3f}\".format(center_6['y'] + p/2)), float(\"{0:.3f}\".format(center_6['z']))) \n elif ((center_4['x'] - center_6['x'] > limit) and (center_4['y'] - center_6['y'] > limit)):\n N = Atom(random_atom_1.atom_number, \"N1\", \"N2N\", str(pyrrolic - N_pyrrolic), float(\"{0:.3f}\".format(center_6['x'] - p)), float(\"{0:.3f}\".format(center_6['y'] - p/2)), float(\"{0:.3f}\".format(center_6['z']))) \n for ring in rings:\n if (random_atom_1 in ring):\n for atom in ring:\n try:\n map_3.remove(atom)\n except:\n pass\n try:\n map_2.remove(atom)\n except:\n 
pass\n try:\n map_2n.remove(atom)\n except:\n pass\n for mol in identify_bonds(atom, atom_list):\n try:\n map_2n.remove(mol[0])\n except:\n pass\n try:\n atom_list.remove(random_atom_1)\n atom_list.remove(random_atom_2)\n except:\n pass\n atom_list.append(N)\n else:\n attempt -= 1\n attempt -= 1\n writepdb(atom_list, filename1)\n print(\"done.\")\n return 'done.'", "def g_iter(n):\n \"*** YOUR CODE HERE ***\"\n if n < 4:\n return n\n else:\n g1 = 1\n g2 = 2\n g3 = 3\n i = 3\n while(i < n):\n i += 1\n t = g3 + 2*g2 + 3*g1\n g1 = g2\n g2 = g3\n g3 = t\n return g3", "def stage1(self):\n n = self.min\n while True:\n n, bin_ = self.sort_to_bin(n)\n if n is None:\n n = self.get_new_n(bin_)\n if n is None:\n break\n if self.viz:\n yield", "def sample_pagerank(corpus, damping_factor, n):\n first_page = random.choice(list(corpus))\n model = transition_model(corpus, first_page, DAMPING)\n\n for i in range(n):\n\n choosen = random.random()\n total = 0\n\n for k, v in model.items():\n total += v\n\n if choosen <= total:\n page = k\n break\n \n model = transition_model(corpus, page, DAMPING)\n \n return model", "def selection(self):\n # create an empty priority queue for the new generation\n newGeneration = PriorityQueue()\n # pick top X of the population to survive\n for c in range(0, self.generation.qsize() / SELECTION_FRACTION):\n # get a chromosome\n chromosome = self.generation.get()\n # put the chromosomes in the new generation\n newGeneration.put(chromosome)\n # keep the new generation\n self.generation = newGeneration", "def nextGeneration(self):\n\n # Start a timer to calculate the time the render one generation.\n startTime = int(round(time.time() * 100000))\n\n self.generation += 1\n\n self.setNeighbors()\n self.checkAmountOfNeighbors()\n\n # Ends a timer to calculate the time the render one generation.\n endTime = int(round(time.time() * 100000))\n self.timeToCalcGeneration = (endTime - startTime)", "def get_random_depth_sample(n=8, depths=list(range(2,26,2)), num_samples=100):\n\n def get_states(start):\n frontier = [start]\n frontier_set = {start}\n explored = set()\n\n states = [False for _ in range(len(depths))]\n while not all(states):\n node = frontier.pop(0)\n frontier_set.remove(node)\n explored.add(node)\n\n children = node.get_children()\n\n # It's necessary to shuffle children to get a truly random sample; otherwise, the first child (always\n # produced from the parent by the same action) produced at a certain depth will always be selected,\n # and children produced by other actions will never be selected\n shuffle(children)\n\n for child in children:\n if child not in frontier_set and child not in explored:\n frontier_set.add(child)\n frontier.append(child)\n child.path_cost = node.path_cost+1\n index = depths.index(child.path_cost) if child.path_cost in depths else None\n if index is not None and not states[index]:\n states[index] = {'start': start.sequence, 'end': child.sequence}\n\n return states\n\n depth_sample = [[] for depth in range(len(depths))]\n\n for _ in range(num_samples):\n start = list(range(1,n+2))\n shuffle(start)\n start = PuzzleState(start, path_cost=0)\n\n states = get_states(start)\n print('\\rSet ' + str(_+1) + ' of ' + str(num_samples) + ' complete', end='', flush=True)\n list(map(list.append, depth_sample, states))\n\n return depth_sample", "def __build_iteration(self) -> None:\n trees = [t for t in self.__trees.keys()]\n for tree in trees:\n heads = []\n branches = self.__trees[tree]\n for i in range(len(branches) - 1, -1, -1):\n if 
self.__trees.get(tree) and np.random.rand(1)[0] < self.__rate:\n heads += self.__branch_out(branches.pop(i), tree)\n self.__trees[self.__mappings[tree]] += heads\n\n # NB: this can cause errors when seeds spawn near the edge\n if len(self.__trees[self.__mappings[tree]]) == 0:\n logging.info(\"deleting tree with id {}\".format(tree))\n del self.__trees[self.__mappings[tree]]", "def outer_loop_lp(self, profile, missed_winners):\r\n\r\n # Initialize\r\n stats = self.Stats()\r\n\r\n wmg = profile.getWmg()\r\n known_winners = set()\r\n I = list(wmg.keys())\r\n\r\n G = nx.DiGraph()\r\n G.add_nodes_from(I)\r\n\r\n E = nx.DiGraph()\r\n E.add_nodes_from(I)\r\n for cand1, cand2 in itertools.permutations(wmg.keys(), 2):\r\n if wmg[cand1][cand2] > 0:\r\n E.add_edge(cand1, cand2, weight=wmg[cand1][cand2])\r\n\r\n # print(wmg)\r\n # self.output_graph(E)\r\n\r\n # Add any bridge edges from any tier in E\r\n # These are guaranteed to never be in a cycle, so will always be in the final graph after RP procedure\r\n Gc = G.copy()\r\n Gc.add_edges_from(E.edges())\r\n scc = [list(g.edges()) for g in nx.strongly_connected_component_subgraphs(Gc, copy=True) if len(g.edges()) != 0]\r\n bridges = set(Gc.edges()) - set(itertools.chain(*scc))\r\n G.add_edges_from(bridges)\r\n E.remove_edges_from(bridges)\r\n\r\n stats.num_initial_bridges = len(bridges)\r\n\r\n\r\n # Each node contains (G, E, T, P)\r\n # P is path, where each item is of form (G, E, K, a)\r\n # root = Node(value=(self.edges2string(G.edges(), I), self.edges2string(E.edges(), I)))\r\n root = Node(value=(G, E, [], []))\r\n stackNode = []\r\n stackNode.append(root)\r\n\r\n hashtable = set()\r\n\r\n END = self.BEGIN + self.TIMEOUT\r\n\r\n self.missed_winners = set(missed_winners)\r\n\r\n self.data = {}\r\n for w in missed_winners:\r\n self.data[w] = []\r\n\r\n while stackNode:\r\n # Pop new node to explore\r\n node = stackNode.pop()\r\n (G, E, T, P) = node.value\r\n\r\n if time.perf_counter() > END:\r\n print(\"TIMEOUT\")\r\n return sorted(known_winners), stats\r\n\r\n # Check hash\r\n hash_state = self.edges2string(G.edges(), I) + self.edges2string(E.edges(), I) + self.edges2string(T, I)\r\n if hash_state in hashtable:\r\n stats.num_hashes += 1\r\n if self.debug_mode == 3:\r\n print(\"hashed in outer hashtable\")\r\n continue\r\n hashtable.add(hash_state)\r\n\r\n stats.num_nodes += 1\r\n\r\n if self.debug_mode == 3:\r\n print(\"Popped new node: \")\r\n print(\"G:\", sorted(G.edges()))\r\n print(\"E:\", sorted(E.edges()))\r\n print(\"T:\", sorted(T))\r\n\r\n # Flag for whether expanding the current tier required finding max children\r\n f_found_max_children = 0\r\n\r\n # Continue performing RP on this state as long as tie-breaking order doesn't matter\r\n while len(E.edges()) != 0 or len(T) != 0:\r\n if self.stop_conditions(G, E, T, P, I, known_winners, stats) != -1:\r\n # Stop condition hit\r\n break\r\n\r\n if len(T) == 0:\r\n # Get a new tier\r\n (max_weight, max_edge) = max([(d['weight'], (u, v)) for (u, v, d) in E.edges(data=True)])\r\n T = [(u, v) for (u, v, d) in E.edges(data=True) if d['weight'] == max_weight]\r\n E.remove_edges_from(T)\r\n\r\n if self.debug_mode == 3:\r\n print(\"New tier =\", T)\r\n\r\n if len(T) == 1:\r\n # Tier only has one edge, just add it\r\n if self.debug_mode == 3:\r\n print(\"Only 1 edge in tier\")\r\n\r\n if nx.has_path(G, max_edge[1], max_edge[0]) is False:\r\n E.add_edges_from(T)\r\n P.append((self.edges2string(G.edges(), I), self.edges2string(E.edges(), I), known_winners.copy(), max_edge))\r\n 
E.remove_edges_from(T)\r\n G.add_edges_from(T)\r\n continue\r\n\r\n\r\n # Perform reductions every step:\r\n\r\n # Compute \"bridge edges\" which are not in any cycle\r\n Gc = G.copy()\r\n Gc.add_edges_from(T)\r\n scc = [list(g.edges()) for g in nx.strongly_connected_component_subgraphs(Gc, copy=True) if len(g.edges()) != 0]\r\n bridges = set(Gc.edges()) - set(itertools.chain(*scc))\r\n G.add_edges_from(bridges)\r\n T = list(set(T) - bridges)\r\n\r\n G_tc = nx.transitive_closure(G)\r\n\r\n # Remove \"inconsistent edges\" that cannot be added to G without causing cycle\r\n reverse_G = nx.DiGraph.reverse(G_tc)\r\n T = list(set(T) - set(reverse_G.edges()))\r\n\r\n # Remove \"redundant edges\": if there is already path from e[0] to e[1], can immediately add e\r\n redundant_edges = set()\r\n for e in T:\r\n if G_tc.has_edge(e[0], e[1]):\r\n redundant_edges.add(e)\r\n G.add_edges_from([e])\r\n stats.num_redundant_edges += len(redundant_edges)\r\n T = list(set(T) - redundant_edges)\r\n\r\n if len(T) == 0:\r\n # No need to find further children, as tier is now empty\r\n if self.debug_mode == 3:\r\n print(\"Tier empty\")\r\n continue\r\n\r\n # Used to break ties\r\n index = 0\r\n\r\n # Add each edge to stack by priority\r\n children = dict()\r\n T = sorted(T)\r\n for e in T:\r\n if not G_tc.has_edge(e[1], e[0]):\r\n f_found_max_children = 1\r\n\r\n Gc = G.copy()\r\n Gc.add_edges_from([e])\r\n Ec = E.copy()\r\n Tc = copy.deepcopy(T)\r\n Tc.remove(e)\r\n Pc = copy.deepcopy(P)\r\n\r\n EUT = E.copy()\r\n EUT.add_edges_from(T)\r\n Pc.append((self.edges2string(G.edges(), I), self.edges2string(EUT.edges(), I), known_winners.copy(), e))\r\n child_node = Node(value=(Gc,Ec,Tc,Pc))\r\n\r\n # LPwinners\r\n G_in_degree = Gc.in_degree(I)\r\n potential_winners = set([x[0] for x in G_in_degree if x[1] == 0])\r\n priority = len(potential_winners - known_winners)\r\n\r\n children[child_node] = (priority, index)\r\n index = index + 1\r\n\r\n if self.debug_mode == 3:\r\n print(\"added edge\", e)\r\n\r\n children_items = sorted(children.items(), key=lambda x: (x[1][0], x[1][1]))\r\n sorted_children = [key for key, value in children_items]\r\n stackNode += sorted_children\r\n break\r\n\r\n if len(E.edges()) == 0 and f_found_max_children == 0:\r\n # E is empty\r\n if self.debug_mode >= 2:\r\n print(\"E is empty\")\r\n self.add_winners(G, P, I, known_winners, stats)\r\n\r\n return sorted(known_winners), stats, self.data", "def create_repeater_chain(distance, n):\r\n G = nx.Graph()\r\n G.add_node('A', pos=np.array([0, 0]))\r\n G.add_node('B', pos=np.array([0, distance]))\r\n G.add_edge('A', 'B')\r\n if n > 0:\r\n G = insert_chain(G, 'A', 'B', n)\r\n nx.set_edge_attributes(G, values=global_file.params.number_of_fibres, name='#fibres')\r\n return G", "def generate(\n seeds=10,\n param_num_nodes=7,\n mode='train',\n param_dim=10,\n param_sel=100,\n param_mu=10,\n param_br=0.05,\n param_activity_wt=None,\n A=None,\n sp_to_id=None,\n min_coord=None,\n max_coord=None,\n org_pts=None,\n ):\n global dim, sel, mu, br, activity_wt, tree_lc, tree_rc, num_nodes\n\n dim=param_dim\n sel=param_sel\n mu=param_mu\n br=param_br\n activity_wt=param_activity_wt\n num_nodes = param_num_nodes\n\n sp_root = 0\n tree = None\n\n if mode == 'train':\n tree, tree_lc, tree_rc = generate_tree(sp_root, num_nodes)\n if param_activity_wt is None:\n # weights for the linear activity function\n num_wts = int(((dim * (dim + 1))/2) + 1)\n activity_wt = np.random.normal(0, 1, num_wts)\n\n if org_pts is None:\n org_pts = []\n # simulate data points\n # format: 
exampleID, species, values\n # region, species, coord1, coord2, ...., activity_value\n\n for i in tqdm(range(int(seeds))):\n pt_id = i\n\n # pick a random point of d-dimension\n rand_pt = np.random.uniform(min_coord, max_coord, dim)\n curr_pt = np.append([pt_id, sp_root], rand_pt)\n curr_activity = get_activity(modify_pt(rand_pt), activity_wt)\n # print('curr_pt:', curr_pt, 'curr_activity:', curr_activity); exit(0)\n org_pts.append(np.append(curr_pt, curr_activity))\n\n generated_points = []\n full_org_pts = []\n\n if mode == 'train':\n pool = Pool(16)\n sample_bag = pool.map(generate_bag, org_pts)\n for item in sample_bag:\n for val in item:\n val = list(val)\n full_org_pts.append(val)\n generated_points.append(val[:2]+modify_pt(val[2:-1])+[val[-1]])\n else:\n for val in org_pts:\n val = list(val)\n generated_points.append(val[:2]+modify_pt(val[2:-1])+[val[-1]])\n\n return generated_points, activity_wt, org_pts, full_org_pts, tree", "def g_iter(n):\n \"*** YOUR CODE HERE ***\"\n if n <= 3:\n return n\n else:\n i = 3\n x, y, z = 1, 2, 3\n new = 1\n while i < n:\n new = z + (2*y) + (3*x)\n x, y, z = y, z, new \n i += 1\n return new", "def tournament_selection(pool):\n return max(random.sample(pool, len(pool) // 5))", "def select_tournament_dominance_crowding(individuals, k, nr_individuals):\n def binary_tournament(ind1, ind2):\n if ind1.fitness.dominates(ind2.fitness):\n return ind1\n elif ind2.fitness.dominates(ind1.fitness):\n return ind2\n\n if ind1.fitness.crowding_dist < ind2.fitness.crowding_dist:\n return ind2\n elif ind1.fitness.crowding_dist > ind2.fitness.crowding_dist:\n return ind1\n\n if random.random() <= 0.5:\n return ind1\n return ind2\n\n def tournament(tour_individuals):\n best = tour_individuals[0]\n \n for entry in tour_individuals[1::]:\n best = binary_tournament(best, entry)\n return best\n \n chosen = []\n for _ in xrange(0, k):\n tour_individuals = random.sample(individuals, nr_individuals)\n winner = tournament(tour_individuals)\n winner = copy.deepcopy(winner)\n chosen.append(winner)\n return chosen", "def step():\n x_rand = sample()\n x_nearest = new_nearest_neighbour(x_rand)\n x_new = steer(x_nearest, x_rand)\n if obstacle_free(x_nearest, x_new):\n X_near = new_neighbourhood(x_new)\n x_min = x_nearest\n c_min = x_nearest.cost + x_nearest.dist_to(x_new)\n for x_near in X_near:\n if obstacle_free(x_near, x_new) and (x_near.cost + x_near.dist_to(x_new) < c_min):\n x_min = x_near\n c_min = (x_near.cost + x_near.dist_to(x_new) < c_min)\n x_new_node = add_node(x_new, x_min, True)\n for x_near in X_near:\n if obstacle_free(x_near, x_new) and (x_new_node.cost + x_near.dist_to(x_new) < x_near.cost):\n x_near.change_parent(x_new_node)\n # Here I check for goal paths and draw the circle\n updated = False\n if shared.root_path:\n updated = goal_path_resolve(shared.root_path[0])\n updated = updated or goal_path_resolve(shared.nodes[-1])\n if updated:\n diameter = shared.root_path_length\n center = ((shared.root_path[0].x + shared.root_path[-1].x) / 2,\n (shared.root_path[0].y + shared.root_path[-1].y) / 2)\n if shared.region:\n shared.region.remove_from_batch()\n shared.region = ellipse.Ellipse(center[0], center[1], diameter)\n shared.region.add_to_batch()", "def gen_task_sets_fairgen(nsets):\n start = time()\n eval_task_sets = []\n for util in utils:\n task_sets = []\n for i in range(nsets):\n ts = synth.mc_fairgen(set_id=i, u_lo=util, implicit_deadlines=True)\n synth.synth_c_pmf(ts)\n synth.set_fixed_priorities(ts)\n task_sets.append(ts)\n eval_task_sets.append(task_sets)\n 
stop = time()\n print('Task Set Generation MC-Fairgen: %.3fs' % (stop - start))\n pickle.dump(eval_task_sets, open(task_sets_path + 'task_sets_fairgen', 'wb'))", "def generate_states(self, current_state, no=10):\n future_states = []\n for i in range(no):\n next_state = self.next_state(current_state)\n future_states.append(next_state)\n current_state = next_state\n return future_states", "def semigroup_generators(self):", "def generate_n(k_problem, n):\n return [generate_first_random(k_problem) for i in range(n)]", "def search_loop(max_generations, individuals, grammar, replacement, selection, fitness_function):\n #Evaluate initial population\n evaluate_fitness(individuals, grammar, fitness_function)\n best_ever = max(individuals)\n individuals.sort(reverse=True)\n print_stats(1, individuals)\n for generation in range(2, (max_generations+1)):\n individuals, best_ever = step(\n individuals, grammar, replacement, selection, fitness_function, best_ever)\n print_stats(generation, individuals)\n return best_ever", "def cv_index_partitions(n: int, s: int):\n indices = np.arange(n)\n np.random.shuffle(indices)\n val_size = n // s # size of validation set\n for i in range(s):\n training = np.concatenate(\n (indices[0:i*val_size], indices[(i+1)*val_size:])\n )\n validation = indices[i*val_size:(i+1)*val_size]\n yield training, validation", "def next_member(cfg):\n population = cfg[\"developers\"]\n if cfg[\"developer_strategy\"] == \"round-robin\":\n for member in itertools.cycle(population):\n yield member\n else:\n while True:\n yield random.choice(population)", "def generator(gens):\n if len(gens) < 20:\n gens.append(rule_110(gens[-1], gens[-1].copy(), 1))\n generator(gens)\n return gens", "def g_iter(n):\n \"*** YOUR CODE HERE ***\"\n counter = 0\n term1 = 3\n term2 = 2\n term3 = 1\n loop = n-3\n\n if n<=3:\n return n\n\n while counter<loop:\n term1,term2,term3=term1+2*term2+3*term3,term1,term2\n counter +=1\n return term1", "def nsga_replacement(random, population, parents, offspring, args):\n survivors = []\n combined = list(population)\n combined.extend(offspring)\n \n # Perform the non-dominated sorting to determine the fronts.\n fronts = []\n pop = set(range(len(combined)))\n while len(pop) > 0:\n front = []\n for p in pop:\n dominated = False\n for q in pop:\n if combined[p] < combined[q]:\n dominated = True\n break\n if not dominated:\n front.append(p)\n fronts.append([dict(individual=combined[f], index=f) for f in front])\n pop = pop - set(front)\n \n # Go through each front and add all the elements until doing so\n # would put you above the population limit. At that point, fall\n # back to the crowding distance to determine who to put into the\n # next population. 
Individuals with higher crowding distances\n # (i.e., more distance between neighbors) are preferred.\n for i, front in enumerate(fronts):\n if len(survivors) + len(front) > len(population):\n # Determine the crowding distance.\n distance = [0 for _ in range(len(combined))]\n individuals = list(front)\n num_individuals = len(individuals)\n num_objectives = len(individuals[0]['individual'].fitness)\n for obj in range(num_objectives):\n individuals.sort(key=lambda x: x['individual'].fitness[obj])\n distance[individuals[0]['index']] = float('inf')\n distance[individuals[-1]['index']] = float('inf')\n for i in range(1, num_individuals-1):\n distance[individuals[i]['index']] = (distance[individuals[i]['index']] + \n (individuals[i+1]['individual'].fitness[obj] - \n individuals[i-1]['individual'].fitness[obj]))\n \n crowd = [dict(dist=distance[f['index']], index=f['index']) for f in front]\n crowd.sort(key=lambda x: x['dist'], reverse=True)\n last_rank = [combined[c['index']] for c in crowd]\n r = 0\n num_added = 0\n num_left_to_add = len(population) - len(survivors)\n while r < len(last_rank) and num_added < num_left_to_add:\n if last_rank[r] not in survivors:\n survivors.append(last_rank[r])\n num_added += 1\n r += 1\n # If we've filled out our survivor list, then stop.\n # Otherwise, process the next front in the list.\n if len(survivors) == len(population):\n break\n else:\n for f in front:\n if f['individual'] not in survivors:\n survivors.append(f['individual'])\n return survivors", "def print_generations(start, n):\n pb(start)\n for c in range(n):\n print(\"---\")\n start = life_generation(start)\n pb(start)\n return start", "def make_chunk(size, seed=0):\n rng = random.Random(seed)\n cubes = set([Cube()])\n while len(cubes) < size:\n c = rng.choice(list(cubes))\n n = rng.choice(list(c.neighbors()))\n cubes.add(n)\n return cubes", "def randomWins(RNA, wsize, step, trials):\n counter = 0\n rnatrialscores = []\n while counter < trials:\n newRNA = randSeq(RNA)\n rnatrialscores.append(rnaWin(newRNA, wsize, step)[0])\n counter += 1 \n \n return rnatrialscores", "def evolve(self, env, num_generations, num_episodes, num_frames):\n for gen in range(num_generations):\n\n if Trainer.VERBOSE:\n print(\"Generation:\", gen)\n\n # Generate new root Teams\n self.generation()\n\n # Evaluate current agents\n self.evaluation(env, num_episodes, num_frames)\n\n # Perform selection\n self.selection()\n\n # Return to top-performing agent. 
Typically not used, but nice to have\n ranked_agents = sorted(self.agent_pop, key=lambda rt : rt.team.fitness, reverse=True)\n return ranked_agents[0]", "def population_selection(population, sack, max_weight):\n sorted_population = population_performance(population, sack, max_weight)\n new_gen = []\n \n for fit_member in range(len(sorted_population) - 2): #killing two weakest\n new_gen.append(sorted_population[fit_member][0])\n\n return new_gen", "def make_parents(self):\r\n self.parents = []\r\n \r\n for loopindex in range(0, int(self.population_size * 0.6)):\r\n while True:\r\n if loopindex < int(self.population_size * 6 / 15):\r\n parent = random.choice(self.best_districts)\r\n else:\r\n parent = random.choice(self.worst_districts)\r\n \r\n if parent not in self.parents:\r\n self.parents.append(parent)\r\n break", "def step_through_generations(self, num_steps):\n for island in self._islands:\n for _ in range(num_steps):\n island.execute_generational_step()\n self.archipelago_age += num_steps", "def generate_test_set_dqn(n_free_min, n_free_max, d_edge_min, d_edge_max, Omega_max, Phi_max, Lambda_max,\n weighted, w_max, directed, size_test_set, to_torch=False):\n\n # Initialize the variables\n Budget_max = Omega_max + Phi_max + Lambda_max\n test_set = []\n if to_torch:\n test_set_torch = []\n\n print(\"==========================================================================\")\n print(\"Generates the test set... \\n\")\n\n # for all budgets\n for budget in tqdm(range(1, Budget_max + 1)):\n # initialize the budget's instances list\n test_set_budget = []\n if to_torch:\n test_set_budget_torch = []\n for k in range(size_test_set):\n # generate a random instance\n instance_budget_k = generate_random_instance(\n n_free_min,\n n_free_max,\n d_edge_min,\n d_edge_max,\n Omega_max,\n Phi_max,\n Lambda_max,\n weighted=weighted,\n w_max=w_max,\n Budget_target=budget,\n directed=directed,\n )\n G = instance_budget_k.G\n Omega = instance_budget_k.Omega\n Phi = instance_budget_k.Phi\n Lambda = instance_budget_k.Lambda\n J = instance_budget_k.J\n # solve the instance\n value, D, I, P = solve_mcn(G, Omega, Phi, Lambda, J=J, exact=True)\n # save the value, P, D in the Instance object\n instance_budget_k.value = value\n instance_budget_k.D = D\n instance_budget_k.I = I\n instance_budget_k.P = P\n # pushes it to memory\n test_set_budget.append(instance_budget_k)\n # if we want to save the corresponding InstanceTorch\n # to evaluate the training, we stop at Budget_max - 1\n if to_torch:\n instance_budget_k_torch = instance_to_torch(instance_budget_k)\n test_set_budget_torch.append(instance_budget_k_torch)\n test_set.append(test_set_budget)\n if to_torch:\n test_set_torch.append(test_set_budget_torch)\n\n if not os.path.exists('data'):\n os.mkdir('data')\n folder_name = 'test_data'\n if weighted:\n folder_name += '_w'\n if directed:\n folder_name += '_dir'\n path_test_data = os.path.join('data', folder_name)\n if not os.path.exists(path_test_data):\n os.mkdir(path_test_data)\n # Save the test sets\n file_path = os.path.join(path_test_data, \"test_set.gz\")\n pickle.dump(test_set, open(file_path, \"wb\"))\n if to_torch:\n file_path_torch = os.path.join(path_test_data, \"test_set_torch.gz\")\n pickle.dump(test_set_torch, open(file_path_torch, \"wb\"))", "def make_parallel(self, n):\n return super().make_parallel(n, True)", "def generation(self,rounds):\n a = []\n b = []\n for i in range(rounds):\n self.fight()\n c = self.avgFitness()\n a.append(c[0])\n b.append(c[1])\n self.sort()\n self.cull()\n 
self.rePop()\n self.refresh()\n self.fight()\n self.sort()\n print self\n plt.scatter([x for x in range(len(a))],a,color = \"red\")\n plt.scatter([x for x in range(len(b))],b,color = \"green\")\n plt.show()", "def generation(self,rounds):\n a = []\n b = []\n for i in range(rounds):\n self.fight()\n c = self.avgFitness()\n a.append(c[0])\n b.append(c[1])\n self.sort()\n self.cull()\n self.rePop()\n self.refresh()\n self.fight()\n self.sort()\n print self\n plt.scatter([x for x in range(len(a))],a,color = \"red\")\n plt.scatter([x for x in range(len(b))],b,color = \"green\")\n plt.show()", "def stochastic_universal_selection(self, fitness, num_parents):\n\n fitness_sum = numpy.sum(fitness)\n if fitness_sum == 0:\n self.logger.error(\"Cannot proceed because the sum of fitness values is zero. Cannot divide by zero.\")\n raise ZeroDivisionError(\"Cannot proceed because the sum of fitness values is zero. Cannot divide by zero.\")\n probs = fitness / fitness_sum\n probs_start = numpy.zeros(probs.shape, dtype=float) # An array holding the start values of the ranges of probabilities.\n probs_end = numpy.zeros(probs.shape, dtype=float) # An array holding the end values of the ranges of probabilities.\n\n curr = 0.0\n\n # Calculating the probabilities of the solutions to form a roulette wheel.\n for _ in range(probs.shape[0]):\n min_probs_idx = numpy.where(probs == numpy.min(probs))[0][0]\n probs_start[min_probs_idx] = curr\n curr = curr + probs[min_probs_idx]\n probs_end[min_probs_idx] = curr\n probs[min_probs_idx] = 99999999999\n\n pointers_distance = 1.0 / self.num_parents_mating # Distance between different pointers.\n first_pointer = numpy.random.uniform(low=0.0, \n high=pointers_distance, \n size=1)[0] # Location of the first pointer.\n\n # Selecting the best individuals in the current generation as parents for producing the offspring of the next generation.\n if self.gene_type_single == True:\n parents = numpy.empty((num_parents, self.population.shape[1]), dtype=self.gene_type[0])\n else:\n parents = numpy.empty((num_parents, self.population.shape[1]), dtype=object)\n\n parents_indices = []\n\n for parent_num in range(num_parents):\n rand_pointer = first_pointer + parent_num*pointers_distance\n for idx in range(probs.shape[0]):\n if (rand_pointer >= probs_start[idx] and rand_pointer < probs_end[idx]):\n parents[parent_num, :] = self.population[idx, :].copy()\n parents_indices.append(idx)\n break\n\n return parents, numpy.array(parents_indices)", "def SierpinskiGasketGraph(n):\n from sage.modules.free_module_element import vector\n from sage.rings.rational_field import QQ\n\n if n <= 0:\n raise ValueError('n should be at least 1')\n\n def next_step(triangle_list):\n # compute the next subdivision\n resu = []\n for a, b, c in triangle_list:\n ab = (a + b) / 2\n bc = (b + c) / 2\n ac = (a + c) / 2\n resu += [(a, ab, ac), (ab, b, bc), (ac, bc, c)]\n return resu\n\n tri_list = [list(vector(QQ, u) for u in [(0, 0), (0, 1), (1, 0)])]\n for k in range(n - 1):\n tri_list = next_step(tri_list)\n dg = Graph()\n dg.add_edges([(tuple(a), tuple(b)) for a, b, c in tri_list])\n dg.add_edges([(tuple(b), tuple(c)) for a, b, c in tri_list])\n dg.add_edges([(tuple(c), tuple(a)) for a, b, c in tri_list])\n dg.set_pos({(x, y): (x + y / 2, y * 3 / 4)\n for (x, y) in dg.vertices()})\n dg.relabel()\n return dg", "def two_step_generator(classes: list, paths_list: list, imgs_per_class: int, shape: tuple,\n nb_win: int, greys: bool, nb_to_gen: int, img_gen: ImageDataGenerator) -> list:\n \n datawin = list() \n datagen = 
list()\n \n for class_ in classes:\n print(class_)\n \n # Images paths list\n class_imgs_path = [paths_list[k] for k in range(len(paths_list)) if class_ in paths_list[k]]\n\n # Randomly choose images\n class_imgs_subset = np.random.choice(class_imgs_path, size=imgs_per_class, replace=False)\n\n # Get images\n class_imgs = get_imgs(class_imgs_subset)\n\n # Step 1: resize and crop on sliding windows\n class_new_imgs = create_windows_imgs(class_imgs, shape=shape, nb_win=nb_win, greys=greys)\n class_new_imgs = np.array(flat_list(class_new_imgs))\n datawin.append(class_new_imgs)\n \n # Step 2: DataGenerator\n class_datagen = datagen_class(class_new_imgs, nb_to_gen, img_gen)\n class_datagen = class_datagen.astype(int)\n\n datagen.append(class_datagen)\n \n return datawin, datagen", "def sampled_clique(clusters,strategy):\n G = nx.Graph()\n sample = []\n #Sample 'size' nodes from a single cluster\n if strategy == \"rand\":\n size = len(clusters)\n while len(sample) < size:\n cluster = random.choice(clusters)\n if len(cluster) >= size:\n sample = random.sample(cluster,size)\n #Sample 1 choice from each cluster\n elif strategy == \"optim\":\n for _,cluster in clusters.items():\n if len(cluster) > 0:\n sample.append(random.choice(cluster))\n for n1 in sample:\n for n2 in sample:\n if n1 != n2:\n G.add_edge(n1,n2)\n return G" ]
[ "0.66252106", "0.61852103", "0.61620736", "0.6136829", "0.61138487", "0.6082674", "0.6079343", "0.6030366", "0.6008962", "0.5985152", "0.5984775", "0.5952923", "0.58702976", "0.5806566", "0.57817847", "0.5779176", "0.57665956", "0.57347685", "0.57222146", "0.5704095", "0.5702277", "0.56958437", "0.564434", "0.5606501", "0.5602759", "0.55948955", "0.5594479", "0.55844074", "0.55707294", "0.55663884", "0.55653155", "0.5556282", "0.55339414", "0.5523098", "0.5498213", "0.54976404", "0.54932636", "0.5480365", "0.5466984", "0.54645395", "0.5458847", "0.5448592", "0.544707", "0.5445355", "0.54433167", "0.54327327", "0.5432549", "0.542449", "0.5408136", "0.53889614", "0.5379408", "0.5378208", "0.53730583", "0.53705645", "0.5361802", "0.53493845", "0.5333055", "0.53292155", "0.53269875", "0.5319445", "0.531044", "0.5294886", "0.5289742", "0.52821493", "0.5281836", "0.52776885", "0.5276277", "0.5273505", "0.5266939", "0.5266758", "0.5265478", "0.526327", "0.5256648", "0.52495974", "0.52469623", "0.5244888", "0.52437717", "0.5230887", "0.52261406", "0.5219751", "0.52194506", "0.5218949", "0.5213141", "0.5210563", "0.52079505", "0.52078265", "0.52016044", "0.519665", "0.51962465", "0.5193513", "0.5183672", "0.5181714", "0.5178835", "0.5177074", "0.51762915", "0.51762915", "0.5173529", "0.51674455", "0.51653856", "0.51604605" ]
0.6936438
0
Just outputting private data.
def get_data(self):
    return self._fullInput, self._fullOutput
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_private(self):\n print('Account Number : ', self.__Account)\n return \"\"", "def _printable(self):\n pass", "def output_data(self):\n pass", "def PrettyPrint(self):\r\n print(self.data)\r\n return", "def print_out():\n pass", "def _print_custom(self):\n pass", "def show_data(self, ):\r\n return print('society_name : {}\\n'\r\n 'flat : {}\\n'\r\n 'house_no : {}\\n'\r\n 'no_of_members : {}\\n'\r\n 'income : {}\\n '\r\n .format(self.society_name, self.flat, self.house_no, self.no_of_members, self.income))", "def pprint(self):\n # just here for defining the interface; work is done in subclasses\n pass", "def print_data(place):\n raise NotImplementedError", "def __repr__(self):\n return '''\n open key ({}, {})\n secret key {}\n '''.format(self.n, self.e, self.__d)", "def __repr__(self):\r\n rep = \"Public for n = {n}:\\n\\n\".format(n=self.n)\r\n rep += \"h = {h}\\n\".format(h=self.h)\r\n return rep", "def show_data():", "def print(self):\n # Your implementation here", "def __repr__(self, verbose=False):\r\n rep = \"Private key for n = {n}:\\n\\n\".format(n=self.n)\r\n rep += \"f = {f}\\n\\n\".format(f=self.f)\r\n rep += \"g = {g}\\n\\n\".format(g=self.g)\r\n rep += \"F = {F}\\n\\n\".format(F=self.F)\r\n rep += \"G = {G}\\n\\n\".format(G=self.G)\r\n if verbose:\r\n rep += \"\\nFFT tree\\n\"\r\n rep += print_tree(self.T_fft, pref=\"\")\r\n return rep", "def nice_output(self):\n return self.des", "def nice_output(self):\n return self.des", "def output_debug_info(self):", "def printOutput(self):\n pass", "def write_output(self):", "def stdout(self):\n pass", "def _Printer(self,data):\r\n sys.stdout.write(\"\\r\\x1b[K\"+data.__str__())\r\n sys.stdout.flush()", "def printDict(self):\n print str(self)", "def pprint(self):\n\t\tPrettyPrintUnicode().pprint(self.data)", "def display(self):\n print(self)", "def __repr__(self):\n\t\treturn repr(self.data)", "def __repr__(self):\n return repr(self.data)", "def print_me(self):\n return \"ID: %s Title: %s\" % (self.ID, self.title)", "def main():\n print(dumps(get_data()))\n return 0", "def printme(self):\n sys.stdout.write(self._header)\n for k in range(len(self)):\n sys.stdout.write(self.line(k))", "def print_data_members(self):\n keyVals = []\n for name in self.data_code['dataNames']:\n vals = getattr(self, name + 's')\n keyVals.append(vals)\n #print \"%ss = %s\" %(name,vals)\n\n msg = ''\n for name in self.data_code['dataNames']:\n msg += '%-10s ' % name\n msg += '\\n'\n\n nModes = len(keyVals[0])\n for i in xrange(nModes):\n for vals in keyVals:\n msg += '%-10g ' % vals[i]\n msg += '\\n'\n return msg + '\\n'", "def test_anon_private(self):\n self.do_visible(True, None, False)", "def Print(self):\n print(self.__dict__)", "def display(self):\n print(str(self))", "def output(self, msg):", "def __pout__(self):\n return self.__str__()", "def nice(self):\n print(self.getName(), \":\", self.getLen())", "def dump(self):\n return", "def dump(self) -> None:\n ...", "def write(self):", "def write(self):", "def print_data(data):\n print(str(data))\n return data", "def do_show(argv):\n\n global PRIVATE_KEY\n\n print(\"Private Key: '\" + PRIVATE_KEY + \"'\")", "def Write(self, data):\n print(data, end=u'')", "def printMixData(self):\n\t\tprint \"OPERATED MIXNODE: Name: %s, address: (%d, %s), PubKey: %s\" % (self.name, self.port, self.host, self.pubk)", "def __repr__(self):\n return '{}({})'.format(self.__class__.__name__, self._data)", "def display(self):\r\n print(self.title, 'written by', self.author)", "def dumpData(self,out):\n 
out.packSub0('NAME',self.id)\n if getattr(self,'isDeleted',False):\n out.packSub('DELE','i',0)\n return\n out.packSub('FNAM',self.type)\n out.packSub('FLTV','f',self.value)", "def display_data(self):\n # type: () -> dict\n return {}", "def test_anon_private_owned(self):\n self.do_visible(False, 'pattieblack', False)", "def print_contents(self):\n print self.values", "def __str__(self):\n return self.printable()", "def __debug_print__(self):\n print(self.question_data)", "def __str__(self):\n return '\\tNo readable data representation.'", "def print(self):\n\n print(self)", "def dump_content(self):\n raise NotImplementedError()", "def __str__(self):\n return (\"UUID: \" + str(self.uuid) + \"\\n\"\n \"Data: \" + str(self.data) + \"\\n\" +\n \"Tex: \" + str(self.texOutput) + \"\\n\")", "def __repr__(self):\n return str(self.data)", "def __repr__(self):\r\n return f'OPA(\"{self.data}\")'", "def print(self):\r\n self.print_avec_separateur()", "def data(self):\n pass", "def data(self):\n pass", "def write_private(f, priv, name, embedded_flag):\n\n f.write(\"// Define private structure\\n\")\n write_mat(f, priv['L'], 'priv_L')\n write_vec(f, priv['Dinv'], 'priv_Dinv', 'c_float')\n write_vec(f, priv['P'], 'priv_P', 'c_int')\n f.write(\"c_float priv_bp[%d];\\n\" % (len(priv['Dinv']))) # Empty rhs\n\n if embedded_flag != 1:\n write_vec(f, priv['Pdiag_idx'], 'priv_Pdiag_idx', 'c_int')\n write_mat(f, priv['KKT'], 'priv_KKT')\n write_vec(f, priv['PtoKKT'], 'priv_PtoKKT', 'c_int')\n write_vec(f, priv['AtoKKT'], 'priv_AtoKKT', 'c_int')\n write_vec(f, priv['Lnz'], 'priv_Lnz', 'c_int')\n write_vec(f, priv['Y'], 'priv_Y', 'c_float')\n write_vec(f, priv['Pattern'], 'priv_Pattern', 'c_int')\n write_vec(f, priv['Flag'], 'priv_Flag', 'c_int')\n write_vec(f, priv['Parent'], 'priv_Parent', 'c_int')\n\n f.write(\"Priv %s = \" % name)\n if embedded_flag != 1:\n f.write(\"{&priv_L, priv_Dinv, priv_P, priv_bp, priv_Pdiag_idx, \" +\n \"%d, &priv_KKT, priv_PtoKKT, priv_AtoKKT, \" % priv['Pdiag_n'] +\n \"priv_Lnz, priv_Y, priv_Pattern, priv_Flag, priv_Parent};\\n\\n\")\n else:\n f.write(\"{&priv_L, priv_Dinv, priv_P, priv_bp};\\n\\n\")", "def __debug(self):\n\t\tprint \"Dumping Object Chat\"\n\t\tprint self.userA.username +' + '+ self.userB.username", "def __str__(self):\n print_string = 'key: {} | value: {}'.format(\n str(self.key), str(self.value)\n )\n return print_string", "def printPayment(self):\n print self.output()", "def __repr__(self):\n\t\treturn f\"Name: {self.name}\\nAge: {self.age}\"", "def tell(self):\n print('Name {}, Age {}'. 
format(self.name, self.age), end=\" \")", "def _generate_output(self):\n raise NotImplementedError()", "def dumpData(self,out):\n raise AbstractError", "def dumpData(self,out):\n raise AbstractError", "def _populate_output(self):\n pass", "def __repr__(self):\n return self.pretty_print(self.__dict__)", "def __str__(self):\n str(self.data)\n return str", "def __str__(self):\n buf = io.StringIO()\n args.output.write(buf, self.root, self.headings)\n return buf.getvalue()", "def __str__(self):\r\n return f\"OPA of {self.data}\"", "def Private(self):\n self.Send(self.EncryptString('private\\n'))\n print self.DecryptString(self.Recv(4096))\n print self.DecryptString(self.Recv(4096))", "def data(self):", "def dumps(self):\n pass", "def plugin_data_repr(self):", "def print_attr(self):\n return \"name : {0}\\nprice : {1}\\ndescription : {2}\".format(\n self.name, self.price, self.description\n )", "def dumps(self) -> str:\n ...", "def info(self):", "def info(self):", "def help_dump(self):\n print(DUMP)", "def printSummary(self):\n pass", "def print_contents(self):\n logging.info(self.contents)", "def print_dict(data):\n print data", "def print_self(self):\n #print(f\"\\nself: \\nN: {self.N} \\nQ: {self.Q} \\npi: {self.pi}\"); \n s = ''\n s += f'N: {self.N}, \\n'\n s += f'Q: {self.Q:.2f}, \\n'\n s += f'U: {self.U:2.3f}\\n'\n s += f'policy: ' + ' '.join(f\"{x:2.3f}\" for x in self.pi)\n print(s)\n self.env.render()", "def _print(self, text):\n\t\tif self.verbose:\n\t\t\tprint text", "def _show(self, indent = 0):\n print(\" \"*indent, \"Name:\", self.name)\n print(\" \"*indent, \"Description:\", self.description)", "def print(self):\n print('Name:', self.name)\n print('Camera:', self.camera)\n print('Memory:', self.memory)\n print('Ram:', self.ram)\n print('Price:', self.price)\n print('Image:', self.image)", "def dump(self):\n print PccUtString.trimString(self.dumpBuf(), \"\\n\")\n return self", "def foo_printer(self):\n print(\"\\nHi I'm {}\".format(self.foo))", "def is_private():", "def printout_all(self, indent_level):\n indent = \" \"*indent_level*INDENTATION_MULTIPLIER\n\n print(indent, \"Recipient ID:\", self.ID, sep='')\n print(indent, \"|-name:\", self.name, sep='')\n\n print(indent, \"|-version info:\", self.version_info, sep='')\n print(indent, \"|-IP address:\", self.access_IP_address, sep='')\n print(indent, \"|-URL:\", self.access_URL, sep='')\n print(indent, \"|-username for user/pwd credentials:\", self.username_creds, sep='')\n print(indent, \"|-password for user/pwd credentials:\", self.password_creds, sep='')\n print(indent, \"|-key credentials:\", self.key_creds, sep='')\n print(indent, \"|-info about network:\", self.network_info, sep='')", "def get_info(self):\n if self.own_home:\n return print(f'Hi! My name is {self.name}, I\\'m {self.age}. Currently I have {self.own_home} house')\n return print(f'Hi! My name is {self.name}, I\\'m {self.age}. 
I don\\'t have any home now!')", "def __repr__(self):\n return self.display()", "def write(self):\n pass", "def write(self):\n pass", "def print_data(self, e=0):\n #self.third_mnemo.Hide()\n #print(self.d)\n #print \"n: \" + str(self.n)\n #print \"data dict: \" + str(self.d)\n #print \"file: \" + self.file\n #print \"content: \" + self.txt\n #print(self.n)\n # print \"n: \" + str(self.n) + \" parent: \" + str(self.d[self.n][0]) + \" previous: \" + str(self.d[self.n][1]) + \" mext: \" + str(self.d[self.n][2]) + \" item: \" + str(self.d[self.n][3]) + \" mnemo: \" + str(self.d[self.n][4])\n pass", "def __repr__(self):\n\t\treturn f'{self.body} - {self.author}'" ]
[ "0.69535846", "0.6887361", "0.66857016", "0.66233796", "0.6584199", "0.6315667", "0.62706643", "0.6250064", "0.6220874", "0.6203634", "0.6198626", "0.6130218", "0.6088508", "0.6044061", "0.603079", "0.603079", "0.602542", "0.60135573", "0.5967384", "0.5955937", "0.5954297", "0.59393966", "0.5930053", "0.5930004", "0.59230906", "0.590956", "0.5889143", "0.5882002", "0.5874192", "0.58704567", "0.5863103", "0.58284754", "0.5802114", "0.57997036", "0.57895434", "0.5776846", "0.5775321", "0.57693887", "0.57640606", "0.57640606", "0.57598114", "0.5756978", "0.57553244", "0.5749824", "0.5746759", "0.57314384", "0.5727566", "0.5714684", "0.571176", "0.5705411", "0.57014865", "0.56883377", "0.5683864", "0.5680749", "0.56774145", "0.567183", "0.5661706", "0.565902", "0.56584686", "0.56467664", "0.56467664", "0.56447875", "0.5641053", "0.5639645", "0.5638917", "0.5626401", "0.5621218", "0.56172", "0.56109506", "0.56109506", "0.5610773", "0.56040514", "0.5600174", "0.559783", "0.5597707", "0.55832726", "0.5580845", "0.5579002", "0.5573973", "0.55730736", "0.55576676", "0.5550722", "0.5550722", "0.5550107", "0.55415964", "0.5527007", "0.5511683", "0.5510587", "0.55056703", "0.5505153", "0.5499991", "0.5499678", "0.5497782", "0.5496917", "0.5496073", "0.5494144", "0.5493043", "0.54911387", "0.54911387", "0.5490743", "0.5485394" ]
0.0
-1
Executing every forest in the collection, activating their networks, and collecting data about the best fitness function along the way.
def execute(self):
    process_list = []
    forests_queue = Queue(self.power)
    iterational = 0
    print '| |-starting evaluation, training and validation'
    for one_forest in self._forests:
        process_list.append(
            Process(target=main_async_method, args=(forests_queue, copy(one_forest.to_portal()), iterational, self.settings)))
        iterational += 1
    for proc in process_list:
        proc.start()
    for proc in process_list:
        proc.join()
    for smth in range(forests_queue.qsize()):
        tmp = forests_queue.get()
        self._forests[tmp['place']].fitness = tmp['fitness']
    fitness_summ = sum(map(lambda forest: forest.fitness, self._forests))
    fss = map(lambda x: x.fitness, self._forests)
    print 'avg = ', str(sum(fss) / len(fss)), 'max = ', max(fss)
    self.roulet = map(lambda x: x.fitness / fitness_summ, self._forests)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mutate(self):\n for forest in self._forests:\n forest.mutate(self._fullInput)", "def run(self, num_iterations = 50, **kwargs):\n \n #setup system\n self.cost_calculator = t.CostCalculator(self.suppliers_allcards, self.all_ensembles_dict)\n bounds = np.array(self.cost_calculator.ensemble_sizes) - 1\n #define cost functions\n cost_func = lambda p: sum(self.cost_calculator.get_cost(p))\n #create model\n self.model = ga(cost_func, bounds, **kwargs)\n \n fitness_list = [];\n \n for i in range(num_iterations):\n #Update\n f = next(self.model)\n #get fitness values\n fitness_list.append(f[0])\n #Output\n print('\\r(%d/%d) '%(i+1,num_iterations), end = '')\n print('top ensemble fitness: %1.1f '%f[0], end = '')\n \n print('\\nDone')\n self.solution = self.cost_calculator.decode_arrangement(self.model.get_solution())", "def main():\n t = []\n for i in range(1, 19):\n t.append(i)\n config = Config()\n config.DEBUG = True\n config['time_list']=t\n config['load_graphs_from_xml']=True\n\n defaults = dict(num_samples=100, max_depth=5, run=0, num_runs=1,num_trees=100, stat='logrank', split_stat='logrank', num_folds=None,exp='flood',\n verbose=True, folds=None, load_graphs_from_xml=True, time_list=t)\n for key, value in defaults.items():\n cur_value = config.get(key, None)\n # print(\"key={0}:cur_value={1}\".format(key,cur_value))\n config[key] = value if cur_value is None else cur_value\n config.DEBUG = True\n #loadExperimentFile(config, filename=experiment_Path, experiment_name=\"flood\")\n #config.parseOpts()\n print('Start Grow Forest')\n growForest(config)", "def learn(self):\n for a in self.agents:\n a.learn()", "def main():\n create_sets()\n optimal_weights = genetic_algorithm()\n obtain_best_model(optimal_weights)", "def runner(self):\n\n print('[ INFO ]: Initializing the forest fires program runner...')\n\n df, features, predictor = self.preprocess()", "def run(self, verbose=False):\n\n cost = {}; cost[\"best\"] = []; cost[\"mean\"] = []\n for i in range(self.max_iters):\n\n # prints out information at current cycle\n if verbose:\n print(\"Iteration: {}\".format(i),\n \"Fitness: {}\".format(self.forest[0][0]))\n\n # reproduction phase\n self.reproduce()\n\n # seed dispersal phase\n self.seedlings = []\n for tree in self.population:\n self.disperse(tree[1])\n tree[1].year += 1\n\n # selection phase\n self.select()\n\n # decays exploration parameters\n if (self.epsilon > 0):\n self.epsilon -= self.epsilon_decay\n\n # stores statistics and updates counter of iterations\n cost[\"best\"].append(self.population[0][0])\n cost[\"mean\"].append( sum( [ tree[0] for tree in self.population ] )\\\n / len(self.population) )\n self.iteration += 1\n\n return cost", "def train(self):\n self.ae_train(self.net0, self.ae0_optimizer, self.train0_loader, self.val_loader, name='Net0')\n self.ae_train(self.net1, self.ae1_optimizer, self.train1_loader, self.val_loader, name='Net1')\n self.ae_train(self.net2, self.ae2_optimizer, self.train2_loader, self.val_loader, name='Net2')\n\n self.classifier_train(self.net0, self.optimizer0, self.train0_loader, self.val_loader, name='Net0')\n self.classifier_train(self.net1, self.optimizer1, self.train1_loader, self.val_loader, name='Net1')\n self.classifier_train(self.net2, self.optimizer2, self.train2_loader, self.val_loader, name='Net2')", "def apply_neurons(self):\n for neuron in range(self.n_outputs):\n self.uf_activate(neuron)", "def run_all(self):\n # print(\"running all nodes\")\n executed = set()\n node_update_states = {node: node.block_updates for node in 
self.flow_view.node_items}\n\n def traverse_upwards(node):\n # Traverse upwards to the top of data flow graph\n if node in executed:\n return\n for port in node.inputs:\n for connection in port.connections:\n traverse_upwards(connection.out.node)\n # print(\"executing\", node)\n node.update_event()\n executed.add(node)\n\n for node in self.flow_view.node_items:\n node.block_updates = True\n\n for node in self.flow_view.node_items:\n traverse_upwards(node)\n\n for node in self.flow_view.node_items:\n node.block_updates = node_update_states[node]\n # print(\"All nodes executed\")", "def run(self):\n count = self.neuron_count\n for i in range(0, count):\n self.run(i)", "def run_all():\n db = DBInterface()\n year = Config.get_property(\"league_year\")\n session = Session(bind=db.engine)\n\n scraper.scrape_all(db, session, year)\n session.commit()\n\n bets.predict_all(db, session)\n session.commit()\n session.close()", "def run(self):\n for i in range(self.generations):\n log.info(f'Training population in generation {i + 1}...')\n if i == 0:\n self.create_first_generation()\n else:\n self.create_next_generation()\n log.info(f'best individual: {self.best_individual()[1]}')\n log.info(f'best individual score: {self.best_individual()[0]}')", "def train(self):\n for name in self.network_names:\n if isinstance(name, str):\n net = getattr(self, 'net')\n net.train()", "def run_evolutionary_generations(self):\n \n # Evolve the generation.\n for i in range(self.generations):\n logging.info(\"***Doing generation %d of %d***\" %\n (i + 1, self.generations))\n \n self.train_networks(self.networks)\n \n if self.is_classification:\n average_accuracy, highest_accuracy, lowest_accuracy, highest_scoring_network = self.get_accuracy_stats(self.networks) \n \n if highest_scoring_network is not None:\n highest_scoring_network.save_trained_model(os.path.join(self.save_directory, self.dataset + \"_best_network_at_iteration_%d_acc%f\" % (i, highest_accuracy)))\n \n logging.info(\"Generation average: %.2f%%\" % (average_accuracy * 100))\n logging.info(\"Generation best: %.2f%%\" % (highest_accuracy * 100))\n logging.info(\"Generation worst: %.2f%%\" % (lowest_accuracy * 100))\n logging.info('-'*80)\n else:\n average_loss, highest_loss, lowest_loss, best_scoring_network = self.get_loss_stats(self.networks) \n if best_scoring_network is not None:\n best_scoring_network.save_trained_model(os.path.join(self.save_directory, self.dataset + \"_best_network_at_iteration_%d_loss%f\" % (i, lowest_loss)))\n \n logging.info(\"Generation average: %.2f%%\" % (average_loss * 100))\n logging.info(\"Generation best: %.2f%%\" % (highest_loss * 100))\n logging.info(\"Generation worst: %.2f%%\" % (lowest_loss * 100))\n logging.info('-'*80)\n # Evolve, except on the last iteration.\n if i != self.generations - 1:\n self.networks = self.optimizer.evolve(self.networks)\n \n self.save_network_objects(self.networks)\n \n if self.is_classification:\n self.networks = sorted(self.networks, key=lambda x: x.accuracy, reverse=True)\n else:\n self.networks = sorted(self.networks, key=lambda x: x.loss, reverse=False)\n \n self.print_networks(self.networks[:5])\n \n self.save_trained_network_models(self.dataset, self.networks[:5])", "def do_make_(self):\n global g_list_of_classifier\n\n for ite_clf in g_list_of_classifier:\n ite_clf.learn()\n return ''", "def main():\n datasets = {}\n for dataset_name in tqdm(SOURCE_DATASET_NAMES, desc=\"Processing datasets and fitting base models\"):\n logger.info(f\"processing dataset {dataset_name}\")\n 
clusters_path: Optional[str] = None\n if dataset_name not in PAIRWISE_ONLY_DATASETS:\n clusters_path = os.path.join(DATA_DIR, dataset_name, dataset_name + \"_clusters.json\")\n train_pairs_path = None\n val_pairs_path = None\n test_pairs_path = None\n else:\n train_pairs_path = os.path.join(DATA_DIR, dataset_name, \"train_pairs.csv\")\n val_pairs_path = os.path.join(DATA_DIR, dataset_name, \"val_pairs.csv\")\n if not os.path.exists(val_pairs_path):\n val_pairs_path = None\n test_pairs_path = os.path.join(DATA_DIR, dataset_name, \"test_pairs.csv\")\n\n logger.info(f\"loading dataset {dataset_name}\")\n anddata = ANDData(\n signatures=os.path.join(DATA_DIR, dataset_name, dataset_name + \"_signatures.json\"),\n papers=os.path.join(DATA_DIR, dataset_name, dataset_name + \"_papers.json\"),\n name=dataset_name,\n mode=\"train\",\n specter_embeddings=os.path.join(DATA_DIR, dataset_name, dataset_name + \"_specter.pickle\"),\n clusters=clusters_path,\n block_type=BLOCK_TYPE,\n train_pairs=train_pairs_path,\n val_pairs=val_pairs_path,\n test_pairs=test_pairs_path,\n train_pairs_size=N_TRAIN_PAIRS_SIZE,\n val_pairs_size=N_VAL_TEST_SIZE,\n test_pairs_size=N_VAL_TEST_SIZE,\n preprocess=True,\n )\n\n logger.info(f\"featurizing {dataset_name}\")\n train, val, test = featurize(\n anddata,\n FEATURIZER_INFO,\n n_jobs=N_JOBS,\n use_cache=True,\n chunk_size=100,\n nameless_featurizer_info=NAMELESS_FEATURIZER_INFO,\n nan_value=NAN_VALUE,\n )\n X_train, y_train, nameless_X_train = train\n X_val, y_val, nameless_X_val = val\n X_test, y_test, nameless_X_test = test\n\n dataset = {}\n dataset[\"anddata\"] = anddata\n dataset[\"X_train\"] = X_train\n dataset[\"y_train\"] = y_train\n dataset[\"X_val\"] = X_val\n dataset[\"y_val\"] = y_val\n dataset[\"X_test\"] = X_test\n dataset[\"y_test\"] = y_test\n dataset[\"nameless_X_train\"] = nameless_X_train\n dataset[\"nameless_X_val\"] = nameless_X_val\n dataset[\"nameless_X_test\"] = nameless_X_test\n dataset[\"name\"] = anddata.name\n datasets[dataset_name] = dataset\n\n anddatas = [\n datasets[dataset_name][\"anddata\"]\n for dataset_name in SOURCE_DATASET_NAMES\n if dataset_name not in PAIRWISE_ONLY_DATASETS\n ]\n\n X_train = np.vstack([datasets[dataset_name][\"X_train\"] for dataset_name in SOURCE_DATASET_NAMES])\n y_train = np.hstack([datasets[dataset_name][\"y_train\"] for dataset_name in SOURCE_DATASET_NAMES])\n X_val = np.vstack(\n [datasets[dataset_name][\"X_val\"] for dataset_name in SOURCE_DATASET_NAMES if dataset_name not in {\"augmented\"}]\n )\n y_val = np.hstack(\n [datasets[dataset_name][\"y_val\"] for dataset_name in SOURCE_DATASET_NAMES if dataset_name not in {\"augmented\"}]\n )\n\n nameless_X_train = np.vstack([datasets[dataset_name][\"nameless_X_train\"] for dataset_name in SOURCE_DATASET_NAMES])\n nameless_X_val = np.vstack(\n [\n datasets[dataset_name][\"nameless_X_val\"]\n for dataset_name in SOURCE_DATASET_NAMES\n if dataset_name not in {\"augmented\"}\n ]\n )\n\n logger.info(\"fitting pairwise\")\n union_classifier = PairwiseModeler(n_iter=N_ITER, monotone_constraints=MONOTONE_CONSTRAINTS)\n union_classifier.fit(X_train, y_train, X_val, y_val)\n\n nameless_union_classifier = None\n if USE_NAMELESS_MODEL:\n logger.info(\"nameless fitting pairwise for \" + str(SOURCE_DATASET_NAMES))\n nameless_union_classifier = PairwiseModeler(\n n_iter=N_ITER,\n monotone_constraints=NAMELESS_MONOTONE_CONSTRAINTS,\n )\n nameless_union_classifier.fit(nameless_X_train, y_train, nameless_X_val, y_val)\n logger.info(\"nameless pairwise fit for \" + 
str(SOURCE_DATASET_NAMES))\n\n logger.info(\"fitting clusterer for\")\n union_clusterer = Clusterer(\n FEATURIZER_INFO,\n union_classifier.classifier,\n cluster_model=FastCluster(),\n search_space=search_space,\n n_jobs=N_JOBS,\n nameless_classifier=nameless_union_classifier.classifier if nameless_union_classifier is not None else None,\n nameless_featurizer_info=NAMELESS_FEATURIZER_INFO if nameless_union_classifier is not None else None,\n )\n union_clusterer.fit(anddatas)\n print(\n \"best clustering parameters:\",\n union_clusterer.best_params,\n )\n\n models = {}\n models[\"clusterer\"] = union_clusterer\n\n with open(\n f\"full_union_model_script_dump_average_{FEATURIZER_VERSION}.pickle\",\n \"wb\",\n ) as _pickle_file:\n pickle.dump(models, _pickle_file)\n logger.info(\"Done.\")", "def get_forest(self, verbose):\n _antecessors = []\n for key, cluster in self.clusters.items():\n if cluster.leaf_cluster is True:\n _antecessors.append(cluster.antecessor)\n _antecessors = remdup_preserve_order(_antecessors)\n _antecessors = sorted(_antecessors, key=get_cluster_idx, reverse=True)\n\n _tree_idx = 0\n\n print('Generating forest...')\n print('')\n count= 0.0\n if verbose:\n progress_bar = progress_bar = AnimatedProgressBar(end=len(_antecessors), width=50, \\\n fill='=', blank='.')\n for antecessor in _antecessors:\n if verbose and (count % 1 == 0):\n progress_bar + 1\n progress_bar.show_progress()\n tree = Tree(antecessor, idx = _tree_idx, acorns=self)\n self.forest[_tree_idx] = tree\n _tree_idx += 1\n\n if verbose:\n progress_bar.progress = 100 # Done\n progress_bar.show_progress()\n print('')\n print('')\n\n return", "def set_train(self):\n for m in self.models.values():\n m.train()", "def train(self) -> None:\n for module in self.modules.values():\n module.train()\n return", "def _forest_nodes(self):\n\n self.arbor._grow_tree(self)\n root = self.root\n for link in root._links:\n yield self.arbor._generate_tree_node(self.root, link)", "def run(self):\n for _ in range(self.epoch, conf.FX_MAX_EPOCHS):\n self.train()\n\n with torch.no_grad():\n self.test()\n\n self.epoch += 1\n self.save_ck()\n\n self.show_completion_msg()", "def train(self):\n max_tuple = self.max_gain()\n # If that gain is 0 then every node should be a pure leaf (hopefully) and you can stop\n while max_tuple.gain != 0:\n max_tuple.node.split(max_tuple.attribute)\n max_tuple = self.max_gain()", "def forest(self):\n\n forest_parameters = [{'n_estimators': hel.powerlist(10, 2, 4),\n 'min_samples_leaf': list(range(2, 10, 1)),\n 'criterion': ['mae', 'mse'],\n 'random_state': [1], 'n_jobs': [-1]}]\n forest_grid = GridSearchCV(estimator=RandomForestRegressor(),\n param_grid=forest_parameters,\n scoring=self.scorer, cv=5, n_jobs=-1,\n iid=False)\n forest_grid_result = forest_grid.fit(self.X_train, self.y_train)\n best_forest_parameters = forest_grid_result.best_params_\n forest_score = forest_grid_result.best_score_\n print('Best forest params: ' + str(best_forest_parameters))\n print('Forest score: ' + str(forest_score))\n return RandomForestRegressor(\n n_estimators=best_forest_parameters['n_estimators'],\n min_samples_leaf=best_forest_parameters['min_samples_leaf'],\n criterion=best_forest_parameters['criterion'],\n random_state=1, n_jobs=-1)", "def run_all_tests():\n remove_dbs()\n run_training_tests()\n run_custom_training_tests()\n run_training_save_tests()\n run_validation_tests()\n run_feature_extraction_tests()", "def run(self):\n for i in range(self.exploration_steps):\n self.single_step(i)\n if self.save_checkpoints:\n 
self.save_results()\n self.save_results()\n if self.verbose:\n print(\"\\nExploration completed\")\n return", "def explore(self):\n\n i = 0\n while True:\n i += 1\n \n state_counts = {game.__class__.__name__: Counter() for game in self.games} \n\n policies_prime = []\n pi_sum = 0\n v_sum = 0\n counter = 0\n \n # bookkeeping\n log.info(f'Starting Exploration Iteration #{i} ...')\n\n # for task in tasks...\n for _ in range(self.args['taskBatchSize']):\n\n # create deepcopy for training a theta'\n policy_prime = copy.deepcopy(self.nnet)\n \n # sample a game (task)\n game = np.random.choice(self.games, p=self.probs)\n log.info(f'Sampled game {type(game).__name__} ...')\n\n # multiprocess to get our training examples\n iterationTrainExamples = deque([], maxlen=self.args['maxlenOfQueue'])\n iterationTrainExamples = run_apply_async_multiprocessing(self.executeEpisode, [(MCTS(game, self.nnet, self.args), type(game)(), self.args.copy())] * self.args['numEps'], self.args['numWorkers'], desc='Self Play')\n iterationTrainExamples, iter_counters = zip(*iterationTrainExamples)\n\n iterationTrainExamples = list(itertools.chain.from_iterable(iterationTrainExamples))\n state_counts[game.__class__.__name__] += sum(iter_counters, Counter())\n\n # shuffle examples before training\n shuffle(iterationTrainExamples)\n\n # train our network\n pi_v_losses = policy_prime.train(iterationTrainExamples)\n\n policies_prime.append(policy_prime.state_dict())\n\n for pi,v in pi_v_losses:\n pi_sum += pi\n v_sum += v\n counter += 1\n \n # compute average parameters and load into self.nnet\n self.nnet.load_average_params(policies_prime)\n\n # training new network, keeping a copy of the old one\n self.nnet.save_checkpoint(folder=self.args['checkpoint'] + '/exploring', filename='temp.pth.tar')\n self.pnet.load_checkpoint(folder=self.args['checkpoint'] + '/exploring', filename='temp.pth.tar')\n pmcts = MCTS(self.games[0], self.pnet, self.args)\n\n\n # Arena if we choose to run it\n if self.args['arenaComparePerGame'] > 0:\n # ARENA\n nmcts = MCTS(self.games[0], self.nnet, self.args)\n\n log.info('PITTING AGAINST PREVIOUS VERSION')\n arena = Arena()\n pwins, nwins, draws = arena.playGames(self.pnet, self.nnet, self.args, self.games)\n\n log.info('NEW/PREV WINS : %d / %d ; DRAWS : %d' % (nwins, pwins, draws))\n if pwins + nwins == 0 or float(nwins) / (pwins + nwins) < self.args['updateThreshold']:\n log.info('REJECTING NEW MODEL')\n self.nnet.load_checkpoint(folder=self.args['checkpoint'] + '/exploring', filename='temp.pth.tar')\n else:\n log.info('ACCEPTING NEW MODEL')\n self.nnet.save_checkpoint(folder=self.args['checkpoint'] + '/exploring', filename=self.getCheckpointFile(i))\n self.nnet.save_checkpoint(folder=self.args['checkpoint'] + '/exploring', filename='best.pth.tar')\n\n log.info('Iteration Complete. Writing counts to \"%s/%s\"...', *self.args['json_folder_file'])\n # create the json file\n path = os.path.join(self.args['json_folder_file'][0], self.args['json_folder_file'][1])\n with open(path, 'a+') as f:\n if os.stat(path).st_size == 0: ## file just created/empty\n log.info('No counts found. 
Writing to empty file.')\n old_counts = {game.__class__.__name__: Counter() for game in self.games}\n else: ## load the counts from the file\n log.info('Loading counts...')\n f.seek(0)\n str_counts = f.read()\n # print('STRING OF JSON:', type(str_counts), str_counts)\n old_counts = json.loads(str_counts)\n old_counts = {game: Counter(v) for game, v in old_counts.items()}\n master_counts = {game.__class__.__name__: state_counts[game.__class__.__name__]+old_counts[game.__class__.__name__] for game in self.games}\n # countiung logic: turn {gametype -> Counter} into {gametype -> {state -> count}}\n master_counts = {game: dict(counter) for game, counter in master_counts.items()}\n log.info('Writing...')\n f.truncate(0) #clear file\n json.dump(master_counts, f)\n log.info('Counts written to json file \"%s/%s\"...', *self.args['json_folder_file'])", "def _initilise_graph_db(self):\n for collector in self.collectors:\n collector.init_graph_db()", "def run(self, iterations):\n # print(f'Before:\\n {self.population}\\n')\n # self.best()\n # print(f'Best Genome before: {self.best_genome.array}, fitness={self.best_genome.fitness} ')\n\n mutator = Rand1MutationOperator(self.population, self.bounds, 0.2)\n mixer = ExponentialCrossoverOperator(self.minfun)\n replacer = ElitistReplacementOperator()\n\n for _ in range(iterations):\n candidate_population = Population(None, None, 0)\n for target in self.population.collection:\n # List with genomes who will be the donors\n mutant = mutator.apply(target)\n # Genome modified by replacing a few random positions\n candidate_genome = mixer.apply(target, mutant)\n\n candidate_population.add(candidate_genome)\n\n # Targets are replaced by candidates from the population if candidate has less fitness than target\n self.population = replacer.apply(self.population, candidate_population)\n\n # print(f'After:\\n {self.population}\\n')\n # self.best()\n # print(f'Best Genome after: {self.best_genome.array}, fitness={self.best_genome.fitness} ')", "def loadall(bot) :\n for feature in features :\n load(bot, feature)", "def fit(self, train_features, train_actuals):\n for name in self.models.keys():\n print('-'*shutil.get_terminal_size().columns)\n print(\"evaluating {}\".format(name).center(columns))\n print('-'*shutil.get_terminal_size().columns)\n estimator = self.models[name]\n est_params = self.params[name]\n gscv = GridSearchCV(estimator, est_params, cv=5, scoring=self.scoring_metric)\n gscv.fit(train_features, train_actuals)\n print(\"best parameters are: {}\".format(gscv.best_estimator_))\n self.single_classifier_best[name] = gscv", "def bulk_train(self):\n logger.info(\"collecting subfolders - relations\")\n relations = self.collect_subfolders(self.input_dir)\n logger.info(\"relations - {}\".format(relations))\n\n execution_times = []\n\n for rel, rel_path in tqdm(relations.items(), desc=\"relations\"):\n logger.info(\"collecting training files from {}\".format(rel_path))\n tr_files = self.collect_files(rel_path, self.regexp_train)\n hyper_params = self.get_hyperparams()\n hyper_params['graph'] = tr_files\n\n output_folder = os.path.join(self.output_dir, rel)\n if not os.path.exists(output_folder):\n logger.info(\"creating {} (did not exist)\".format(output_folder))\n os.makedirs(output_folder)\n\n for params in tqdm(ParameterGrid(hyper_params), desc=\"training embedding\"):\n logger.info(\"hyperparams: {}\".format(params))\n train_file = params['graph']\n model_name = self.compute_model_name(params, output_folder)\n logger.info('training starspace model \"{}\" from 
file \"{}\"'.format(\n model_name, train_file))\n external_output, delta = self.call_starspace(params, train_file, model_name)\n logger.info(\"executed in {:0.2f}s\".format(delta))\n\n logger.info(\"external command output logged in {}\".format(self.external_log))\n if not os.path.exists(self.output_dir):\n logger.info(\"creating {} (did not exist)\".format(self.output_dir))\n os.makedirs(self.output_dir)\n\n with open(self.external_log, 'a') as f:\n f.write(external_output)\n\n execution_times.append(dict({ 'time': delta }, **params))\n \n return execution_times", "def eval(self):\n for name in self.model_names:\n if isinstance(name, str):\n net = getattr(self, 'net' + name)\n net.eval()", "def eval(self):\n for name in self.model_names:\n if isinstance(name, str):\n net = getattr(self, 'net' + name)\n net.eval()", "def run(self):\n\n rf = RandomForestClassifier(**self._settings['specs'])\n\n # Extract sum stats and model indices from ref table\n indices = toArray(self._refTable, 'idx').flatten()\n sumStat = toArray(self._refTable, 'sumstat')\n\n # Do a 5-fold cross-validation\n accuracies = self._cross_val(sumStat, indices, rf, 5)\n\n # Fit on summary statistics (the more the better)\n rf.fit(sumStat, indices)\n\n # Predict probabilities of models on summary obs\n sumStatTest = np.array(self._pp.scaledSumStatObsData).reshape(1, -1)\n pred = rf.predict_proba(sumStatTest)\n\n return {mod : np.round(pred[0,i],3) for i, mod in enumerate(self._modelNames)}", "def eval(self):\n for name in self.model_names:\n if isinstance(name, str):\n net = getattr(self, 'net_' + name)\n net.eval()", "def main():\n data = load_data()\n analyze_features(data['full_features'])\n model = train(data)\n\n with open('model.pickle', 'wb') as f:\n pickle.dump(model, f)\n evaluate(model, data)", "def train_and_eva():\n for sol in _solvers:\n for sub_u_rate in _sub_u_rates:\n print(\"now processing \" + sol + \" \" + str(sub_u_rate))\n pu_first_stage_training(sol, sub_u_rate)\n first_stage_test(sol, sub_u_rate)\n print(\"\\n\\n\")", "def run(self, generations=1000):\n gcount = 0\n \n while gcount<=generations:\n try:\n print \"Gen: \"+str(gcount),\n self.population = zip (self.population, [self.target]*len(self.population))\n self.population = self.pool.map(f, self.population)\n except:\n pass\n for i in self.population:\n print i[0],i[1]\n self.population = [organism.Organism(x[0], x[1]) for x in self.population]\n self.population.sort()\n print \" Max fitness: \"+str(self.population[::-1][1].fitness)\n try:\n if self.population[0] <= self.ppop[0]:\n self.ppop = self.population[::-1][0:10] # The top ten organisms\n else:\n self.population = self.ppop # We got worse! go back!\n except:\n self.ppop = self.population\n self.population = self.population[::-1][0:10]\n try:\n self.breed()\n except:\n print \"Breeding error\"\n gcount+=1", "def eval_genomes(population, conf):\n for (_, g) in population:\n eval_genome(g, conf)", "def growForest(config, load_exp_file=True):\n\n silent = config.get('silent', False)\n experiment_Path = r\"C:\\Users\\user\\Desktop\\Prediction_model\\experiment\\flood.exp\"\n\n if load_exp_file:\n #loadExperimentFile(config, filename=config.exp)\n loadExperimentFile(config, filename=experiment_Path, experiment_name=\"flood\")\n\n forests = []\n results = []\n\n\n # do multiple runs if needed. 
note that we start at config.run, not zero\n for run in range(config.num_runs):\n training_graphs, testing_graphs = splitDict(config.graphs, int(len(config.graphs) * .8), random=True)\n\n \"\"\"\n # perform under-sampling if needed\n if hasattr(config, 'underlabel'):\n under_graphs = {}\n skip_count = 0\n for k in training_graphs.keys():\n if training_graphs[k].class_label == config.underlabel and random.random() <= config.underval:\n skip_count += 1\n else:\n under_graphs[k] = training_graphs[k]\n print('Undersampled ' + str(skip_count) + ' graphs')\n training_graphs = under_graphs\n \"\"\"\n # print out some useful info on the class distribution\n counts = defaultdict(int)\n for graph in training_graphs.values():\n counts[graph.class_label] += 1\n print('training:', len(training_graphs), counts)\n\n counts = defaultdict(int)\n for graph in testing_graphs.values():\n counts[graph.class_label] += 1\n print('testing:', len(testing_graphs), counts)\n\n for graph in training_graphs.values():\n counts[graph.class_label] += 1\n print('total:', len(config.graphs), counts)\n\n print('\\nrun:', run)\n config.run = run\n\n srrf = SRRForest(config)\n #srrf.growForest(training_graphs)\n srrf.growForest(config.graphs)\n forests.append(srrf)\n #srrf.training_graph_ids = list(training_graphs.keys())\n #training_labeling = srrf.labelGraphs(training_graphs,config.time_list)\n #outOfBagLabels=srrf.getOutOfBagLabels()\n #print(\"outOfBagLabels\")\n #print(outOfBagLabels)\n #c=srrf.compute_oob_score(training_graphs, outOfBagLabels)\n #print(\"concordance index:\")\n #print(c)\n config.saveTrees(srrf)\n\n #results.append(c)\n\n\n\n\n \"\"\"\n\n df = pd.DataFrame(columns=['lon', 'lat', 'survival_probability', 'time'])\n\n\n srrf.testing_graph_ids = testing_graphs.keys()\n testing_labeling = srrf.labelGraphs(testing_graphs,config.time_list)\n\n\n\n\n\n\n\n for i,h in testing_labeling.items():\n\n lat = i.graph.attributes_by_type.get(('cell', 'lat'))[0].value\n lon = i.graph.attributes_by_type.get(('cell', 'lon'))[0].value\n for t, label in h.items():\n df = df.append(\n {'lon': lon, 'lat': lat, 'survival_probability': label[1], 'time': t},\n ignore_index=True)\n\n sort_by_time = df.sort_values('time')\n print(sort_by_time.head())\n import plotly.express as px\n fig = px.scatter_mapbox(sort_by_time, lat=\"lat\", lon=\"lon\", hover_data=[\"survival_probability\"],\n color=\"survival_probability\", animation_frame=\"time\", animation_group=\"time\",\n color_continuous_scale=px.colors.cyclical.IceFire, size_max=15, zoom=10, height=500)\n fig.update_layout(mapbox_style=\"open-street-map\")\n fig.update_layout(margin={\"r\": 0, \"t\": 0, \"l\": 0, \"b\": 0})\n fig.show()\n \"\"\"\n\n\n\n #config.saveTrees((srrf,)) ###config.saveTree is giving us an eror type error: unable to pickle dict keys.\n\n #print('numruns: %s' % (config.num_runs))\n #print(results)\n\n\n #return results", "def select(self):\n\n def truncate(self):\n \"\"\" Truncates forest to maximum number of trees. \"\"\"\n\n self.population = self.population[:self.max_number_trees]\n\n def SortOnItem(list_, item_loc):\n \"\"\" Sorts based on a given item. 
\"\"\"\n\n templist = [elmt[item_loc] for elmt in list_]\n index = np.argsort(templist)\n return [list_[i] for i in index]\n\n # adds current seedlings to forest\n for tree in self.seedlings:\n\n # if tree does not competes with another existing one, adds it\n if tree not in self.population:\n self.population.append(tree)\n\n # sorts the trees of the forest in ascending values - minimization\n self.population = SortOnItem(self.population, item_loc=0)\n\n # removes unfit trees from forest\n truncate(self)", "def __init__(self,\n lower, upper ,\n fun ,\n max_std, min_std ,\n init_numb_trees = 10 ,\n max_numb_trees = 20 ,\n max_seeds = 10 ,\n min_seeds = 1 ,\n epsilon = 0.1 ,\n epsilon_decay = 0.0 ,\n max_iters = 100 ,\n mut_proba = 0.1 ,\n seed = None ,\n ):\n\n # generates a seed for the random number generator\n if (seed == None):\n self.seed = random.randint(0, 1000)\n else:\n self.seed = seed\n random.seed(self.seed)\n\n # assigns properties of FO algorithm\n self.max_number_trees = max_numb_trees\n self.max_seeds = max_seeds\n self.min_seeds = min_seeds\n self.epsilon = epsilon\n self.epsilon_decay = epsilon_decay\n self.max_iters = max_iters\n self.max_std = max_std\n self.min_std = min_std\n self.mut_proba = mut_proba\n\n # assigns fitness function\n self.evaluate = fun\n\n # stores lower and upper bounds\n self.lower = lower\n self.upper = upper\n\n # evaluates dimension of the optimal problem\n assert ( len(lower)==len(upper) ), \\\n \"'lower' and 'upper' must be of the same dimension.\"\n self.dim = len(lower)\n\n # initialises a forest of trees\n self.population = []\n for _ in range(init_numb_trees):\n tree = Tree(lower, upper)\n if (fun != None):\n self.population.append((fun(tree.vector), tree))\n else:\n self.population.append((sys.float_info.max, tree))\n\n # initialises iterations counter\n self.iteration = 1\n\n # creates a seedlings buffer\n self.seedlings = []", "def finetuned():\n launch_training_on_all_splits(experiment='full', splits=NAME_SPLIT, base_model='ft', dropout=0.7304, learning_rate=0.0000976)", "def run_all_tasks(data_dir):\n print(\"Training and testing for all tasks ...\")\n for t in range(20):\n run_task(data_dir, task_id=t + 1)", "def eval_all(folder):\n optimizers = [\n tf.keras.optimizers.Adadelta(learning_rate=0.01),\n tf.keras.optimizers.Adagrad(learning_rate=0.002),\n tf.keras.optimizers.Adam(learning_rate=0.0001),\n tf.keras.optimizers.Adamax(learning_rate=0.0005),\n tf.keras.optimizers.Ftrl(learning_rate=0.002),\n tf.keras.optimizers.Nadam(learning_rate=0.001),\n tf.keras.optimizers.RMSprop(learning_rate=0.0005),\n tf.keras.optimizers.SGD(learning_rate=0.003),\n ]\n\n epochs = [\n 500, 120, 80, 150, 300, 60, 100, 500\n ]\n\n biased_randomized = [\n (models.DefaultModel, False),\n (models.BiasedModel, False),\n (models.NeuralModel, False),\n (models.DefaultModel, True),\n (models.BiasedModel, True),\n (models.NeuralModel, True),\n ]\n\n for optimizer, n_epochs in zip(optimizers, epochs):\n for model, rndmz in biased_randomized:\n eval_optimizer(folder,\n model,\n optimizer,\n n_epochs,\n rndmz)", "def execute(self):\n # set observations for all agents\n observation = self.environment.get_observation_for_agent()\n for ag in self.environment.agents:\n ag.observation = observation\n # main loop\n while not self.environment.end_episode:\n # each agent choose its action\n self.environment.choose_action()\n # next state\n self.environment.calculate_next_state()\n # is the end of the episode\n self.environment.calculate_end_episode()\n # set observations 
for all agents\n observation = self.environment.get_observation_for_agent()\n for ag in self.environment.agents:\n ag.observation = observation", "def _process_datasets_all_frames(self):\n datasets = os.listdir(self.separated_root)\n for dataset in datasets:\n dataset_path = join(self.separated_root, dataset)\n\n for model in self.models:\n\n attacks_list = os.listdir(dataset_path)\n\n for attack in attacks_list:\n attack_path = join(dataset_path, attack)\n\n for prop in self.properties:\n property_alias = prop.get_property_alias()\n\n if os.path.exists(\n join(self.output_features, dataset, attack, property_alias, model.alias)):\n print('%s already extracted features' % dataset)\n continue\n\n path_train = join(attack_path, self.train_alias)\n path_test = join(attack_path, self.test_alias)\n\n X_train, y_train, indexes_train, samples_train = self._get_dataset_contents(path_train,\n property_alias)\n X_test, y_test, indexes_test, samples_test = self._get_dataset_contents(path_test,\n property_alias)\n\n output_features = join(self.output_features, dataset, attack, property_alias, model.alias)\n\n features_train = self._fetch_features(X_train, model, output_features, self.train_alias)\n features_test = self._fetch_features(X_test, model, output_features, self.test_alias)\n\n # saving features\n np.save(join(output_features, (NAME_FEATURES % self.train_alias)), features_train)\n np.save(join(output_features, (NAME_FEATURES % self.test_alias)), features_test)\n\n # saving targets\n np.save(join(output_features, (NAME_TARGETS % self.train_alias)), y_train)\n np.save(join(output_features, (NAME_TARGETS % self.test_alias)), y_test)\n np.save(join(output_features, (NAME_TARGETS % self.test_alias)), y_test)\n\n # saving samples names\n self.__save_txt(join(output_features, (NAME_SAMPLES % self.train_alias)), samples_train)\n self.__save_txt(join(output_features, (NAME_SAMPLES % self.test_alias)), samples_test)", "def step(self):\n\t\tnumpy.random.shuffle(self.agents_list)\n\t\tfor agent in self.agents_list:\n\t\t\tagent.produce()\n\t\tfor agent in self.agents_list:\n\t\t\tagent.charge()\n\t\tfor agent in self.agents_list:\n\t\t\tif agent.strategy == 0: \n\t\t\t\tagent.retribute()\n\t\tfor agent in self.agents_list:\n\t\t\tif agent.strategy == 0: \n\t\t\t\tagent.give()\n\t\tfor agent in self.agents_list:\n\t\t\tagent.consume()\n\t\tfor agent in self.agents_list:\n\t\t\tagent.solve_consumption_deficit()\n\t\tfor site in self.sites:\n\t\t\tsite.recovery()\n\t\tfor agent in self.agents_list:\n\t\t\tagent.sprout()", "def calc(self):\n\t\tfor neuron in self.neurons.items():\n\t\t\tneuron.calculate()", "def eval_fairgen():\n print(\"Evaluation: Fairgen\")\n modes = [\n # name, function\n ('dSMC', ana.d_smc),\n ('dAMC', ana.d_amc),\n ('EDF-VD', ana.d_edf_vd),\n ('pSMC', ana.p_smc),\n ('pAMC-BB', ana.p_amc_bb),\n ('pAMC-BB+', ft.partial(ana.p_amc_bb, ignore_hi_mode=True))\n ]\n\n pool = mp.Pool()\n task_sets_list = pickle.load(open(task_sets_path + 'task_sets_fairgen', 'rb'))\n for name, func in modes:\n start = time()\n rates = []\n for task_sets in task_sets_list:\n rates.append(100 * np.average(pool.map(func, task_sets)))\n pickle.dump(rates, open(eval_fairgen_path + name, 'wb'))\n stop = time()\n print('%s: %.3fs' % (name, (stop - start)))", "def _optimize(self) -> None:\n\n for i, agent in enumerate(self.agents):\n states, actions, rewards, next_states, dones = self.memory.sample()\n\n actor_next_state = self._agent_states(i, next_states)\n next_actions = torch.cat(\n [a.actor_target(actor_next_state) 
for a in self.agents], 1\n )\n next_q = agent.critic_target(next_states, next_actions).detach()\n target_q = rewards[:, i].view(-1, 1) + self.gamma * next_q * (\n 1 - dones[:, i].view(-1, 1)\n )\n local_q = agent.critic_local(states, actions)\n\n value_loss = agent.loss_fn(local_q, target_q)\n agent.value_optimizer.zero_grad()\n value_loss.backward()\n agent.value_optimizer.step()\n\n local_actions = []\n for i, a in enumerate(self.agents):\n local_states = self._agent_states(i, states)\n local_actions.append(\n a.actor_local(local_states)\n if a == agent\n else a.actor_local(local_states).detach()\n )\n local_actions = torch.cat(local_actions, 1)\n policy_loss = -agent.critic_local(states, local_actions).mean()\n\n agent.policy_optimizer.zero_grad()\n policy_loss.backward()\n agent.policy_optimizer.step()\n\n self._update_target_model(agent.critic_local, agent.critic_target)\n self._update_target_model(agent.actor_local, agent.actor_target)", "def eval(self):\n for name in self.network_names:\n if isinstance(name, str):\n net = getattr(self, 'net')\n net.eval()", "def run_functions(self):\n for function in self.functions:\n try:\n function()\n except Exception as err:\n logger.exception(\n f\"[red]Failed running and collecting data for function: {function.__name__}[/red]\"\n )\n logger.error(traceback.format_exc())\n logger.error(f\"[red]{err}[/red]\")\n logger.error(\"Continuing..\")", "def run(self):\n self.membershipFunction()\n self.interpretingMF()\n self.rules()\n self.standardComposition_Min()\n self.standardComposition_Max()\n self.defuzzification()", "def eval_genomes(genomes, config_):\n data = next_batch()\n assert data is not None\n inputs, outputs = data\n inputs = preprocessor(inputs)\n for _, genome in tqdm(genomes):\n net = RecurrentNet.create(genome, config_)\n mse = 0\n for single_inputs, output in zip(inputs, outputs):\n net.reset()\n mask, score = gate_activation(net, single_inputs)\n selected_score = score[mask]\n if selected_score.size == 0:\n xo = 0.5\n else:\n xo = np.sum(selected_score) / selected_score.size\n mse += (xo - output.item())**2\n genome.fitness = 1 / (1 + mse)", "def __init__(self, dims, treeCount, incAdd = 1, testDims = 3, dimCount = 4, rotCount = 32):\n # Support structures...\n self.cats = dict() # Dictionary from cat to internal indexing number.\n self.treeCount = treeCount\n self.incAdd = incAdd\n \n # Setup the classification forest...\n self.classify = DF()\n self.classify.setInc(True)\n self.classify.setGoal(Classification(None, 1))\n self.classify.setGen(LinearClassifyGen(0, 1, testDims, dimCount, rotCount))\n \n self.classifyData = MatrixGrow()\n self.classifyTrain = self.treeCount\n \n # Setup the density estimation forest...\n self.density = DF()\n self.density.setInc(True)\n self.density.setGoal(DensityGaussian(dims))\n self.density.setGen(LinearMedianGen(0, testDims, dimCount, rotCount))\n self.density.getPruner().setMinTrain(48)\n \n self.densityData = MatrixGrow()\n self.densityTrain = self.treeCount", "def train_all_curated(self, bench=False):\n train_X, train_y = self.format_input(self.M.curated_genes, self.neg_train_genes)\n self.train(train_X, train_y)\n pkl.dump(self, open(self.save_path + '/nash_model_trained.pkl', 'wb'))\n if bench:\n self.benchmark(train_X, train_y)\n\n # do feature selection on dataset as a whole so it is easier to be scored\n if self.feat_sel:\n self.dataset = pd.DataFrame(self.skb.transform(self.dataset), index=self.dataset.index)", "def infer(self):\r\n for i in range(6):\r\n count_before = 
len(self.graph.nodes)\r\n\r\n self.graph.cleanup().toposort()\r\n try:\r\n for node in self.graph.nodes:\r\n for o in node.outputs:\r\n o.shape = None\r\n model = gs.export_onnx(self.graph)\r\n model = shape_inference.infer_shapes(model)\r\n self.graph = gs.import_onnx(model)\r\n except Exception as e:\r\n log.info(\"Shape inference could not be performed at this time:\\n{}\".format(e))\r\n try:\r\n self.graph.fold_constants(fold_shapes=True)\r\n except TypeError as e:\r\n log.error(\"This version of ONNX GraphSurgeon does not support folding shapes, please upgrade your \"\r\n \"onnx_graphsurgeon module. Error:\\n{}\".format(e))\r\n raise\r\n\r\n count_after = len(self.graph.nodes)\r\n if count_before == count_after:\r\n # No new folding occurred in this iteration, so we can stop for now.\r\n break", "def train(self, iterations=1):\n for _ in range(iterations):\n self.trainer.train()\n self.test_network()", "def tuneRandomForest(train_set):\n\n auc_score = make_scorer(roc_auc_score)\n acc = make_scorer(accuracy_score)\n\n train_set = pd.read_csv(train_set, sep=\"\\t\", low_memory=False)\n\n train_output = train_set[\"output\"].values\n train_features = train_set[train_set.columns.drop([\"labels\", \"output\"])].values\n\n #X_train, X_test, y_train, y_test = train_test_split(train_features, train_output, test_size=0.20)\n\n # define parameters to be optimized\n parameters = {\n 'n_estimators': [int(x) for x in range(200, 3000, 300)],\n 'max_features': ['log2', 'sqrt', \"auto\"],\n 'criterion': [\"gini\", \"entropy\"],\n }\n #plotGrid(parameters, script_path + \"/results/GridSearchPlot.png\")\n\n scores = ['precision', 'recall', 'f1', auc_score, acc] # compute efficiency based on scores\n for score in scores:\n print(\"# Tuning hyper-parameters for %s\" % score)\n\n tune_search = GridSearchCV(\n RandomForestClassifier(n_jobs=-1),\n parameters,\n scoring=score\n )\n #tune_search.fit(X_train, y_train)\n tune_search.fit(train_features, train_output)\n print(tune_search.best_params_)\n\n means = tune_search.cv_results_['mean_test_score']\n stds = tune_search.cv_results_['std_test_score']\n for mean, std, params in zip(means, stds, tune_search.cv_results_['params']):\n print(\"%0.3f (+/-%0.03f) for %r\" % (mean, std * 2, params))\n\n #y_true, y_pred = y_test, tune_search.predict(X_test)\n # print(classification_report(y_true, y_pred))\n #print()", "def trainNet():", "def run_algorithm(self):\n print(f\"Checking all possible configurations with {self.algorithm}...\")\n\n if self.algorithm == \"test\" or (self.algorithm == \"greedy\" and\n self.iterations == 1000):\n\n # Test each configuration found with greedy (1000 iterations)\n while True:\n try:\n self.index += 1\n self.batteries = self.load_batteries(self.index)\n\n # Break if all configurations are checked\n except FileNotFoundError:\n break\n self.calculate_cable()\n self.link_houses()\n greedy(self, 1000)\n\n # Load best solution if user wanted to run greedy\n if self.algorithm == \"greedy\":\n self.load()\n self.plot_houses()\n\n # Call correct algorithm\n else:\n self.load()\n if self.algorithm == \"stepdown\":\n stepdown(self)\n elif self.algorithm == \"greedy\":\n greedy(self, self.iterations)\n elif self.algorithm == \"hill\":\n hill_climber(self, self.iterations)\n elif self.algorithm == \"dfs\":\n dfs(self)\n elif self.algorithm == \"random\":\n random_algorithm(self, self.iterations)\n elif self.algorithm == \"bnb\":\n bnb(self)\n\n self.load()\n self.plot_houses()", "def run_isolation_forest(file_path):\n\n features_list = 
['Direction', 'Speed']\n df_train = pd.read_csv(f'{file_path}/without_anom.csv')\n\n df_train = df_train[features_list]\n\n scalar = MaxAbsScaler()\n\n X_train = scalar.fit_transform(df_train)\n\n random_model = MultiOutputRegressor(\n RandomForestRegressor(max_depth=2, max_features=\"sqrt\")\n )\n\n # lab_enc = preprocessing.LabelEncoder()\n # training_scores_encoded = lab_enc.fit_transform(X_train)\n random_model.fit(X_train, X_train)\n pred = random_model.predict(X_train)\n # isolation_model = MultiOutputRegressor(IsolationForest()).fit(X_train)\n # pred = isolation_model.predict(X_train)\n test_path = \"C:\\\\Users\\\\Yehuda Pashay\\\\Desktop\\\\fligth_data\\\\data_set\\\\test\\\\chicago_to_guadalajara\\\\down_attack\"\n df_test = pd.read_csv(f'{test_path}/sensors_8.csv')\n df_test = df_test[features_list]\n\n Y_test = scalar.transform(df_test)\n test_pred = random_model.predict(Y_test)\n a = 4", "def _evaluate_fitness(self, population: Population):\n for n, individual in enumerate(population.individuals):\n\n # Dataset extraction using individual features\n X_data = self._create_dataset(individual, self._X)\n\n # Get scores for each fitness strategy (each objective)\n scores = [fitness_func.eval_fitness(X=X_data, y=self._y, num_feats=len(population.features))\n for fitness_func in self.fitness]\n\n # If the number of features is an objective\n if self.optimize_features:\n scores.append(self.features_function(individual=individual,\n total_feats=len(self._population.features)))\n\n # Create a solution\n individual.fitness = Solution(scores)\n\n return population", "def run_all(operations=ops):\n for operation in operations:\n run(operation)", "def train(self):\n for data_tier in self.data_tiers:\n fd = open(self.data_path + '/training_data_' + data_tier + '.json', 'r')\n self.preprocessed_data[data_tier] = json.load(fd)\n fd.close()\n tot = len(self.preprocessed_data[data_tier]['features'])\n p = int(math.ceil(tot*0.8))\n training_features = np.array(self.preprocessed_data[data_tier]['features'][:p])\n trend_training_classifications = np.array(self.preprocessed_data[data_tier]['trend_classifications'][:p])\n avg_training_classifications = np.array(self.preprocessed_data[data_tier]['avg_classifications'][:p])\n t1 = datetime.datetime.utcnow()\n self.clf_trend[data_tier].fit(training_features, trend_training_classifications)\n self.clf_avg[data_tier].fit(training_features, avg_training_classifications)\n t2 = datetime.datetime.utcnow()\n td = t2 - t1\n self.logger.info('Training %s for data tier %s took %s', self.name, data_tier, str(td))\n joblib.dump(self.clf_trend[data_tier], self.data_path + '/' + self.name + '_trend_' + data_tier + '.pkl')\n joblib.dump(self.clf_avg[data_tier], self.data_path + '/' + self.name + '_avg_' + data_tier + '.pkl')", "def execute(self, *f_args):\n selection = self._selections[int(math.floor(f_args[0]))].name()\n representation = self._representations[int(\n math.floor(f_args[1]))].name()\n mutation = self._mutations[int(math.floor(f_args[2]))].name()\n crossover = self._crossovers[int(math.floor(f_args[3]))].name()\n\n population = int(round(f_args[4]))\n selection_crossover = f_args[5]\n selection_mutation = f_args[6]\n generations = int(math.floor(f_args[7]))\n precision = int(round(f_args[8]))\n max_retry = int(round(f_args[9]))\n\n values = {}\n args = collections.namedtuple(\n \"args\",\n [\"precision\", \"threads\", \"dimensions\",\n \"selection\", \"representation\", \"crossover\", \"mutation\",\n \"population\", \"selection_mutation\", 
\"selection_crossover\",\n \"generations\", \"max_retry\"])\n\n for function_cls in self._functions:\n values[function_cls] = {}\n for dimension in range(1, 2):\n # prepare new alg\n alg = basic_ag.BaseAG(\n selection=selection,\n representation=representation,\n mutation=mutation,\n crossover=crossover,\n population=population,\n selection_crossover=selection_crossover,\n selection_mutation=selection_mutation,\n generations=generations,\n dimension=dimension,\n precision=precision)\n\n fabicrated_args = args(\n precision=precision, max_retry=max_retry,\n dimensions=dimension, threads=5,\n selection=selection,\n representation=representation,\n mutation=mutation,\n crossover=crossover,\n population=population,\n selection_crossover=selection_crossover,\n selection_mutation=selection_mutation,\n generations=generations)\n alg.set_args(fabicrated_args)\n\n function_cls.set_args(fabicrated_args)\n function = function_cls(dimension=dimension)\n\n rez = alg(function)\n info = alg.get_info()\n\n values[function_cls][dimension] = (\n rez, function.local_mins, info, fabicrated_args)\n\n return self._get_value(values)", "def eval(self):\n self.train(mode=False)", "def runall():\n sclogic.runall()", "def start_neuroevolution(x, y, x_test, y_test):\n\n connections = [(0, INPUT0, OUTPUT0), (1, INPUT1, OUTPUT0), (2, INPUT0, OUTPUT1), (3, INPUT1, OUTPUT1)]\n genotypes = [{0: True, 1: True, 2: True, 3: True} for d in xrange(5)]\n\n for its in xrange(0,5):\n print \"iteration\", its\n\n fitnesses = []\n # test networks\n for i in xrange(0,len(genotypes)):\n fitnesses.append(eval_fitness(connections, genotypes[i], x, y, x_test, y_test, run_id=str(its) + \"/\" + str(i)))\n\n # get indices of sorted list\n fitnesses_sorted_indices = [i[0] for i in reversed(sorted(enumerate(fitnesses), key=lambda x: x[1]))]\n\n print \"connections:\\n\"\n print connections\n for ra in xrange(0,len(fitnesses_sorted_indices)):\n print fitnesses[fitnesses_sorted_indices[ra]], genotypes[fitnesses_sorted_indices[ra]]\n\n # run evolutions\n # todo: fiddle with parameters, include size of network in fitness?\n new_gen = []\n # copy five best survivors already\n m = 5\n if m > len(fitnesses):\n m = len(fitnesses)\n\n for i in xrange(0,m):\n print \"adding:\", fitnesses[fitnesses_sorted_indices[i]], genotypes[fitnesses_sorted_indices[i]]\n new_gen.append(genotypes[fitnesses_sorted_indices[i]])\n\n for i in xrange(0,len(fitnesses_sorted_indices)):\n fi = fitnesses_sorted_indices[i]\n r = np.random.uniform()\n # select the best for mutation and breeding, kill of worst.\n if r <= 0.2:\n # mutate\n connections, gen = add_connection(connections, genotypes[i])\n new_gen.append(gen)\n r = np.random.uniform()\n if r <= 0.5:\n connections, gen = add_node(connections, genotypes[i])\n new_gen.append(gen)\n\n r = np.random.uniform()\n if r <= 0.1:\n # select random for breeding\n r = np.random.randint(0,len(fitnesses))\n r2 = np.random.randint(0,len(fitnesses) - 1)\n if r2 >= r:\n r2 +=1\n gen = crossover(connections, genotypes[r], fitnesses[r], genotypes[r2], fitnesses[r2])\n new_gen.append(gen)\n new_gen.append(genotypes[fi])\n # stop if we have 5 candidates\n if len(new_gen) > 10:\n break\n genotypes = new_gen", "def __init__(self, n_trees=10, criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1, \n max_features='auto', max_leaf_nodes=None, bootstrap=True, oob_score=False, n_jobs=-1, random_state=None,\n verbose=0, min_density=None, compute_importances=None): \n self.random_forest = RandomForestClassifier(n_trees, 
criterion, max_depth, min_samples_split, min_samples_leaf, \n max_features, max_leaf_nodes, bootstrap, oob_score, n_jobs, random_state,\n verbose, min_density, compute_importances)", "def start_all_nodes(self):\n for node in self.nodes:\n node.start()", "def main(args, base_dir):\n for i in range(args.n_training):\n # value of the next seed\n seed = args.seed + i\n\n # The time when the current experiment started.\n now = strftime(\"%Y-%m-%d-%H:%M:%S\")\n\n # Create a save directory folder (if it doesn't exist).\n if args.log_dir is not None:\n dir_name = args.log_dir\n else:\n dir_name = os.path.join(base_dir, '{}/{}'.format(\n args.env_name, now))\n ensure_dir(dir_name)\n\n # Get the policy class.\n if args.alg == \"TD3\":\n from hbaselines.multiagent.td3 import MultiFeedForwardPolicy\n elif args.alg == \"SAC\":\n from hbaselines.multiagent.sac import MultiFeedForwardPolicy\n elif args.alg == \"PPO\":\n from hbaselines.multiagent.ppo import MultiFeedForwardPolicy\n elif args.alg == \"TRPO\":\n from hbaselines.multiagent.trpo import MultiFeedForwardPolicy\n else:\n raise ValueError(\"Unknown algorithm: {}\".format(args.alg))\n\n # Get the hyperparameters.\n hp = get_hyperparameters(args, MultiFeedForwardPolicy)\n\n # add the seed for logging purposes\n params_with_extra = hp.copy()\n params_with_extra['seed'] = seed\n params_with_extra['env_name'] = args.env_name\n params_with_extra['policy_name'] = \"MultiFeedForwardPolicy\"\n params_with_extra['algorithm'] = args.alg\n params_with_extra['date/time'] = now\n\n # Add the hyperparameters to the folder.\n with open(os.path.join(dir_name, 'hyperparameters.json'), 'w') as f:\n json.dump(params_with_extra, f, sort_keys=True, indent=4)\n\n run_exp(\n env=args.env_name,\n policy=MultiFeedForwardPolicy,\n hp=hp,\n dir_name=dir_name,\n evaluate=args.evaluate,\n seed=seed,\n eval_interval=args.eval_interval,\n log_interval=args.log_interval,\n save_interval=args.save_interval,\n initial_exploration_steps=args.initial_exploration_steps,\n ckpt_path=args.ckpt_path,\n )", "def do_training(self):\n json_data = request.data\n global g_list_of_classifier\n\n datas = json.loads(json_data.decode('UTF-8')) #datas = liste\n\n for ite_clf in g_list_of_classifier:\n for data in datas:\n ite_clf.add_data(data['score'], data['answer'])\n print(ite_clf.get_info())\n return ''", "def run_all_tests(self):\n for index in range(len(self.__test_set_list)):\n self.run_test(index)", "def learn(self):\n\n for i in range(1, self.args.numIters + 1):\n # bookkeeping\n log.info(f'Starting Iter #{i} ...')\n # examples of the iteration\n if not self.skipFirstSelfPlay or i > 1:\n iterationTrainExamples = deque([], maxlen=self.args.maxlenOfQueue)\n\n for _ in tqdm(range(self.args.numEps), desc=\"Self Play\"):\n self.mcts = MCTS(self.game, self.nnet, self.args) # reset search tree\n iterationTrainExamples += self.executeEpisode()\n\n # save the iteration examples to the history \n self.trainExamplesHistory.append(iterationTrainExamples)\n\n if len(self.trainExamplesHistory) > self.args.numItersForTrainExamplesHistory:\n log.warning(\n f\"Removing the oldest entry in trainExamples. len(trainExamplesHistory) = {len(self.trainExamplesHistory)}\")\n self.trainExamplesHistory.pop(0)\n # backup history to a file\n # NB! 
the examples were collected using the model from the previous iteration, so (i-1) \n self.saveTrainExamples(i - 1)\n\n # shuffle examples before training\n trainExamples = []\n for e in self.trainExamplesHistory:\n trainExamples.extend(e)\n shuffle(trainExamples)\n\n # training new network, keeping a copy of the old one\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename='temp.pth.tar')\n self.pnet.load_checkpoint(folder=self.args.checkpoint, filename='temp.pth.tar')\n pmcts = MCTS(self.game, self.pnet, self.args)\n\n self.nnet.train(trainExamples)\n nmcts = MCTS(self.game, self.nnet, self.args)\n\n log.info('PITTING AGAINST PREVIOUS VERSION')\n arena = Arena(lambda x: np.argmax(pmcts.getActionProb(x, temp=0)),\n lambda x: np.argmax(nmcts.getActionProb(x, temp=0)), self.game)\n pwins, nwins, draws = arena.playGames(self.args.arenaCompare)\n\n log.info('NEW/PREV WINS : %d / %d ; DRAWS : %d' % (nwins, pwins, draws))\n if pwins + nwins == 0 or float(nwins) / (pwins + nwins) < self.args.updateThreshold:\n log.info('REJECTING NEW MODEL')\n self.nnet.load_checkpoint(folder=self.args.checkpoint, filename='temp.pth.tar')\n else:\n log.info('ACCEPTING NEW MODEL')\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename=self.getCheckpointFile(i))\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename='best.pth.tar')", "def set_eval(self):\n for m in self.models.values():\n m.eval()", "def _next_generation(self, previous_generation):\n self._fullInput, self._fullOutput = previous_generation.get_data()\n self.power = self.settings.population_count\n for forest_iteration in range(self.power):\n first, second = previous_generation.selection()\n print 'selected for crossover ->', first.fitness, second.fitness\n self._forests.append(OneForest(self.settings, first_forest=first, second_forest=second))", "def go(self):\n\n if self.problem_type == 1:\n print magenta(\"|\" * 20 + \" And they're off! 
\" + \"|\" * 20)\n # classification\n c = ClassifyModel(self)\n c.logistic_regression()\n c.knn()\n c.random_forest()\n\n elif self.problem_type == 2:\n # regression\n pass\n\n elif self.problem_type == 3:\n # clustering\n pass\n\n elif self.problem_type == 4:\n # dimensionailty reduction\n pass\n\n elif self.problem_type == 5:\n # recommendation\n pass\n\n else:\n raise ValueError('Problem type not defined properly.')", "def main():\n path_for_data = '/Users/avielshtern/Desktop/semb/iml/IML.HUJI-master/data/kc_house_data (1).csv'\n design_matrix, response_vector = load_data(path_for_data)\n putting_it_all_together_1(design_matrix, response_vector)\n putting_it_all_together_2(design_matrix, response_vector)\n feature_evaluation(design_matrix, response_vector)", "def evaluate(train, train_labels, test, test_labels):\n \n # Use the same model for each training set for now\n model = RandomForestClassifier(n_estimators = 100, \n random_state = 50, n_jobs = -1)\n \n train = train.replace({np.inf: np.nan, -np.inf: np.nan})\n test = test.replace({np.inf: np.nan, -np.inf:np.nan})\n \n feature_names = list(train.columns)\n \n # Impute the missing values\n imputer = Imputer(strategy = 'median', axis = 1)\n train = imputer.fit_transform(train)\n test = imputer.transform(test)\n \n cv_score = 1 * cross_val_score(model, train, train_labels, \n scoring = \"f1\", \n cv = 5)\n \n # Fit on the training data and make predictions\n model.fit(train, train_labels)\n preds = model.predict(test)\n \n # Calculate the performance\n f1 = f1_score(test_labels, preds)\n print('5-fold CV F1: {:.2f} with std: {:.2f}'.format(cv_score.mean(),cv_score.std()))\n print('Test F1: {:.2f}.'.format(f1))\n \n feature_importances = pd.DataFrame({'feature': feature_names, \n 'importance': model.feature_importances_})\n \n return preds, feature_importances", "def run(self, verbose=True, max_total_iterations=50000):\n self.verbose = verbose\n\n # Upper bounds on number of evaluations\n self.max_total_iterations = max_total_iterations\n\n self.initialise_mean_and_count()\n self.directed_edges = []\n self.active_strategy_profiles = []\n self.initialise_queue()\n\n # Forced initial exploration\n self.forced_exploration()\n\n # Keep evaluating nodes until check method declares that we're finished\n iterations = 0\n edges_resolved_this_round = []\n while self.total_interactions < max_total_iterations:\n # Add nodes to queue\n self.add_to_queue(removed=edges_resolved_this_round)\n\n # Evaluate the nodes and log results\n for v, _ in self.evaluate_strategy_profile():\n if verbose:\n print(v)\n\n # Recompute confidence bounds, eliminate, stop etc.\n edges_resolved_this_round = self.check_confidence()\n\n if not self.edges_remaining:\n break\n iterations += 1\n\n # Fill in missing edges if max iters reached without resolving all edges\n self.compute_graph()\n\n # Compute objects to be returned\n if verbose:\n total_steps = self.compute_total_steps()\n print('\\nTotal steps taken = {}'.format(total_steps))\n results = {}\n results['interactions'] = int(np.sum(self.count[0]))\n graph = self._construct_digraph(self.directed_edges)\n results['graph'] = graph\n return results", "def evolve(self, generations=10000):\n\n for gen in range(generations):\n # run the tournament\n self.tournament()\n\n # generate the next generation\n self.p = self.nextGen()", "def run(self):\n # For each microenvironment that the person visits\n while self.routing_node_id != 'end':\n # Get the next node, and this activity class and arguments.\n self.routing_node_id, 
activity_class, kwargs = self.routing.get_next_activity(self.routing_node_id)\n\n # Add this instance to the arguments list\n kwargs['person'] = self\n\n # Create a parametrised instance of the activity\n this_activity_class = activity_class(self.simulation_params, **kwargs)\n \n # set an event flag to mark end of activity and call the activity class\n finished_activity = self.env.event() \n self.env.process(this_activity_class.start(finished_activity))\n yield finished_activity", "def run(self):\n print(' strategies...')\n matrix_file = ''\n matrix_s, matrix_c = None, None\n # run for all but the optimal version\n item2matrix = os.path.join(self.data_set.base_folder, 'item2matrix.txt')\n for rec_type in self.data_set.graphs:\n for graph in self.data_set.graphs[rec_type]:\n print(' ', graph)\n gt_graph = load_graph(graph)\n for strategy in Strategy.strategies:\n if strategy == 'optimal':\n continue\n print(' ', strategy)\n m_new = self.data_set.matrices[rec_type][graph][strategy][0]\n m_newc = self.data_set.matrices[rec_type][graph][strategy][1]\n debug(' ----', m_new)\n debug(' ----', m_newc)\n if not m_new:\n debug(' ---- not m_new')\n matrix_s, matrix_c, matrix_file = None, None, None\n elif matrix_file != m_new:\n matrix_s = SimilarityMatrix(item2matrix, m_new)\n matrix_c = SimilarityMatrix(item2matrix, m_newc)\n matrix_file = m_new\n debug(' ---- matrix_file != m_new')\n # for miss in self.data_set.missions[rec_type][graph][strategy]:\n for miss in Mission.missions:\n print(' ', miss)\n if 'Information Foraging' in miss or 'Berrypicking' in miss:\n matrix = matrix_c\n else:\n matrix = matrix_s\n for m in self.data_set.missions[rec_type][graph][strategy][miss]:\n for ti in xrange(len(m.targets_original)):\n start = m.path[-2] if m.path else m.start\n debug('++++' * 16, 'mission', ti, '/',\n len(m.targets_original))\n debug(m.targets_original[ti])\n self.navigate(gt_graph, strategy, m, start,\n None, matrix)\n if ti > 0 and len(m.targets_original[ti]) == len(m.targets[0]):\n # print('breaking...')\n m.reset()\n break\n if not (ti + 1) == len(m.targets_original):\n m.path.append(u'*')\n m.reset()\n\n # run the simulations for the optimal solution\n print(' optimal...')\n for rec_type in self.data_set.graphs:\n for graph in self.data_set.graphs[rec_type]:\n print(' ', graph)\n sp_file = graph.rsplit('.', 1)[0] + '.npy'\n with open(sp_file, 'rb') as infile:\n sp = pickle.load(infile)\n for miss in self.data_set.missions[rec_type][graph]['optimal']:\n for m in self.data_set.missions[rec_type][graph]['optimal'][miss]:\n for ti in xrange(len(m.targets_original)):\n start = m.path[-2] if m.path else m.start\n debug('++++' * 16, 'mission', ti, '/', len(m.targets_original))\n debug(m.targets_original[ti])\n self.optimal_path(m, start, sp)\n if not (ti + 1) == len(m.targets_original):\n m.path.append(u'*')\n m.reset()\n\n # # DEBUG\n # item2matrix = os.path.join(self.data_set.base_folder, 'item2matrix.txt')\n # for rec_type in ['rbar']:\n # for graph in self.data_set.graphs[rec_type]:\n # print(' ', graph)\n # gt_graph = load_graph(graph)\n # sp_file = graph.rsplit('.', 1)[0] + '.npy'\n # with open(sp_file, 'rb') as infile:\n # sp = pickle.load(infile)\n # m_newc = self.data_set.matrices[rec_type][graph]['title'][1]\n # matrix = SimilarityMatrix(item2matrix, m_newc)\n # sc = 'Berrypicking'\n # mc1 = self.data_set.missions[rec_type][graph]['title'][sc]\n # mc2 = self.data_set.missions[rec_type][graph]['optimal'][sc]\n # mc3 = self.data_set.missions[rec_type][graph]['random'][sc]\n # for m1, m2, m3 
in zip(\n # mc1,\n # mc2,\n # mc3\n # ):\n # # evalute with title strategy\n # for ti in xrange(len(m1.targets_original)):\n # start = m1.path[-2] if m1.path else m1.start\n # debug('++++' * 16, 'mission', ti, '/', len(m1.targets_original))\n # # debug(m1.targets_original[ti])\n # self.navigate(gt_graph, 'title', m1, start, None, matrix)\n # # print(m1.path, ti, len(m1.targets_original[ti]), len(m1.targets[0]))\n # if ti > 0 and len(m1.targets_original[ti]) == len(m1.targets[0]):\n # # print('breaking...')\n # m1.reset()\n # break\n # if not (ti + 1) == len(m1.targets_original):\n # m1.path.append(u'*')\n # m1.reset()\n #\n # # evaluate with optimal strategy\n # for ti in xrange(len(m2.targets_original)):\n # start = m2.path[-2] if m2.path else m2.start\n # # debug('++++' * 16, 'mission', ti, '/', len(m2.targets_original))\n # # debug(m2.targets_original[ti])\n # self.optimal_path(m2, start, sp)\n # if not (ti + 1) == len(m2.targets_original):\n # m2.path.append(u'*')\n # m2.reset()\n # # pdb.set_trace()\n #\n # # if len(m1.path) < len(m2.path):\n # # print(len(m1.path), len(m2.path))\n # # pdb.set_trace()\n # # m1.compute_stats()\n # # m2.compute_stats()\n # # if m1.stats[-1] > m2.stats[-1]:\n # # print(m1.stats)\n # # print(m2.stats)\n # # pdb.set_trace()\n #\n # print('MISSION COLLECTION DONE')\n # mc1.compute_stats()\n # mc2.compute_stats()\n # print(mc1.stats[-1], mc2.stats[-1])\n # pdb.set_trace()\n\n # fname_5 = u'../data/bookcrossing/graphs/rbar_5.gt'\n # fname_20 = u'../data/bookcrossing/graphs/rbar_20.gt'\n # sp_file_5 = fname_5.rsplit('.', 1)[0] + '.npy'\n # sp_file_20 = fname_20.rsplit('.', 1)[0] + '.npy'\n # with open(sp_file_5, 'rb') as infile:\n # sp_5 = pickle.load(infile)\n # with open(sp_file_20, 'rb') as infile:\n # sp_20 = pickle.load(infile)\n # sc = 'Berrypicking'\n # mc_5 = self.data_set.missions['rbar'][fname_5]['optimal'][sc]\n # mc_52 = self.data_set.missions['rbar'][fname_5]['title'][sc]\n # mc_20 = self.data_set.missions['rbar'][fname_20]['optimal'][sc]\n # mc_202 = self.data_set.missions['rbar'][fname_20]['title'][sc]\n # for m5, m20, m52, m202 in zip(\n # mc_5,\n # mc_20,\n # mc_52,\n # mc_202\n # ):\n # # evaluate 5 with optimal strategy\n # for ti in xrange(len(m5.targets_original)):\n # start = m5.path[-2] if m5.path else m5.start\n # self.optimal_path(m5, start, sp_5)\n # if not (ti + 1) == len(m5.targets_original):\n # m5.path.append(u'*')\n # m5.reset()\n #\n # # evaluate 20 with optimal strategy\n # for ti in xrange(len(m20.targets_original)):\n # start = m20.path[-2] if m20.path else m20.start\n # self.optimal_path(m20, start, sp_20)\n # if not (ti + 1) == len(m20.targets_original):\n # m20.path.append(u'*')\n # m20.reset()\n #\n # # if len(m5.path) < len(m20.path) or \\\n # if m5.path.count('*') > m20.path.count('*'):\n # print(len(m5.path))\n # for part in ' '.join(m5.path[2:]).split('*'):\n # print(' ', part)\n # print(len(m20.path))\n # for part in ' '.join(m20.path[2:]).split('*'):\n # print(' ', part)\n # pdb.set_trace()\n #\n # print('MISSION COLLECTION DONE')\n # mc_5.compute_stats()\n # mc_20.compute_stats()\n # print(mc_5.stats[-1], mc_20.stats[-1])\n #\n # for m5, m20 in zip(mc_5.missions, mc_20.missions):\n # if m5.stats[-1] > m20.stats[-1]:\n # print(m5.stats)\n # print(m20.stats)\n # pdb.set_trace()\n # pdb.set_trace()\n\n # write the results to a file\n # self.write_paths()\n self.save()", "def runAllGLMS(self):\n\t\tfor condition in ['WMM']:\n\t\t\tfor run in self.conditionDict[condition]:\n\t\t\t\t\n\t\t\t\t# remove previous feat 
directories\n\t\t\t\ttry:\n\t\t\t\t\tself.logger.debug('rm -rf ' + self.runFile(stage = 'processed/mri', run = self.runList[run], postFix = ['mcf', 'sgtf'], extension = '.feat'))\n\t\t\t\t\tos.system('rm -rf ' + self.runFile(stage = 'processed/mri', run = self.runList[run], postFix = ['mcf', 'sgtf'], extension = '.feat'))\n\t\t\t\t\tos.system('rm -rf ' + self.runFile(stage = 'processed/mri', run = self.runList[run], postFix = ['mcf', 'sgtf'], extension = '.fsf'))\n\t\t\t\texcept OSError:\n\t\t\t\t\tpass\n\t\t\t\t\n\t\t\t\t# this is where we start up fsl feat analysis after creating the feat .fsf file and the like\n\t\t\t\tthisFeatFile = '/home/moorselaar/WMM_PRF/analysis/analysis.fsf'\n\t\t\t\tREDict = {\n\t\t\t\t#'---OUTPUT_DIR---':self.runFile(stage = 'processed/mri', run = r, postFix = ['mcf', 'sgtf']),\n\t\t\t\t'---NR_TRS---':str(NiftiImage(self.runFile(stage = 'processed/mri', run = self.runList[run], postFix = ['mcf', 'sgtf'])).timepoints),\n\t\t\t\t'---FUNC_FILE---':self.runFile(stage = 'processed/mri', run = self.runList[run], postFix = ['mcf', 'sgtf']), \n\t\t\t\t'---CONFOUND_EV---':self.runFile(stage = 'processed/mri', run = self.runList[run], postFix = ['mcf'], extension='.par'), \n\t\t\t\t# '---ANAT_FILE---':os.path.join(os.environ['SUBJECTS_DIR'], self.subject.standardFSID, 'mri', 'bet', 'T1_bet' ), \n\t\t\t\t'---STIM_FILE---':self.runFile(stage = 'processed/behavior', run = self.runList[run], postFix = ['stim_all'], extension='.txt'),\n\t\t\t\t'---RESPONSE_FILE---':self.runFile(stage = 'processed/behavior', run = self.runList[run], postFix = ['resp_all'], extension='.txt'),\n\t\t\t\t'---PPU_FILE---':self.runFile(stage = 'processed/hr', run = self.runList[run], postFix = ['ppu'], extension='.txt'),\n\t\t\t\t'---PPU_R_FILE---':self.runFile(stage = 'processed/hr', run = self.runList[run], postFix = ['ppu','raw'], extension='.txt'),\n\t\t\t\t'---RESP_FILE---':self.runFile(stage = 'processed/hr', run = self.runList[run], postFix = ['resp'], extension='.txt'),\n\t\t\t\t'---RESP_R_FILE---':self.runFile(stage = 'processed/hr', run = self.runList[run], postFix = ['resp','raw'], extension='.txt')\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tfeatFileName = self.runFile(stage = 'processed/mri', run = self.runList[run], extension = '.fsf')\n\t\t\t\tfeatOp = FEATOperator(inputObject = thisFeatFile)\n\t\t\t\t# no need to wait for execute because we're running the mappers after this sequence - need (more than) 8 processors for this, though.\n\t\t\t\tif self.runList[run] == [self.runList[i] for i in self.conditionDict['WMM']][-1]:\n\t\t\t\t\tfeatOp.configure( REDict = REDict, featFileName = featFileName, waitForExecute = True )\n\t\t\t\telse:\n\t\t\t\t\tfeatOp.configure( REDict = REDict, featFileName = featFileName, waitForExecute = False )\n\t\t\t\tself.logger.debug('Running feat from ' + thisFeatFile + ' as ' + featFileName)\n\t\t\t\t# run feat\n\t\t\t\tfeatOp.execute()", "def crawler(self):\n\n\t\tfor page in range(self.first_page, self.last_page+1):\n\t\t\tprint(\"\\nCrawling Page \" + str(page))\n\t\t\tpage_url = self.site_url + \"?page=\" + str(page) +\\\n\t\t\t \"&index=prod_all_products_term_optimization\"\n\t\t\t\n\t\t\tself.scrape_features(page_url)", "def launch_evaluations(self):\n self.report('Launching pending evaluations.')\n with self.optimizer() as opt:\n evals = {}\n evaluate_process = load_object(self.inputs.evaluate_process.value)\n for idx, inputs in opt.create_inputs().items():\n self.report('Launching evaluation {}'.format(idx))\n inputs_merged = ChainMap(inputs, 
self.inputs.get('evaluate', {}))\n if is_process_function(evaluate_process):\n _, node = run_get_node(evaluate_process, **inputs_merged)\n else:\n node = self.submit(evaluate_process, **inputs_merged)\n evals[self.eval_key(idx)] = node\n self.indices_to_retrieve.append(idx)\n return self.to_context(**evals)", "def apply_rules(self):\n if len(self.rules) == 0:\n return\n for gene in self.population:\n for rule in self.rules:\n if rule.type == \"gene\":\n rule.check_and_apply(gene)", "def learner(self):\n for N in range(self.N_learn):\n trajectories = self.sample_trajectories()\n\n # TODO: Both these methods take the full trajectories at the moment, a speedup could be achieved here\n self.qmodel.train(trajectories)\n self.amodel.train(trajectories)", "def train(self):\n\n if(self.net.killAll):\n self._kill()\n\n empty = False\n state = []\n actions = []\n rewards = []\n while(not empty):\n example = self.globalQueue.get()\n \n for prevState, action, reward in zip(example['prevStates'], example['actions'],example['rewards']):\n state.append(np.array(prevState).reshape(-1,84,84,4))\n actions.append(np.eye(self.actionSpace)[np.array(action)].reshape(-1,self.actionSpace).astype(np.float32))\n rewards.append(np.array(reward).reshape(-1))\n empty = self.globalQueue.empty()\n \n if(len(rewards) != 0 ):\n states = np.array(state).reshape(-1, 84,84,4)\n actions = np.array(actions).reshape(-1,self.actionSpace)\n rewards = np.array(rewards).reshape(-1)\n self.net.train(states, rewards, actions)", "def train_random_forest():\n train_model(RandomForestRegressor(max_depth=4, random_state=42),\n dataset_file_name=RANDOM_FOREST_DEFAULT_DATASET,\n model_file_name=RANDOM_FOREST_DEFAULT_MODEL)", "def apply(self, decision_graph: DecisionGraph) -> None:\n #takes decision graph and applies them to processors\n self.apply_random(False)\n logging.debug(\"application1:\", self.application)\n logging.debug(\"connections1:\", self.transfer)\n\n Procedures.instance.set_application(self.application)\n\n all_tasks = self.task_graph.nodes\n all_connections = [i for sub in self.transfer for i in sub if i]\n for n in decision_graph.DFS():\n #pick tasks for nodes keeping in mind propabilities and graph structure\n node = decision_graph.find_node(n)\n parent = decision_graph.find_parents(n)\n logging.debug(node, parent)\n\n if parent:\n #If node has parent use it's tasks to pick from\n parent = decision_graph.find_node(parent[0])\n picked_tasks = random.sample(parent.tasks, k=round(node.propability * len(parent.tasks)))\n assert len(picked_tasks) <= len(parent.tasks)\n picked_connections = random.sample(parent.connections, k=round(node.propability * len(parent.connections)))\n assert len(picked_connections) <= len(parent.connections)\n else:\n #If node has no parents pick random elements from all tasks according to propability value\n picked_tasks = random.sample(all_tasks, k=round(node.propability * len(all_tasks)))\n picked_connections = random.sample(all_connections, k=round(node.propability * len(all_connections)))\n\n node.tasks = picked_tasks\n node.connections = picked_connections\n\n #pick tasks according to operation (strategy) in this node\n for task in picked_tasks:\n self.move_task(task.label, node.task_strategy(task.label))\n\n #pick connection according to comm (strategy) in this node\n for conn in picked_connections:\n picked = node.comm_strategy(self.transfer)\n conn = self.alter_connection(conn, picked)\n\n logging.debug(\"application2:\", self.application)\n logging.debug(\"connections2:\", 
self.transfer)\n\n self.sort_tasks_with_critical_order()\n return self.simulate()", "def evaluate_ucf50_fusion():\n accs = np.zeros(3)\n ob_suffix = '-max.feat.npy.gz'\n fv_suffix = '_fv.npy.gz'\n ob_root = '/home/syq/research_final/data/features/ob_ucf50_pooled_python/'\n fv_root = '/home/syq/research_final/data/features/fv_ucf50_python/'\n fv_groups, full, sets = utility.split_data(fv_root,\n suffix=fv_suffix,\n useLooCV=False)\n\n ob_groups, _, _ = utility.split_data(ob_root,\n suffix=ob_suffix,\n useLooCV=False)\n weights = [i / 20.0 for i in range(8, 13)]\n acc_to_weights = {}\n\n for weight in weights:\n print \"Weight: %.2f\" % weight\n accs = np.zeros(2)\n for i in xrange(2):\n ts = time.time()\n Dtrain_fv, Dtest_fv, Ytrain, Ytest = utility.load_groups(\n fv_groups, np.setdiff1d(full, sets[i]),\n sets[i], scale=False, verbose=False)\n\n Dtrain_ob, Dtest_ob, Ytrain, Ytest = utility.load_groups(\n ob_groups, np.setdiff1d(full, sets[i]),\n sets[i], scale=False, verbose=False)\n\n fv_clf = OneVsRestClassifier(estimator=LinearSVC(C=100), n_jobs=8)\n\n ob_clf = OneVsRestClassifier(estimator=SVC(C=10,\n cache_size=1000,\n kernel='linear',\n probability=True),\n n_jobs=-1)\n\n # Get probabilities for late fusion\n Dtrain_fv = fv_clf.fit(Dtrain_fv, Ytrain).decision_function(Dtrain_fv)\n Dtrain_ob = ob_clf.fit(Dtrain_ob, Ytrain).decision_function(Dtrain_ob)\n Dtest_fv = fv_clf.decision_function(Dtest_fv)\n Dtest_ob = ob_clf.decision_function(Dtest_ob)\n\n # Scale decision values b/w 0 and 1\n Dtrain_fv = preprocessing.normalize(Dtrain_fv)\n Dtrain_ob = preprocessing.normalize(Dtrain_ob)\n Dtest_fv = preprocessing.normalize(Dtest_fv)\n Dtest_ob = preprocessing.normalize(Dtest_ob)\n\n # weighted averaging\n scores_train = (Dtrain_fv * weight) + (Dtrain_ob * (1 - weight))\n latefusion_clf = OneVsRestClassifier(estimator=LinearSVC(C=100), n_jobs=-1)\n latefusion_clf.fit(scores_train, Ytrain)\n\n scores_test = (Dtest_fv * weight) + (Dtest_ob * (1 - weight))\n latefusion_acc = latefusion_clf.score(scores_test, Ytest)\n print 'Fold', i, 'late fusion acc', latefusion_acc\n print \"Train & testing time %.3f\" % (time.time() - ts)\n accs[i] = latefusion_acc\n\n print \"Mean accuracy: %.3f\" % accs.mean()\n with open(\"fv_ucf50_accs_5fold_gridsearch.txt\", \"w\") as f:\n for weight, accs in acc_to_weights.items():\n f.write(str(weight) + str(accs) + '\\n')\n return acc_to_weights", "def runner(self):\n\n print('[ INFO ]: Initializing the abalone program runner...')\n\n df, features, predictor, classes = self.preprocess()\n\n df = alg.random_feature_sample(self, df, 0.10)\n\n # Set up the training, testing and validation sets\n split = round(len(df) * 0.10)\n v_set = df[df.index < split]\n t_set = df[df.index >= split]\n\n tree = alg()\n folds_dict = tree.cross_validation(t_set, predictor, type='classification', folds=5)\n\n # Initialize comparion values\n best_fold_tree = None\n best_fold_score = 0\n best_fold_pred_labels = None\n best_fold_df = None\n\n # Loop through each fold in the folds dictionary\n for fold in folds_dict:\n\n test_set = folds_dict[fold]\n train_set = pd.DataFrame()\n for inner_fold in folds_dict:\n if inner_fold != fold:\n train_set = train_set.append(folds_dict[inner_fold], ignore_index=True)\n\n # Build an ID3 tree\n root = tree.build_tree(train_set, features, predictor)\n df, labels, pred_labels, score = tree.test(test_set, features, predictor, root)\n\n # Determine which tree is the best\n if score > best_fold_score:\n best_fold_tree = root\n best_fold_score = score\n 
best_fold_pred_labels = pred_labels\n best_fold_df = df\n\n # Validate results and prune the ID3 tree\n v_tree = alg()\n df, labels, pred_labels, score = v_tree.test(v_set, features, predictor, best_fold_tree)\n prune_root = v_tree.prune(df, predictor, best_fold_tree)\n prune_df, prune_labels, prune_pred_labels, prune_score = v_tree.test(v_set, features, predictor, prune_root)\n\n return best_fold_tree, score, labels, pred_labels, prune_root, prune_score, prune_labels, prune_pred_labels", "def run_experiment(self):\n\n start_time = time.time()\n\n strategy_instance = None\n if (self.strategy == 'ccegp'):\n strategy_instance = CCEGPStrategy(self)\n else:\n print('strategy unknown:', self.strategy)\n sys.exit(1)\n\n # For each run...\n for curr_run in range(1, self.num_runs_per_experiment + 1):\n\n # Update log\n self.curr_run = curr_run\n print('\\nRun', curr_run)\n self.log_file.write('\\nRun ' + str(curr_run) + '\\n')\n\n # Execute one run and get best values.\n attacker_run_high_fitness, attacker_run_best_world_data, attacker_run_best_solution, \\\n defender_run_high_fitness, defender_run_best_solution, attacker_dot, defender_dot \\\n = strategy_instance.execute_one_run()\n\n print('\\nBest attacker tree of run:\\n' + attacker_run_best_solution)\n if (self.print_dots):\n print('\\nBest attacker dot of run:\\n' + str(attacker_dot))\n print('\\nBest defender tree of run:\\n' + defender_run_best_solution)\n if (self.print_dots):\n print('\\nBest defender dot of run:\\n' + str(defender_dot))\n\n # If best of run is best overall, update appropriate values\n if (self.strategy != 'ccegp'):\n if (attacker_run_high_fitness > self.attacker_exp_high_fitness):\n self.attacker_exp_high_fitness = attacker_run_high_fitness\n print('New exp Attacker high fitness: ', self.attacker_exp_high_fitness)\n self.attacker_exp_best_world_data = attacker_run_best_world_data\n self.attacker_exp_best_solution = attacker_run_best_solution\n self.attacker_exp_best_dot = attacker_dot\n # If Competitive Co-evolution, add fitnesses (use Attacker to store most data)\n else:\n if ((attacker_run_high_fitness + defender_run_high_fitness) > self.attacker_exp_high_fitness):\n self.attacker_exp_high_fitness = (attacker_run_high_fitness + defender_run_high_fitness)\n print('New exp Attacker+Defender high fitness: ', self.attacker_exp_high_fitness)\n self.attacker_exp_best_world_data = attacker_run_best_world_data\n self.attacker_exp_best_solution = attacker_run_best_solution\n self.defender_exp_best_solution = defender_run_best_solution\n self.attacker_exp_best_dot = attacker_dot\n self.defender_exp_best_dot = defender_dot\n\n # Dump best world to file\n the_file = open(self.high_score_world_file_path, 'w')\n for line in self.attacker_exp_best_world_data:\n the_file.write(line)\n the_file.close()\n\n # Dump best Attacker solution (text) to file\n the_file = open(self.attacker_solution_file_path, 'w')\n the_file.write(self.attacker_exp_best_solution)\n the_file.close()\n\n # Dump best Defender solution (text) to file\n if (self.strategy == 'ccegp'):\n the_file = open(self.defender_solution_file_path, 'w')\n the_file.write(self.defender_exp_best_solution)\n the_file.close()\n\n # Dump best Attacker solution (dot) to file\n the_file = open(self.attacker_solution_dot_path, 'w')\n the_file.write(str(self.attacker_exp_best_dot))\n the_file.close()\n\n # Dump best Defender solution (dot) to file\n if (self.strategy == 'ccegp'):\n the_file = open(self.defender_solution_dot_path, 'w')\n the_file.write(str(self.defender_exp_best_dot))\n 
the_file.close()\n\n # Dump and display best Attacker solution\n if (self.render_solutions):\n self.attacker_exp_best_dot.render(filename=self.attacker_solution_png_path,\n view=self.attacker_open_png,\n format='png')\n\n # Dump and display best Defender solution\n if (self.render_solutions and self.strategy == 'ccegp'):\n self.defender_exp_best_dot.render(filename=self.defender_solution_png_path,\n view=self.defender_open_png,\n format='png')\n\n # Close out the log file\n if (not(self.log_file is None)):\n self.log_file.close()\n\n print(time.time() - start_time, 'seconds')", "def main():\n args = parameter_parser()\n tab_printer(args)\n trainer = GPNTrainer(args)\n # trainer.fit()\n \"\"\"\n Scoring on the prediction and learning ability.\n \"\"\"\n trainer.score()\n \"\"\"\n Scoring on the subgraph test set.\n \"\"\"\n # trainer.score2()\n \"\"\"\n Scoring on the generalization ability.\n \"\"\"\n # trainer.score3()\n \"\"\"\n Finetuning for downstream tasks.\n \"\"\"\n # model = finetune_GPN(args, trainer.number_of_labels)\n # model.finetune()", "def compute_all(self) -> None:\n self.compute_j_matrix()\n self.compute_outter_distribution()\n self.compute_max_prior()\n self.compute_max_poutter()", "def start(self):\n for trial in self._trials:\n self._run(trial)" ]
[ "0.66012484", "0.62504995", "0.61932045", "0.6135642", "0.6134355", "0.61263996", "0.6049594", "0.5867963", "0.5865974", "0.5832648", "0.581788", "0.5802029", "0.57808244", "0.5779419", "0.5708533", "0.56901664", "0.5634386", "0.56181246", "0.558044", "0.553923", "0.55354095", "0.55076337", "0.5501047", "0.546172", "0.54554665", "0.5449654", "0.54465944", "0.5430582", "0.54302466", "0.5425708", "0.54240423", "0.54049075", "0.53903145", "0.53903145", "0.53879577", "0.5372477", "0.537163", "0.5371445", "0.536546", "0.5364046", "0.53615", "0.5349199", "0.5348576", "0.5347975", "0.5347421", "0.53442204", "0.53416", "0.5340558", "0.533911", "0.5332747", "0.53245175", "0.5317703", "0.53116584", "0.5308252", "0.529818", "0.5285249", "0.52834314", "0.5280737", "0.5258904", "0.525514", "0.5253242", "0.5235794", "0.5225982", "0.52235615", "0.52227396", "0.5217332", "0.52070844", "0.5204109", "0.52016765", "0.5187104", "0.5180868", "0.5170577", "0.5167422", "0.5165838", "0.5165298", "0.5158282", "0.51581365", "0.51535106", "0.51534796", "0.51476747", "0.5146464", "0.51380193", "0.513666", "0.5131762", "0.51260483", "0.5123776", "0.51139426", "0.5107123", "0.510546", "0.5100614", "0.5097922", "0.5097633", "0.50936204", "0.50892085", "0.5084416", "0.5073824", "0.50726", "0.5072061", "0.5070364", "0.5070096" ]
0.7236054
0
selecting one point in probability gist
def select_by_prob(self):
    ball = random()
    stop_sector = 0
    for sector in self.roulet:
        ball -= sector
        if ball < 0:
            return stop_sector
        else:
            stop_sector += 1
    return stop_sector
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pick_one(self):\n index = 0\n r = random.random()\n while r >= 0:\n r = r - self.normalised_fitness[index]\n index += 1\n index -= 1\n return self.population[index]", "def selection(probs):\n # pick 2 parents out of this distribution\n t = [i for i in range(len(probs))]\n draw = choice(t, 2, p=probs, replace=False)\n return draw", "def pick(self, target: int) -> int:\n\t\tans = None\n cnt = 0\n for i, x in enumerate(self.nums): \n if x == target: \n cnt += 1\n if randint(1, cnt) == cnt: ans = i # prob 1/cnt\n return ans", "def prob_choice(p):\n \n return np.random.random_sample() < p", "def samplepoint(x,u):\n return point(x)", "def probchoice(V, d, obs=[]):\n\n #d = 0.01\n #obs = []\n #V = array([0., 0., 0.2, 0.2, 0.2, 0.4])\n\n #top = [exp(d*v) for v in V]\n top = exp(V * (1./d))\n\n #print top\n #print dummy\n\n # set the value of any prior observations to zero\n for i in range(len(obs)): top[obs[i][0]] = 0.\n\n bottom = sum(top)\n cp = [t/bottom for t in top]\n\n r = random()\n #print r\n #print cumsum(cp)\n\n return where((1*(r < cumsum(cp)))==1)[0][0]\n\n #return sum(1*(random() < cumsum(cp)))-1", "def fitness(individual):\n different_pos = 0\n return different_pos", "def sample_one(self):\n # x = self.mean + self.sigma * np.random.normal()\n x = self.dist.sample(1)\n return x", "def prob1(n):\n#raise NotImplementedError(\"Problem 1 Incomplete\")\n if n == 0 :\n raise ValueError(\"Sampling 0 points is not defined.\")\n total = 0\n for i in xrange(n) :\n if np.random.normal() > 3 :\n total += 1\n return float(total)/n", "def rand_zero_or_one(one_prob):\n if random.random() < one_prob:\n return 1\n return 0", "def p(self) -> Probability:\n ...", "def _get_sample(self, p: float) -> np.ndarray:\n return np.where(self.rand_array >= p, 0, 1)", "def probability(p):\n return p > random.uniform(0.0, 1.0)", "def mutate_random(self, point, population):\n other = Point(self.model.generate())\n other.evaluate(self.model)\n while other in population or other == point:\n other = Point(self.model.generate())\n other.evaluate(self.model)\n return other", "def selection(self):\n\n # sort the generation according to fitness.\n self.sortByFitness()\n # get the fitness sum.\n fitnessSum = 0\n for outfit in self.currentGeneration:\n fitnessSum += self.applyFitness(outfit)\n # generate a random number\n stop = random.uniform(0, 1)\n accumulated = 0\n offset = 0\n for outfit in self.currentGenerationSorted:\n fitness = self.applyFitness(outfit) + offset\n probability = fitness / fitnessSum\n accumulated += probability\n\n if stop <= accumulated:\n return outfit", "def probability(problem, train_ixs, obs_labels, selected_ixs, batch_size, **kwargs):\n points = problem['points']\n model = problem['model']\n\n test_X = points[selected_ixs]\n\n p_x = model.predict_proba(test_X)\n\n return p_x[:,1].reshape(-1)", "def propose(x, jump = 0.1):\n\treturn (x[0] + random.gauss(0, jump), x[1] + random.gauss(0, jump))", "def prob(x):\n\treturn 1. 
* bivariate_normal(x, (0., 1.2), (1., 1.), .8) + \\\n\t 1.05 * bivariate_normal(x, (.6, -1.), (1.3, .7), -.6)", "def probability(prob):\n return random.random() <= prob", "def sample(cdf):\n p = rand()\n #this line is for rounding errors which will cause binary_search to return\n #an index that is out of bounds\n if p == 1.0:\n return cdf[-1]\n else:\n return binary_search(cdf, p)", "def sample(self, seg_logit, seg_label):", "def prior_sample(self, bn):\n x = np.zeros(3)\n\n # first joint prob\n random_choice = np.random.choice(bn[0], 1, bn[0].all(), bn[0])\n x[0] = random_choice[0]\n\n # Second Joint Prob\n if x[0] == 0.1:\n random_choice = np.random.choice(bn[1][0], 1, bn[1][0].all(), bn[1][0])\n x[1] = random_choice\n elif x[0] == 0.9:\n random_choice = np.random.choice(bn[1][1], 1, bn[1][1].all(), bn[1][1])\n x[1] = random_choice\n\n # Third Joint Prob\n if random_choice[0] == 0.8 or random_choice == 0.1:\n random_choice = np.random.choice(bn[2][0], 1, bn[2][0].all(), bn[2][0])\n x[2] = random_choice\n else:\n random_choice = np.random.choice(bn[2][1], 1, bn[2][1].all(), bn[2][1])\n x[2] = random_choice\n return x", "def generate_points_from_distribution(distribution, amount):\n\tchoices = []\n\tprobabilities = distribution.counted.values.astype('float')\n\tfor n in range(amount):\n\t\tchoose_from = normalize_array(probabilities)\n\t\tchoice = np.random.choice(distribution.index.values, p=choose_from)\n\t\tchoices.append(distribution.loc[choice].geometry)\n\t\twherewasit = np.argwhere(distribution.index.values == choice)[0][0]\n\t\tprobabilities[wherewasit] -= 1/amount\n\t\tprobabilities[probabilities < 0] = 0\n\treturn choices", "def select_from_strategy(dist: dict):\n\n val = random()\n for x in dist:\n val -= dist[x]\n if val <= 0:\n return x\n\n # Not properly normalised!\n return None", "def evaluate_one(self, x):\n # p = 1. / (np.sqrt(2. 
* np.pi) * self.sigma) * \\\n # np.exp(-0.5 * (self.mean - x) * self.invvar * (self.mean - x))\n p = self.dist.probability(x)\n return p", "def find_nominal_point(p):\n num_p = p.shape[0]\n num_d = p.shape[1]\n m = Model(\"nominal\")\n u = m.addVar(name=\"u\", lb=0)\n \n y = m.addVars(range(num_p*num_d), vtype=GRB.CONTINUOUS, obj=0.0, name=\"y\")\n # new nominal point\n beta = m.addVars(range(num_d), vtype=GRB.CONTINUOUS, obj=0.0, name=\"beta\", lb=0)\n \n m.setObjective(u, GRB.MINIMIZE)\n \n for i in range(num_p):\n m.addConstr(u, GRB.GREATER_EQUAL, quicksum(y[i*num_d+j] for j in range(num_d)), \"u_\"+str(i))\n\n for i in range(num_p):\n for j in range(num_d):\n m.addConstr(y[i*num_d+j], GRB.GREATER_EQUAL, p[i,j]-beta[j], \"c1\"+str(i))\n m.addConstr(y[i*num_d+j], GRB.GREATER_EQUAL, beta[j]-p[i,j], \"c2\"+str(i))\n\n m.setParam( 'OutputFlag', False )\n m.optimize()\n \n #print('Obj: %g' % m.objVal) \n \n #for v in m.getVars():\n # print('%s %g' % (v.varName, v.x))\n \n threshold = 0\n for v in m.getVars():\n if v.varName == \"u\":\n threshold = v.x\n break\n \n nominal_params = m.getAttr('x', beta)\n \n nominal_p = []\n for i in range(num_d):\n nominal_p.append(nominal_params[i])\n \n return nominal_p, threshold#tuple(nominal_p)", "def get_label(prob_label, target):\n return target if random.random() <= prob_label else 1 - target", "def random_point(bounds):\n return Point(PointSampler.random_coords(bounds))", "def default_selection(random, population, args):\r\n return population", "def puct_choice(node):\n return np.argmax(puct_distribution(node))", "def sample_from(self, p):\n return np.searchsorted(np.cumsum(p), np.random.rand())", "def __generate_point_based_on_prob(self) -> Point:\n possible = False\n while not possible:\n # make the random decision based on a distribution (hot spots / different probabilities)\n prob_list = self.probability_distribution_grid.flatten()\n selected_index = np.random.choice(\n np.arange(0, len(prob_list)), p=prob_list)\n\n # get the indices of the cell (from the one array index)\n # width is the number of cells in x directions (it starts with cell 0/0) and is needed due to row-major order\n cell_x = int(selected_index % self.occupancy_map.info.width)\n cell_y = int(selected_index / self.occupancy_map.info.width)\n\n # get the real world coordinates (which represents the center of the cell)\n x = self.occupancy_map.info.origin.position.x + \\\n (cell_x + 0.5) * self.occupancy_map.info.resolution\n y = self.occupancy_map.info.origin.position.y + \\\n (cell_y + 0.5) * self.occupancy_map.info.resolution\n\n # Check if the actual cell is free of STATIC obstacles (not occupied)\n if not self.__cell_is_occupied(cell_x, cell_y):\n # Check for not occupied neighbors (the robot needs some space the reach it)\n if not self.__has_occupied_neighbors(cell_x, cell_y):\n # If actual spawning of dirt is enabled, then it should also be ensured that no other dirt object is already\n # at this position, because spawning a model in the same location of an already existing model can lead to problems\n if not self.prevent_duplicates or not self.__check_for_duplicates(Point(x, y, 0.0)):\n possible = True\n else:\n rospy.loginfo(\"*** WRAPPER MESSAGE ***\\n\\n\\tGenerated dirt at (%.2f | %.2f) was refused due to already \"\n \"active dirt at this position.\\n\\tGenerating next one...\\n\" % (x, y))\n else:\n rospy.loginfo(\"*** WRAPPER MESSAGE ***\\n\\n\\tGenerated dirt at (%.2f | %.2f) was refused due to occupied neighbor \"\n \"cells.\\n\\tGenerating next one...\\n\" % (x, 
y))\n else:\n rospy.loginfo(\"*** WRAPPER MESSAGE ***\\n\\n\\tGenerated dirt at (%.2f | %.2f) was refused due to occupied cell.\"\n \"\\n\\tGenerating next one...\\n\" % (x, y))\n return Point(x=x, y=y, z=0.0)", "def _sample_pos(self, assign_result, num_expected, **kwargs):\n pos_inds = torch.nonzero(assign_result.gt_inds > 0)\n if pos_inds.numel() != 0:\n pos_inds = pos_inds.squeeze(1)\n if pos_inds.numel() <= num_expected:\n return pos_inds\n else:\n return self.random_choice(pos_inds, num_expected)", "def choose(self):\n\n i = bisect.bisect(self._p, random.random())\n return self._values[i]", "def choice(population,weights):\r\n\tassert len(population) == len(weights)\r\n\tcdf_vals=cdf(weights)\r\n\treturn population[bisect.bisect(cdf_vals, random.random())]", "def sample_prob(probs):\n return tf.to_float(tf.random_uniform(tf.shape(probs)) <= probs)", "def sample_prob(probs):\n return tf.to_float(tf.random_uniform(tf.shape(probs)) <= probs)", "def _select_from(arr, probs=None):\n if probs is None:\n return arr[randint(0, len(arr) - 1)]\n else:\n r = random()\n s = 0\n for i in range(len(probs)):\n s += probs[i]\n if s > r:\n return arr[i]\n return arr[len(arr) - 1]", "def touching_choice(self,p):\n choose = random.sample(part,2)\n\n return choose", "def targetpoint(self, initpoint):\n while True:\n col = int(random.uniform(0, COLS))\n row = int(random.uniform(0, ROWS))\n if (row, col) != initpoint:\n break\n return (row, col)", "def posterior_first(self, word):\r\n prob = {}\r\n if word not in prob.keys():\r\n prob[word] = {\r\n pos: self.emission_probability[pos][word]\r\n * self.initial_probability[pos]\r\n if word in self.emission_probability[pos]\r\n else (1 / float(10 ** 10)) * self.initial_probability[pos]\r\n for pos in self.position_list\r\n }\r\n\r\n return prob[word]", "def peturb(param):\n ann = param.annotation\n if ann == inspect._empty:\n ann = 'normal'\n if type(ann)==str:\n if ann == 'normal':\n return param.default + np.random.normal()\n elif ann == 'positive':\n return abs(param.default + np.random.normal())\n elif type(ann) == tuple:\n # Get a number from uniform random distribution\n # bounded by values in the annotation tuple.\n if type(ann[0]) == float:\n return np.random.uniform(*ann)\n elif type(ann[0]) == int:\n return np.random.randint(*ann)\n else:\n print('Unrecognised function annotation.')", "def select_target_point(state, target_pt_num=1024):\n point_state = state[0][0]\n target_mask = get_target_mask(point_state)\n # removing gripper point later\n point_state = point_state[:4, target_mask] # \n gripper_pc = point_state[:4, :6] # \n point_num = min(point_state.shape[1], target_pt_num)\n obj_pc = regularize_pc_point_count(point_state.T, point_num, False).T\n point_state = np.concatenate((gripper_pc, obj_pc), axis=1)\n return [(point_state, state[0][1])] + state[1:]", "def insert_point(mutated_genome,index):\n Xval = random.randint(-int(imagewidth/5.),int(imagewidth*6./5.))\n Yval = random.randint(-int(imageheight/5.),int(imageheight*6./5.))\n point = (Xval,Yval)\n point_index = random.randint(0,max(0,len(mutated_genome[index][2])))\n mutated_genome[index][2].insert(point_index, point)", "def getP1(self):\n return self.points[0]", "def sample(p, temperature, key, num_samples=1):\n tol = 1e-7\n p = np.clip(p, tol, 1 - tol)\n logit_p = logit(p)\n base_randomness = random.logistic(key, shape=(num_samples, *p.shape))\n return nn.sigmoid((logit_p + base_randomness) / (temperature + tol))", "def get_Sample(self, values, probabilities):\r\n return 
choices(values,probabilities)\r\n # return np.random.choice(values,p=probabilities)\r", "def prob(self, cut):\n return self._root.prob(cut)", "def probability(self, samples):\n pass", "def goals():\n rand_nmr = random.random()\n if rand_nmr < 0.5:\n return 1\n elif rand_nmr < 0.8:\n return 2\n elif rand_nmr < 0.97:\n return 3\n else:\n return 4", "def select_action(policy, state):\n state = torch.from_numpy(state).long().unsqueeze(0)\n state = torch.zeros(3,9).scatter_(0,state,1).view(1,27)\n pr = policy(Variable(state))\n m = torch.distributions.Categorical(pr)\n action = m.sample()\n log_prob = torch.sum(m.log_prob(action))\n return action.data[0], log_prob", "def _logprob(self, sample):\n return 0, 0", "def probability_of_default(model, prediction_features):\n return model.predict_proba(prediction_features)[:, 1]", "def _get_sample(self):\n p = self._get_mean()\n u = self.random.random_sample(p.shape)\n sample = u < p\n return sample", "def choose_next(self, round):\n return random.choice(self.possible_coords)", "def sample(self, probability):\n return random.uniform(0, 1) < probability", "def next_interaction_point(p0, v0, mu=mu):\r\n d = -1/mu * np.log(random.random()) # Path length in cm according to Vassilev section 4.2.2\r\n p = [p0[i] + v0[i]*d for i in range(3)]\r\n return p", "def _sample_pos(self, assign_result, num_expected, **kwargs):\n pos_inds = torch.nonzero(assign_result.gt_inds > 0)\n if pos_inds.numel() != 0:\n pos_inds = pos_inds.squeeze(1)\n if pos_inds.numel() <= num_expected:\n repeat_ = num_expected // pos_inds.numel()\n return torch.cat((pos_inds.repeat(repeat_), self.random_choice(pos_inds, num_expected % pos_inds.numel())))\n else:\n return self.random_choice(pos_inds, num_expected)", "def get_pgeom(aor, e):\n return 1. / (aor * (1 - e*e)) * (aor > 1.0)", "def greedy_proportional_strategy(our_hist, their_hist):\n if len(our_hist) == 0 or len(their_hist) == 0:\n return choice(CHOICES)\n freqs = count(their_hist)\n prediction_for_them = np.argmax(freqs)\n return CHOICES[(prediction_for_them + 1) % 3]", "def prob4():\n\n\n N = 500000\n random_draws = np.random.multivariate_normal(mean = [-1,1], cov =[[1,0],[0,1]], size = N)\n\n h = lambda x: x[0] < -1 and x[1] > 1\n f = lambda x: stats.multivariate_normal(mean = [ 0, 0]).pdf(x)\n g = lambda x: stats.multivariate_normal(mean = [-1, 1]).pdf(x)\n\n probability = [h(random_draws[i]) * f(random_draws[i]) / g(random_draws[i]) for i in range(N)]\n\n return 1./N * np.sum(probability)", "def sample(self, x):", "def prob1():\n x, y = sy.symbols('x, y')\n return sy.Rational(2,5) * sy.exp(x**2 - y) * sy.cosh(x + y) + \\\n sy.Rational(3,7) * sy.log(x*y + 1)", "def mutate_point_poly(mutated_genome):\n seed = random.randint(0,7)\n index = random.randint(0,max(0,len(mutated_genome)-1))\n if len(mutated_genome[index][2]) < 3: seed = 0\n if seed == 0:\n insert_point(mutated_genome,index)\n elif seed == 1:\n remove_point(mutated_genome,index)\n elif seed == 2:\n switch_points(mutated_genome,index)\n elif seed == 3:\n shuffle_points(mutated_genome,index)\n elif seed == 4:\n move_point(mutated_genome,index)\n elif seed == 5:\n shift_point(mutated_genome,index)\n elif seed == 6:\n increment_point(mutated_genome,index)\n else: #seed == 7:\n decrement_point(mutated_genome,index)", "def random_select(population, lamda):\n fitness_population = []\n for i in population:\n f_i = fitness_function(i, lamda)\n fitness_population.append(f_i)\n pList = selection_probability(fitness_population)\n rnd_indices = np.random.choice(len(population), 
p=pList)\n choice = population[rnd_indices]\n return choice", "def sample(prediction):\n p = np.zeros(shape=[1, vocabulary_size], dtype=np.float)\n p[0, sample_distribution(prediction[0])] = 1.0\n return p", "def sample(prediction):\n p = np.zeros(shape=[1, vocabulary_size], dtype=np.float)\n p[0, sample_distribution(prediction[0])] = 1.0\n return p", "def sample(prediction):\n p = np.zeros(shape=[1, vocabulary_size], dtype=np.float)\n p[0, sample_distribution(prediction[0])] = 1.0\n return p", "def conditional_prob(self, label, datapoint):\r\n\r\n # REPLACE THE COMMAND BELOW WITH YOUR CODE\r\n feat_vec = self.x[datapoint]\r\n\r\n if label == 1:\r\n return self.conditional_prob_1(feat_vec)\r\n\r\n return 1 - self.conditional_prob_1(feat_vec)", "def sample_discrete(probs):\r\n q = np.random.rand()\r\n i = 0\r\n p_sum = 0.0\r\n while p_sum < q:\r\n p_sum += probs[i]\r\n i += 1\r\n return i - 1", "def _graph_fn_sample_stochastic(distribution):\n return distribution.sample()", "def _starting_prob(self, s):\n return self._starting_state_distribution.pdf(s)", "def p_pits(self, index):\n if index == 1:\n return self.p1_pits()\n else:\n return self.p2_pits()", "def prob1(n):\n\n # create a giant draw from a normal distribution\n random_draws = np.random.normal(loc= 0, scale = 1, size = n)\n\n # mask the values\n mask = random_draws > 3\n\n return np.sum(mask)/float(n)", "def _randomize_one(p, v):\n if any(p.endswith(s) for s in ('_pd_n', '_pd_nsigma', '_pd_type')):\n return v\n else:\n return np.random.uniform(*parameter_range(p, v))", "def eval_sampling_point(self, sampling_point):\n return Solution(self, sampling_point)", "def select_action(policy, state):\n #torch.manual_seed(RAND_SEED) # Seed here is causing kernel to crash\n state = torch.from_numpy(state).long().unsqueeze(0)\n state = torch.zeros(3,9).scatter_(0,state,1).view(1,27)\n #print(state) # for 2b\n pr = policy(Variable(state))\n #print(pr) # for 2c\n m = torch.distributions.Categorical(pr)\n action = m.sample()\n log_prob = torch.sum(m.log_prob(action))\n return action.data[0], log_prob", "def calc_p(ran):\n import random\n from math import sqrt\n\n p = None; hits = 0; misses = 0\n\n for i in range(ran):\n x = random.random() * 2 - 1\n y = random.random() * 2 - 1\n r = sqrt(x**2 + y**2)\n if r <= 1:\n hits += 1\n \n p = 4 * hits / ran\n\n return p", "def choose_option(self, state):\n options = [o for o in self.options if o.initiation_set[state] == 1]\n return random.choice(options)", "def sample(probs):\n\n probs = probs / probs.sum()\n return np.random.choice(np.arange(len(probs)), p=probs.flatten())", "def single_point_mutation(variables_number: int,\n mutation_chance: float) -> MutationPointsTyping:\n scaled_mutation_chance = variables_number * mutation_chance\n if generate_random_float(0, 1) <= scaled_mutation_chance:\n return [choose_random_value(values_pool=range(variables_number))]\n return []", "def _get_visual_position(self, point: int) -> float:\n return point / self._param[\"n_v\"] + np.random.uniform() / \\\n self._param[\"n_v\"]", "def bernoulli_trial(p: float) -> int:\n return 1 if random.random() < p else 0", "def prob_given_state(self, start=1, end=len(self.T)): # , start, end):\n\n # for state_index in range(len(self.tagset)):\n # self.alpha[1][state_index] = 0\n\n raise NotImplementedError", "def increment_point(mutated_genome,index):\n point_index1 = random.randint(0,max(0,len(mutated_genome[index][2])-2))\n seed = random.randint(0,2)\n if seed == 0:\n point_index2 = point_index1 + 1\n elif seed == 1:\n 
point_index2 = random.randint(point_index1,max(0,len(mutated_genome[index][2])-1))\n else: #seed == 2:\n point_index2 = max(0,len(mutated_genome[index][2])-1)\n temp = mutated_genome[index][2][point_index1]\n mutated_genome[index][2][point_index1] = mutated_genome[index][2][point_index2]\n mutated_genome[index][2][point_index2] = temp", "def fitness_key(self, x):\n dv, c3, ventr = x.population.champion.f\n #print dv, c3, ventr\n if dv == 0 and ventr == 0:\n return -10000.0/c3\n else:\n return dv+ c3+ ventr", "def conditional_sample(p, y, temperature, key):\n tol = 1e-7\n p = np.clip(p, tol, 1 - tol)\n\n v = random.uniform(key, shape=y.shape)\n v_prime = (v * p + (1 - p)) * y + (v * (1 - p)) * (1 - y)\n v_prime = np.clip(v_prime, tol, 1 - tol)\n\n logit_v = logit(v_prime)\n logit_p = logit(p)\n return nn.sigmoid((logit_p + logit_v) / (temperature + tol))", "def __getitem__(self,point):\n point=point.normalize(self.size)\n return self.terrain[point.y][point.x]", "def touching_choice(self,p):\n\n part = ['head', 'foot1', 'foot2', 'foot3', 'foot4', 'back', 'stomach', 'tail']\n if len(self.select[p]) == 0:\n return random.sample(part,2)\n elif len(self.select[p]) == 1:\n part.remove(self.select[p][0])\n c = random.sample(part,1)\n return [self.select[p][0], c[0]]\n else:\n return random.sample(self.select[p],2)", "def pt(x,y):\n t = sample_side\n global d, ds, samples, mask\n d = depth[y-t:y+t,x-t:x+t]\n\n # This is where I choose which point in the sample to use. I take\n # the minimum, which is the nearest pixel. Other possibilities\n # are median, mean, etc.\n if method=='median':\n meand = np.median(d[d<2047])\n if method=='mean':\n meand = np.mean(d[d<2047])\n if method=='min':\n meand = d[d<2047].min()\n if method=='kmeans':\n import Pycluster\n labels, error, nfound = Pycluster.kcluster(d.reshape(-1,1),4)\n labels = labels.reshape(d.shape)\n means = np.array([d[labels==i].mean() for i in range(labels.max()+1)])\n nearest = np.argmin(means)\n mask = labels==nearest\n samples = d[mask]\n\n def radius(target):\n x,y = np.nonzero(d == target)\n return np.sqrt((x[0]-sample_side/2)**2+(y[0]-sample_side/2)**2)\n cands = (samples.min(), samples.max())\n rads = [radius(i) for i in cands]\n\n meand = means.min()\n #meand = cands[np.argmax(rads)]\n #meand = np.median(samples)\n #meand = samples.min() if np.median(samples) > np.mean(samples) else samples.max()\n return x,y,meand,1", "def shuffle_points(mutated_genome,index):\n random.shuffle(mutated_genome[index][2])", "def predict_one(self):\n return (self.contexted.calc_p(\"\", self.seen + \"1\") /\n float(self.contexted.calc_p(\"\", self.seen)))", "def bernoulli(p):\r\n if np.random.random() < p:\r\n return 0\r\n else:\r\n return 1", "def _sample_using_random(\n self,\n p: float = 0.1,\n ):\n return sa.func.random() < p", "def sample(a, p):\n if (len(a) != len(p)):\n raise Exception('a != p')\n p = np.array(p)\n p = p / p.sum()\n r = random.random()\n n = len(a)\n total = 0 # range: [0,1]\n for i in xrange(n):\n total += p[i]\n if total > r:\n return a[i]\n return a[i]", "def posterior_sample(self):\n pass", "def switch_points(mutated_genome,index):\n point_index1 = random.randint(0,max(0,len(mutated_genome[index][2])-1))\n point_index2 = random.randint(0,max(0,len(mutated_genome[index][2])-1))\n temp = mutated_genome[index][2][point_index1]\n mutated_genome[index][2][point_index1] = mutated_genome[index][2][point_index2]\n mutated_genome[index][2][point_index2] = temp", "def pickANeighbor(self):\n # pick a random feature\n featureIndex = 
random.randint(0, len(self.features) - 1)\n # get a pointer to that feature\n feature = self.features[featureIndex]\n # pick a random number based on the size of the feature's domain\n domainIncrement = random.randint(0, len(feature.domain) - 1)\n # get the index within the domain of the current feature value\n domainIndex = feature.domain.index(feature.value)\n # go to a different value in the domain\n newDomainIndex = (domainIndex + domainIncrement) % len(feature.domain)\n # assign the value from the domain\n feature.value = feature.domain[newDomainIndex]\n # return the feature and value that changed\n return (featureIndex, domainIndex)", "def sample_response(self, slate_p):\n slate_p[slate_p >= 0.5] = 1.0\n slate_p[slate_p < 0.5] = 0.0\n# m = Bernoulli(slate_p)\n# return m.sample()\n return slate_p", "def sample(self, probs):\n all_abstain = (self.label_matrix == -1).sum(axis=1) == self.label_matrix.shape[1]\n self.is_in_pool = (self.ground_truth_labels == -1) & ~ all_abstain & (self.y_train != -1)\n self.valid_buckets = np.unique(self.unique_inverse[self.is_in_pool])\n self.is_valid_bucket = np.array([\n True if i in self.valid_buckets else False for i in range(len(self.unique_idx))])\n self.bucket_probs = probs.detach().numpy()[self.unique_idx]\n\n pick = random.uniform(0, 1)\n if pick < self.randomness:\n # Choose random bucket instead of following a specific query strategy\n chosen_bucket = np.random.choice(self.valid_buckets)\n else:\n chosen_bucket = np.random.choice(self.query())\n\n return random.choice(np.where((self.unique_inverse == chosen_bucket) & self.is_in_pool)[0])", "def select_action(self, state):\r\n policy_s = self.epsilon_greedy_probs(self.nA, self.Q[state], self.count, self.epsilon)\r\n return np.random.choice(np.arange(self.nA), p=policy_s)" ]
[ "0.65602857", "0.63671464", "0.6342662", "0.6314846", "0.6178516", "0.6008465", "0.5948356", "0.5907734", "0.59043133", "0.58996654", "0.58957005", "0.5890248", "0.58802027", "0.586684", "0.5834397", "0.57978284", "0.57956886", "0.578882", "0.57851636", "0.5766072", "0.57457227", "0.573155", "0.5699313", "0.5677435", "0.56740767", "0.56716657", "0.5670466", "0.566275", "0.56591016", "0.56510735", "0.564893", "0.56482476", "0.56453574", "0.56327313", "0.5611205", "0.56097007", "0.56097007", "0.55921733", "0.5591228", "0.55833757", "0.5583002", "0.5580714", "0.55795795", "0.557898", "0.55741596", "0.5570109", "0.5569742", "0.5567276", "0.55431145", "0.554121", "0.5536896", "0.55281174", "0.5523146", "0.5517623", "0.5516465", "0.5513582", "0.5506375", "0.550009", "0.5493833", "0.5490388", "0.5483347", "0.54803246", "0.54713404", "0.54702854", "0.5467973", "0.54592985", "0.54592985", "0.54592985", "0.5458911", "0.54545873", "0.54481405", "0.5441519", "0.5440908", "0.54383224", "0.5431335", "0.5425248", "0.5424477", "0.54209167", "0.541524", "0.5411366", "0.54103744", "0.54091066", "0.54077834", "0.54044056", "0.54040354", "0.5398252", "0.53966355", "0.5396563", "0.5386117", "0.538348", "0.5372036", "0.53669786", "0.5366608", "0.5358008", "0.5356699", "0.53554124", "0.53540415", "0.5352181", "0.5351717", "0.53425646", "0.5341104" ]
0.0
-1
Selecting a distinct pair of forests for crossover. The better a forest's fitness, the higher its probability of being selected.
def selection(self):
        first = self.select_by_prob()
        second = first
        while self._forests[first] == self._forests[second]:
            second = self.select_by_prob()
        return self._forests[first], self._forests[second]
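The stored snippet above only calls a `select_by_prob` helper; its definition is not part of the record. Below is a minimal roulette-wheel sketch of what such a helper could look like — the standalone signature, the assumption that larger non-negative fitness values are better, and the example data are illustrative, not taken from the dataset.

import random

def select_by_prob(fitnesses):
    """Return the index of a forest picked with probability proportional
    to its fitness (classic roulette-wheel selection); assumes larger
    fitness values are better and all values are non-negative."""
    total = sum(fitnesses)
    pick = random.uniform(0.0, total)
    running = 0.0
    for index, fitness in enumerate(fitnesses):
        running += fitness
        if running >= pick:
            return index
    return len(fitnesses) - 1  # guard against floating-point round-off

# Quick check: a fitter forest is returned more often.
counts = [0, 0, 0]
for _ in range(10000):
    counts[select_by_prob([1.0, 2.0, 7.0])] += 1
print(counts)  # counts come out roughly in the ratio 1:2:7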
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def crossoverIndividuals(father, mother, bwsFitnessFunction, highIsGood):\n\n #choose depth of crossover point at random\n crossoverDepth = round(random.uniform(1,father.getDepth()))\n\n #get all subtrees of father and mother at that layer of deepness\n fatherNodesAtLayer = father.getNodesAtDepth(crossoverDepth)\n motherNodesAtLayer = mother.getNodesAtDepth(crossoverDepth)\n\n numberOfNodesinLayer = pow(2, crossoverDepth)\n\n #if no fitnessfunction is supplied, use random crossover\n if bwsFitnessFunction is None:\n indexM = round(random.uniform(0,numberOfNodesinLayer - 1))\n indexF = round(random.uniform(0,numberOfNodesinLayer - 1))\n\n #if bws (Best-Worst-Subtree) crossover is used, at crossoverDepth\n #find the best subtree from father and the worst from mother\n else:\n fitnessValuesOfFatherNodes = list(map(bwsFitnessFunction, fatherNodesAtLayer))\n fitnessValuesOfMotherNodes = list(map(bwsFitnessFunction, motherNodesAtLayer))\n\n if highIsGood:\n indexF = fitnessValuesOfFatherNodes.index(max(fitnessValuesOfFatherNodes))\n indexM = fitnessValuesOfMotherNodes.index(min(fitnessValuesOfMotherNodes))\n else:\n indexF = fitnessValuesOfFatherNodes.index(min(fitnessValuesOfFatherNodes))\n indexM = fitnessValuesOfMotherNodes.index(max(fitnessValuesOfMotherNodes))\n\n fatherCrossOverNode = copy.deepcopy(fatherNodesAtLayer[indexF])\n\n #exchange identified crossover nodes\n child = copy.deepcopy(mother)\n child.updateSubTree(crossoverDepth, indexM, fatherCrossOverNode)\n\n return child", "def _crossover(self, sel):\n offspring = []\n for p1, p2 in sel:\n p1 = copy.deepcopy(p1)\n p2 = copy.deepcopy(p2)\n\n tmp = self.op.crossover(\n copy.deepcopy(p1['individual']),\n copy.deepcopy(p2['individual']))\n if not tmp[0] is None and not tmp[1] is None:\n c1 = {\n 'individual': tmp[0],\n 'fitness': self.op.fitness(tmp[0])\n }\n c2 = {\n 'individual': tmp[1],\n 'fitness': self.op.fitness(tmp[1])\n }\n\n offspring.append(\n c1 if c1['fitness'] < p1['fitness'] else p1)\n offspring.append(\n c2 if c2['fitness'] < p2['fitness'] else p2)\n else:\n offspring.extend((p1, p2))\n return offspring", "def _next_generation(self, previous_generation):\n self._fullInput, self._fullOutput = previous_generation.get_data()\n self.power = self.settings.population_count\n for forest_iteration in range(self.power):\n first, second = previous_generation.selection()\n print 'selected for crossover ->', first.fitness, second.fitness\n self._forests.append(OneForest(self.settings, first_forest=first, second_forest=second))", "def best_crossover(feature_set, population):\n new = []\n pop = [x for y in population for x in y]\n most = pop[0]\n max = float(\"-inf\")\n all = list(set(pop))\n for j in range(0, len(feature_set)):\n for i in all:\n x = pop.count(i)\n if x > max:\n max = x\n most = i\n new.append(most)\n pop = filter(lambda a: a != most, pop)\n max = float(\"-inf\")\n\n return set(new).union(feature_set)", "def best_iterative_based_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor):\n\titerative_based_score = 0\n\t# given that all pairs use the same amount of features, the position 0 was arbitrarily selected to compute the number of features being used\n\tmin_number_features = int(0.15*len(train_features[0]))\n\tmax_number_features = int(0.85*len(train_features[0]))\n\n\t# min_number_features = 19\n\t# max_number_features = 20\n\n\titerative_based_selector = None\n\titerative_based_train_features_selected = None\n\titerative_based_test_features_selected = None\n\n\tfor 
i in range(min_number_features, max_number_features):\n\t\tprint(i)\n\t\ttemp_iterative_based_selector = RFE(RandomForestRegressor(n_estimators=100), n_features_to_select=i)\n\t\ttemp_iterative_based_selector.fit(train_features, train_similarity_target)\n\t\ttemp_iterative_based_train_features_selected = temp_iterative_based_selector.transform(train_features)\n\t\ttemp_iterative_based_test_features_selected = temp_iterative_based_selector.transform(test_features)\n\n\t\tregressor.fit(temp_iterative_based_train_features_selected, train_similarity_target)\n\n\t\ttemp_score = regressor.score(temp_iterative_based_test_features_selected, test_similarity_target)\n\t\tprint(\"The score on the selected features (Iterative Based Selector): %.3f\" % temp_score)\n\n\t\tif temp_score > iterative_based_score:\n\t\t\titerative_based_score = temp_score\n\t\t\titerative_based_selector = temp_iterative_based_selector\n\t\t\titerative_based_train_features_selected = temp_iterative_based_train_features_selected\n\t\t\titerative_based_test_features_selected = temp_iterative_based_test_features_selected\n\n\titerative_based_mask = iterative_based_selector.get_support()\n\tprint(\"This is the iterative based mask: \")\n\tprint(iterative_based_mask)\n\n\treturn iterative_based_selector, iterative_based_score, iterative_based_train_features_selected, iterative_based_test_features_selected, iterative_based_mask", "def select(self):\n\n def truncate(self):\n \"\"\" Truncates forest to maximum number of trees. \"\"\"\n\n self.population = self.population[:self.max_number_trees]\n\n def SortOnItem(list_, item_loc):\n \"\"\" Sorts based on a given item. \"\"\"\n\n templist = [elmt[item_loc] for elmt in list_]\n index = np.argsort(templist)\n return [list_[i] for i in index]\n\n # adds current seedlings to forest\n for tree in self.seedlings:\n\n # if tree does not competes with another existing one, adds it\n if tree not in self.population:\n self.population.append(tree)\n\n # sorts the trees of the forest in ascending values - minimization\n self.population = SortOnItem(self.population, item_loc=0)\n\n # removes unfit trees from forest\n truncate(self)", "def crossover(p1, p2):\n genotype = []\n \n #Your code here\n \n return {'genotype': genotype, 'fitness': None}", "def crossover(self):\n self.sort_population()\n elite_amount = round(self.elite_rate * self.population_size)\n # preserve from the top\n new_population = [ele for ele in self.population if ele.ttl > 0]\n for individual in new_population:\n if individual.ttl > 0:\n individual.ttl -= 1\n new_population += self.population[:elite_amount]\n\n while len(new_population) < self.population_size:\n # newGene = self.crossBelowCrossRate()\n new_gene, new_gene2 = self.cross_on_arb_seq()\n if random() <= self.mutate_rate:\n self.mutate_append(new_gene)\n new_population.append(new_gene)\n if len(new_population) == self.population_size:\n break\n\n if random() <= self.mutate_rate:\n self.mutate_append(new_gene2)\n new_population.append(new_gene2)\n self.population = new_population", "def _crossover(self, best_population, crossover, n_parents=2, method=\"uniform_swap\"):\n if crossover:\n # randomly select parents\n parents_indexes = torch.randint(0, len(best_population), (self.population_size, n_parents),\n device=self.device)\n new_population = torch.zeros(self.population.shape, device=self.device)\n i = 0\n for p_idx in parents_indexes:\n new_population[i] = self._produce_child(best_population[p_idx], method=method)\n i += 1\n else:\n # randomly repeat best 
individuals\n new_pop_indexes = torch.randint(0, len(best_population), (self.population_size,), device=self.device)\n new_population = best_population[new_pop_indexes]\n return new_population", "def _apply_crossover(pop, op, pb):\n for i in range(1, len(pop), 2):\n if random.random() < pb:\n pop[i - 1], pop[i] = op(pop[i - 1], pop[i])\n del pop[i - 1].fitness.values\n del pop[i].fitness.values\n return pop", "def heuristic_crossover(random, candidates, args):\r\n crossover_rate = args.setdefault('crossover_rate', 1.0)\r\n bounder = args['_ec'].bounder\r\n \r\n if len(candidates) % 2 == 1:\r\n candidates = candidates[:-1]\r\n \r\n # Since we don't have fitness information in the candidates, we need \r\n # to make a dictionary containing the candidate and its corresponding \r\n # individual in the population.\r\n population = list(args['_ec'].population)\r\n lookup = dict(zip([pickle.dumps(p.candidate, 1) for p in population], population))\r\n \r\n moms = candidates[::2]\r\n dads = candidates[1::2]\r\n children = []\r\n for mom, dad in zip(moms, dads):\r\n if random.random() < crossover_rate:\r\n bro = copy.copy(dad)\r\n sis = copy.copy(mom)\r\n mom_is_better = lookup[pickle.dumps(mom, 1)] > lookup[pickle.dumps(dad, 1)]\r\n for i, (m, d) in enumerate(zip(mom, dad)):\r\n negpos = 1 if mom_is_better else -1\r\n val = d if mom_is_better else m\r\n bro[i] = val + random.random() * negpos * (m - d)\r\n sis[i] = val + random.random() * negpos * (m - d)\r\n bro = bounder(bro, args)\r\n sis = bounder(sis, args)\r\n children.append(bro)\r\n children.append(sis)\r\n else:\r\n children.append(mom)\r\n children.append(dad)\r\n return children", "def survivors_selection(self):\n q = 5\n new_population = []\n for i in range(self._population_size):\n batch = []\n for j in range(q):\n r = random.randint(0, (self._child2population_ratio + 1) * self._population_size - 1)\n if r < self._population_size:\n batch.append(self._population[r])\n else:\n batch.append(self._children[r - self._population_size])\n new_population.append(self.select_best(batch))\n\n self._population = new_population", "def heuristic_crossover(random, candidates, args):\n crossover_rate = args.setdefault('crossover_rate', 1.0)\n bounder = args['_ec'].bounder\n \n if len(candidates) % 2 == 1:\n candidates = candidates[:-1]\n \n # Since we don't have fitness information in the candidates, we need \n # to make a dictionary containing the candidate and its corresponding \n # individual in the population.\n population = list(args['_ec'].population)\n lookup = dict(zip([pickle.dumps(p.candidate, 1) for p in population], population))\n \n moms = candidates[::2]\n dads = candidates[1::2]\n children = []\n for mom, dad in zip(moms, dads):\n if random.random() < crossover_rate:\n bro = copy.copy(dad)\n sis = copy.copy(mom)\n mom_is_better = lookup[pickle.dumps(mom, 1)] > lookup[pickle.dumps(dad, 1)]\n for i, (m, d) in enumerate(zip(mom, dad)):\n negpos = 1 if mom_is_better else -1\n val = d if mom_is_better else m\n bro[i] = val + random.random() * negpos * (m - d)\n sis[i] = val + random.random() * negpos * (m - d)\n bro = bounder(bro, args)\n sis = bounder(sis, args)\n children.append(bro)\n children.append(sis)\n else:\n children.append(mom)\n children.append(dad)\n return children", "def selection(pop, fitness, n_keep):\n total_fit = sum([1.0 / x for x in fitness])\n prob = []\n cumulative = 0.0\n for f in fitness:\n cumulative += ((1.0 / f) / total_fit)\n prob.append(cumulative)\n new_pop = []\n for i in range(len(pop) - n_keep):\n r = 
np.random.rand()\n for j in range(len(prob)):\n if prob[j] >= r:\n new_pop.append(pop[j])\n break\n return new_pop", "def feature_selection(cls, tbl, thresh=-1):\n\n numerical_columns = [col for col in tbl.columns if col not in [\"F21\", \"F20\", \"F54\", \"Name\"]]\n X = tbl[numerical_columns[:-1]].values\n y = tbl[numerical_columns[-1]].values\n\n n = X.shape[1]\n slist = np.zeros((n, 3))\n slist[:, -1] = 1\n\n # identify relevant features\n slist[:, 0] = cls._c_correlation(X, y) # compute 'C-correlation'\n idx = slist[:, 0].argsort()[::-1]\n slist = slist[idx,]\n slist[:, 1] = idx\n if thresh < 0:\n thresh = np.median(slist[-1, 0])\n\n slist = slist[slist[:, 0] > thresh, :] # desc. ordered per SU[i,c]\n\n \"Identify redundant features among the relevant ones\"\n cache = {}\n m = len(slist)\n p_su, p, p_idx = cls._get_first_element(slist)\n for i in xrange(m):\n q_su, q, q_idx = cls._get_next_element(slist, p_idx)\n if q:\n # p, q = int(p), int(q)\n while q:\n if (p, q) in cache:\n pq_su = cache[(p, q)]\n else:\n pq_su = cls._symmetrical_uncertainty(X[:, int(p)], X[:, int(q)])\n cache[(p, q)] = pq_su\n\n if pq_su >= q_su:\n slist = cls._remove_element(slist, q_idx)\n q_su, q, q_idx = cls._get_next_element(slist, q_idx)\n\n p_su, p, p_idx = cls._get_next_element(slist, p_idx)\n if not p_idx:\n break\n\n sbest = slist[slist[:, 2] > 0, :2]\n selected_features = [int(ff) for ff in sbest[:, 1]]\n selected_features = [numerical_columns[i] for i in selected_features]\n selected_features.insert(0, \"Name\")\n selected_features.append(\"category\")\n new_tbl = tbl[selected_features]\n\n return new_tbl", "def best_model_based_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor):\n\tmodel_based_score = 0\n\tscaling_factors = [\"0.25*mean\", \"0.5*mean\", \"median\", \"1.25*mean\", \"1.5*mean\"]\n\t# scaling_factors = [\"0.5*mean\", \"median\"]\n\tmodel_based_selector = None\n\tmodel_based_train_features_selected = None\n\tmodel_based_test_features_selected = None\n\n\tfor factor in scaling_factors:\n\t\tprint(factor)\n\t\ttemp_model_based_selector = SelectFromModel(RandomForestRegressor(n_estimators=100), threshold=factor)\n\t\ttemp_model_based_selector.fit(train_features, train_similarity_target)\n\t\ttemp_model_based_train_features_selected = temp_model_based_selector.transform(train_features)\n\t\ttemp_model_based_test_features_selected = temp_model_based_selector.transform(test_features)\n\n\t\tregressor.fit(temp_model_based_train_features_selected, train_similarity_target)\n\n\t\ttemp_score = regressor.score(temp_model_based_test_features_selected, test_similarity_target)\n\t\tprint(\"The score on the selected features (Model Based Selector): %.3f\" % temp_score)\n\n\t\tif temp_score > model_based_score:\n\t\t\tmodel_based_score = temp_score\n\t\t\tmodel_based_selector = temp_model_based_selector\n\t\t\tmodel_based_train_features_selected = temp_model_based_train_features_selected\n\t\t\tmodel_based_test_features_selected = temp_model_based_test_features_selected\n\n\tmodel_based_mask = model_based_selector.get_support()\n\tprint(\"This is the model based mask: \")\n\tprint(model_based_mask)\n\n\treturn model_based_selector, model_based_score, model_based_train_features_selected, model_based_test_features_selected, model_based_mask", "def _select(self):\n sel = []\n\n # choose randomly while favouring fit individuals\n lp = len(self.population) // 2\n for _ in range(lp):\n idx1 = idx2 = int(math.sqrt(random.randrange(lp**2+1)))\n while idx1 == 
idx2:\n idx2 = int(math.sqrt(random.randrange(lp**2+1)))\n\n p1, p2 = self.population[idx1], self.population[idx2]\n sel.append((p1, p2))\n\n return sel", "def step(individuals, grammar, replacement, selection, fitness_function, best_ever):\n #Select parents\n parents = selection(individuals)\n #Crossover parents and add to the new population\n new_pop = []\n while len(new_pop) < GENERATION_SIZE:\n new_pop.extend(onepoint_crossover(*random.sample(parents, 2)))\n #Mutate the new population\n new_pop = list(map(int_flip_mutation, new_pop))\n #Evaluate the fitness of the new population\n evaluate_fitness(new_pop, grammar, fitness_function)\n #Replace the sorted individuals with the new populations\n individuals = replacement(new_pop, individuals)\n best_ever = max(best_ever, max(individuals))\n return individuals, best_ever", "def randomForestClassifier(self, train_cols, test_cols, targets, feature_selction_var, min_abundance_threshold, shuffle=False):\n from sklearn.ensemble import RandomForestClassifier\n #from sklearn.ensemble import RandomForestRegressor\n \n #train = self.abundance_df.loc[:,train_cols] #train.as_matrix(cols)\n train = self.abundance_df[self.abundance_df['masked']==False].loc[:,train_cols] #train.as_matrix(cols)\n #test = self.abundance_df.loc[:,test_cols] #.as_matrix(test_cols)\n test = self.abundance_df[self.abundance_df['masked']==False].loc[:,test_cols] #.as_matrix(test_cols)\n #names = list(self.abundance_df.loc[:, 'species'])\n names = list(self.abundance_df[self.abundance_df['masked']==False].loc[:, 'species'])\n \n #most_common_species_set = set()\n #for col in train_cols:\n # sorted_series = self.abundance_df.loc[:, col].sort_values(ascending=False)[:100]\n # most_common_species_set |= set(list(sorted_series.index))\n #most_common_species_list = []\n #for id0 in most_common_species_set:\n # #print(max(self.abundance_df.loc[id0,train_cols]))\n # if max(self.abundance_df.loc[id0,train_cols]) >= min_abundance_threshold:\n # most_common_species_list.append(id0)\n ##print(len(most_common_species_list))\n #most_common_species_set = set(most_common_species_list)\n #train = train.loc[list(most_common_species_set),:]\n #test = test.loc[list(most_common_species_set),:]\n #names = list(self.abundance_df.loc[list(most_common_species_set),'species'])\n \n #feature selection by variance\n from sklearn.feature_selection import VarianceThreshold\n sel = VarianceThreshold(threshold=(0.999 * (1 - 0.999))) \n if feature_selction_var:\n #ds1 = np.transpose(ds10.as_matrix())\n #ds1 = sel.fit_transform(np.transpose(ds10.as_matrix()))\n #ds2 = np.transpose(ds20.as_matrix())\n #train = sel.fit_transform(np.transpose(train.as_matrix()))\n train = sel.fit_transform(np.transpose(train.values))\n \n #names = list(self.abundance_df.loc[:, 'species'].as_matrix()[sel.get_support()])\n #names = list(self.abundance_df[self.abundance_df['masked']==False].loc[:, 'species'].as_matrix()[sel.get_support()])\n names = list(self.abundance_df[self.abundance_df['masked']==False].loc[:, 'species'].values[sel.get_support()])\n #test = sel.fit_transform(np.transpose(test.as_matrix()))\n test = sel.fit_transform(np.transpose(test.values))\n ds10 = np.asmatrix(train)[[i for i, j in enumerate(targets) if j == 0],:]\n ds1 = np.transpose(sel.fit_transform(np.transpose(ds10)))\n else:\n\n #train = np.transpose(train.as_matrix())\n train = np.transpose(train.values)\n #test = np.transpose(test.as_matrix())\n test = np.transpose(test.values)\n ds10 = train.iloc[:,[i for i, j in enumerate(targets) if j == 0]]\n #ds1 
= np.transpose(ds10.as_matrix())\n ds1 = np.transpose(ds10.values)\n\n if shuffle == 'index':\n from random import shuffle\n shuffle(names)\n\n #rf = RandomForestClassifier(n_estimators=10)\n target = targets \n #group1 = list(self.abundance_df.loc[:,train_cols].columns[:target.count(0)])\n group1 = list(self.abundance_df[self.abundance_df['masked']==False].loc[:,train_cols].columns[:target.count(0)])\n #group2 = list(self.abundance_df.loc[:,train_cols].columns[target.count(0):])\n group2 = list(self.abundance_df[self.abundance_df['masked']==False].loc[:,train_cols].columns[target.count(0):])\n\n #rf = RandomForestRegressor(n_estimators=1000)#, class_weight=\"balanced\")\n rf = RandomForestClassifier(n_estimators=1000) # bootstrap=False\n #, max_features=100)#, min_sample_leaf=50)\n #rf = RandomForestRegressor(n_estimators=20, max_features=2)\n #class_weight=\"balanced\" #{class_label: weight}\n #n_estimators=1000,\n rf.fit(train, target)\n \n #from sklearn.metrics import roc_auc_score\n #for l in leaf:\n #model = RandomForestRegressor(min_samples_split=2, max_depth=None, bootstrap=False, min_samples_leaf=2)\n # #n_estimator=200, oob_score=True, min_samples_leaf=10,max_features=f, \n #model.fit(train,target)\n # #print(\"AUC - ROC : \")\n # #print(roc_auc_score(target,model.oob_prediction_))\n # #print(model.feature_importances_)\n \n #from sklearn.ensemble import ExtraTreesClassifier\n #model = ExtraTreesClassifier()\n #model.fit(train, target)\n \n from treeinterpreter import treeinterpreter as ti\n prediction, bias, contributions = ti.predict(rf, np.array(train))\n \n #for i in range(len(train)):\n # j = 0\n # # print(i)\n # #print(\"\\tBias (trainset mean)\")\n # #print(bias[i])\n # # print(contributions[0][0])\n # #for c, feature in sorted(zip(contributions[i], \n # # names), \n # # #self.abundance_df.index), \n # # key=lambda x: -abs(x[0])):\n # for c, feature in zip(contributions[i], list(self.abundance_df.index)):\n # if c[0] != 0:\n # #print feature, ':\\t', \"{:.2e}\".format(c), '\\t', self.abundance_df.loc[feature, 'species']\n # if j <10:\n # # print()'\\t' + self.abundance_df.loc[feature, 'species'], '\\t', \"{:.2e}\".format(c[0]))\n # j += 1\n totalc = np.mean(contributions, axis=0) \n \n #from sklearn import model_selection\n #from sklearn.model_selection import cross_val_score\n #clf = RandomForestClassifier(n_estimators=10, max_depth=None, min_samples_split=2, random_state=0)\n #scores = cross_val_score(clf, X, y)\n \n ##compare 2 groups of samples\n prediction1, bias1, contributions1 = ti.predict(rf, np.array(ds1))\n\n mean_contri = [0 for i in xrange(len(names))]\n for s in xrange(len(ds1)):\n for i in xrange(len(names)):\n mean_contri[i] += contributions1[s][i][0]\n mean_contri = [x/len(ds1)for x in mean_contri]\n \n names_list = []\n #for c, org in sorted(zip(mean_contri, list(self.abundance_df.loc[:,'species'])), reverse=True):\n for c, org in sorted(zip(mean_contri, names), reverse=True):\n if c != 0:\n #print(self.abundance_df.loc[i,group1])\n #idx = self.abundance_df[self.abundance_df['species'] == org].index.tolist()[0]\n idx = self.abundance_df[self.abundance_df['masked']==False][self.abundance_df['species'] == org].index.tolist()[0]\n if shuffle:\n #print(names.index(org))\n #idx = list(self.abundance_df.index)[names.index(org)]\n idx = list(self.abundance_df[self.abundance_df['masked']==False].index)[names.index(org)]\n #maximum = max(self.abundance_df.loc[idx,group1 + group2])\n maximum = max(self.abundance_df[self.abundance_df['masked']==False].loc[idx,group1 
+ group2])\n #print(str(round(c, 3)) + '\\t' + org + '\\t' + str(round(maximum,3)))\n names_list.append([round(c, 3), org, round(maximum,3)])\n \n return names_list", "def _choose_best_feature(self, X, y, label, sample_weights=None):\n best_feature_idx = 0\n # YOUR CODE HERE\n # Note that you need to implement the sampling feature part here for random forest!\n # Hint: You may find `np.random.choice` is useful for sampling.\n # begin answer\n n_features = X.shape[1]\n if self.sample_feature:\n max_features=max(1, min(n_features, int(np.round(np.sqrt(n_features)))))\n new_features=np.random.choice(n_features, max_features, replace=False)\n new_X=X[:, new_features]\n else:\n new_X=X\n n_new_features=new_X.shape[1]\n #new_features=np.random.choice(n_features, n_features, replace=False)\n #old_cost=self.entropy(y, sample_weights)\n #use C4.5 algorirhm\n best_impurity=None\n best_feature_idx=0\n best_feature_val=X[0, 0]\n for i in range(n_new_features):\n unique_vals=np.unique(X[:,i])\n for value in unique_vals:\n sub1_X, sub1_y, label1, sub1_sample_weights, sub2_X, sub2_y, label2, sub2_sample_weights=self._split_dataset(X, y, label, i, value, sample_weights)\n if len(sub1_y)>0 and len(sub2_y)>0:\n new_impurity=self._impurity(y, sub1_y, sub2_y)\n if best_impurity is None or new_impurity > best_impurity:\n best_impurity=new_impurity\n best_feature_idx=i\n best_feature_val=value \n # end answer\n return best_feature_idx, best_feature_val", "def crossover(self):\n print(' - crossover')\n s = time.time()\n\n # make a list with all index\n tmp_list = list(range(0, self.size))\n while len(tmp_list) > 0:\n candidate_1 = random.choice(tmp_list)\n tmp_list.remove(candidate_1)\n candidate_2 = random.choice(tmp_list)\n tmp_list.remove(candidate_2)\n\n # ceck if the two candidates will crossover\n chance = random.uniform(0, 1)\n if chance <= self.crossover_rate:\n self.crossover_two_candidates(candidate_1, candidate_2)\n\n e = time.time()\n print(\" - time: \", e - s)", "def _feature_selection(self , x ,y):\n # initialize good features list\n # and best scores to keep track of both\n good_features = []\n best_scores = []\n\n # calculating the number of features\n num_features = x.shape[1]\n\n # infinite loop\n while True:\n # intialize best feature and score of this loop\n this_feature = None\n best_score = 0\n\n # loop over all features\n for feature in range(num_features):\n # if feature is already in good features,\n # skip this for loop\n if feature in good_features:\n\n continue\n # selected features are all good till now\n # and current feature\n selected_features = good_features + [feature]\n # remove all other feature from the data\n xtrain = x[: , selected_features]\n # calculate the score , in our case AUC\n score = self.evaluate_score(xtrain , y)\n # if score is greater then the best score\n # of this loop, change best score and best feature\n if score > best_score:\n this_feature = feature\n best_score = score\n\n # if we have selected a feature , add it to\n # the good feature list and update best score list\n if this_feature != None:\n good_features.append(this_feature)\n best_scores.append(best_score)\n\n # if we did not improve during the last two rounds,\n # exit the while loop\n if len(best_score) > 2:\n if best_scores[-1] < best_scores[-2]:\n break\n\n # return the best score and good features\n # why do we remove the last data point?\n return best_scores[:-1] , good_features[:-1]", "def selection(self):\n\n for i in range(self.pop_num*3): # It is important. 
Next, we will rank the array of parents and children in ascending order of survivability (sum (fit)).\n self.par_and_sons[i].fit = SimpleSegmentationGA.fitness_function(self.gray, self.delta_x, self.length, self.par_and_sons[i].A)\n\n # Sort.\n self.par_and_sons = sorted(self.par_and_sons, key=lambda individ: individ.fit) \n self.population=self.par_and_sons[:self.pop_num].copy()", "def feature_selection(train_features, test_features, train_similarity_target, test_similarity_target, regressor, used_features):\n\t# percentile selector\n\tpercentile_selector, percentile_score, percentile_train_features_selected, percentile_test_features_selected, percentile_mask = best_percentile_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor)\n\n\t# model based selector\n\tmodel_based_selector, model_based_score, model_based_train_features_selected, model_based_test_features_selected, model_based_mask = best_model_based_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor)\n\n\t# iterative based selector\n\titerative_based_selector, iterative_based_score, iterative_based_train_features_selected, iterative_based_test_features_selected, iterative_based_mask = best_iterative_based_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor)\n\n\tall_scores = []\n\n\tregressor.fit(train_features, train_similarity_target)\n\tprint(\"The score on all features: %.3f\" % regressor.score(test_features, test_similarity_target))\n\tall_scores.append(regressor.score(test_features, test_similarity_target))\n\n\t# show results for the percentile selector\n\tall_scores.append(percentile_score)\n\n\t# show results for the model based selector\n\tall_scores.append(model_based_score)\n\n\t# show results for the iterative based selector\n\tall_scores.append(iterative_based_score)\n\n\tmax_value_position = all_scores.index(max(all_scores))\n\n\tif max_value_position == 0:\n\t\tprint(\"Returning all features!\\n\")\n\t\treturn train_features, test_features\n\telif max_value_position == 1:\n\t\tpercentile_mask = build_mask(percentile_mask, used_features)\n\t\tmask_save_path = os.path.join('feature_selection_masks', 'percentile_mask.txt')\n\t\tdebug_data(percentile_mask, mask_save_path)\n\n\t\tprint(\"Returning features selected with the percentile selector!\\n\")\n\t\treturn percentile_selector, percentile_train_features_selected, percentile_test_features_selected\n\telif max_value_position == 2:\n\t\tmodel_based_mask = build_mask(model_based_mask, used_features)\n\t\tmask_save_path = os.path.join('feature_selection_masks', 'model_based_mask.txt')\n\t\tdebug_data(model_based_mask, mask_save_path)\n\n\t\tprint(\"Returning features selected with the model based selector!\\n\")\n\t\treturn model_based_selector, model_based_train_features_selected, model_based_test_features_selected\n\telse:\n\t\titerative_based_mask = build_mask(iterative_based_mask, used_features)\n\t\tmask_save_path = os.path.join('feature_selection_masks', 'iterative_based_mask.txt')\n\t\tdebug_data(iterative_based_mask, mask_save_path)\n\n\t\tprint(\"Returning features selected with the iterative based selector!\\n\")\n\t\treturn iterative_based_selector, iterative_based_train_features_selected, iterative_based_test_features_selected", "def chooseFathers(population, choose_father_options=\"best_value\"):\n\n if choose_father_options == \"tournament\":\n tipo = Parents.TOURNAMENT\n elif choose_father_options == 
\"best_value\":\n tipo = Parents.BEST_VALUE\n elif choose_father_options == \"pairs\":\n tipo = Parents.PAIRS\n else:\n tipo = Parents.TOURNAMENT\n parents = []\n # population.sort(key=lambda x: x.fitnessValue, reverse=False)\n # print(tipo.name)\n if tipo == Parents.TOURNAMENT: # tournament\n # Seleccion por torneo\n # population.sort(key=lambda x: x.fitnessValue, reverse=False)\n limit = int(len(population) / 2)\n for i in range(0, limit):\n parentA = population[i]\n parentB = population[i + 1]\n parents.append(\n parentB if parentB.fitnessValue < parentA.fitnessValue else parentA\n )\n i += 2\n return parents\n elif tipo == Parents.BEST_VALUE: # Best value\n # padres con el mejor valor fitness\n population.sort(key=lambda x: x.fitnessValue, reverse=True)\n limit = int(len(population) / 2)\n for i in range(0, limit):\n parentA = population[i]\n parents.append(parentA)\n return parents\n elif tipo == Parents.PAIRS:\n for j in range(0, len(population)):\n if j % 2 == 0:\n parentB = population[j]\n parents.append(parentB)\n\n return parents", "def crossover(x1,x2):\n for chromo in x1.chromosomes:\n result_chromos = [np.zeros((chromo.shape))]\n #result_chromos = [np.zeros((chromo.shape)) for chromo in x1.chromosomes]\n i = 0\n for j in range(len(x1.chromosomes[i])):\n for k in range(len(x1.chromosomes[i][j])):\n if(np.random.rand(1) < 0.5):\n result_chromos[i][j][k] = x1.chromosomes[i][j][k]\n else:\n result_chromos[i][j][k] = x2.chromosomes[i][j][k]\n if(np.random.rand(1)< 0.8):#at 0.3 very agressive\n result_chromos[i][j][k] += -0.05 + np.random.rand(1)*0.1\n return result_chromos", "def random_forest_grid(features, df, param_dict): \n X= features\n y = df['Severity'].values\n rf = RandomForestClassifier()\n rf_random = RandomizedSearchCV(estimator = rf, param_distributions = param_dict, n_iter = 70, cv = 5, scoring='f1',\n verbose=2, random_state=42, n_jobs = -1)\n result = rf_random.fit(X, y)\n return result.best_score_, result.best_params_", "def pick_grom_group(group, other, selected):\n\treturn Faction(over(group, selected), over(group + other, selected))", "def build_random_forest(X_train, y_train):", "def best_percentile_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor):\n\tpercentile_score = 0\n\tpercentiles = [25, 35, 45, 50, 55, 65, 75]\n\t# percentiles = [45]\n\tpercentile_selector = None\n\tpercentile_train_features_selected = None\n\tpercentile_test_features_selected = None\n\n\tfor percentile in percentiles:\n\t\tprint(percentile)\n\t\ttemp_percentile_selector = SelectPercentile(score_func=f_regression, percentile=percentile)\n\t\ttemp_percentile_selector.fit(train_features, train_similarity_target)\n\t\ttemp_percentile_train_features_selected = temp_percentile_selector.transform(train_features)\n\t\ttemp_percentile_test_features_selected = temp_percentile_selector.transform(test_features)\n\n\t\tregressor.fit(temp_percentile_train_features_selected, train_similarity_target)\n\n\t\ttemp_score = regressor.score(temp_percentile_test_features_selected, test_similarity_target)\n\t\tprint(\"The score on the selected features (Percentile Selector): %.3f\" % temp_score)\n\n\t\tif temp_score > percentile_score:\n\t\t\tpercentile_score = temp_score\n\t\t\tpercentile_selector = temp_percentile_selector\n\t\t\tpercentile_train_features_selected = temp_percentile_train_features_selected\n\t\t\tpercentile_test_features_selected = temp_percentile_test_features_selected\n\n\tpercentile_mask = percentile_selector.get_support()\n\tprint(\"This 
is the percentile mask: \")\n\tprint(percentile_mask)\n\n\treturn percentile_selector, percentile_score, percentile_train_features_selected, percentile_test_features_selected, percentile_mask", "def run(self):\n population_p = self.create_population()\n population_p = self.sort_population(population_p)\n best_x = population_p[0]\n for k in range(self.iteration):\n population_r = []\n # random.shuffle(population_p)\n for i in range(0, self.population_length, 2):\n mother = 0\n father = 1\n children = [self.random_chromosome(), self.random_chromosome()]\n while (mother == father) or (children[0] in population_p) or (children[1] in\n population_p):\n mother = random.randint(0, self.population_length - 1)\n father = random.randint(0, self.population_length - 1)\n children = self.cross(population_p[mother], population_p[father])\n children[0] = self.mutate(children[0])\n children[1] = self.mutate(children[1])\n\n population_r.append(children[0])\n population_r.append(children[1])\n\n population_p = self.new_population(population_p, population_r)\n if self.fitness(population_p[0]) < self.fitness(best_x):\n best_x = population_p[0]\n\n # print(population_p)\n return best_x", "def cross(self):\n\n for i in range(self.pop_num): # Put in the first pop_num elements of the \"Parents and Sons\" array our entire input population.\n self.par_and_sons[i].A=self.population[i].A.copy()\n\n random.shuffle(self.population) # Shuffle population.\n\n tt=0 # The counter that is needed to implement a non-trivial crossing.\n for s in range(0,self.pop_num,2): # From 0 to pop_num with step 2. That is. here we take pop_num / 2 pairs of parents.\n self.mother.A=self.population[tt+int(self.pop_num/2)].A # Let the last pop_num / 2 individuals of our population be our mothers.\n self.father.A=self.population[tt].A # And let first pop_num / 2 individuals of our population be dads.\n \n tt=tt+1 \n ran=random.random()\n\n for n in range(self.length): # Crossover.\n if random.random()>0.5:\n self.son1.A[n] = self.father.A[n]\n self.son2.A[self.length-1-n] = self.father.A[n]\n self.son3.A[n] = self.mother.A[n]\n self.son4.A[self.length-1-n] = self.mother.A[n]\n else:\n self.son1.A[n] = self.mother.A[n]\n self.son2.A[self.length-1-n] = self.mother.A[n]\n self.son3.A[n] = self.father.A[n]\n self.son4.A[self.length-1-n] = self.father.A[n]\n\n self.par_and_sons[self.pop_num+2*s].A = self.son1.A.copy()\n self.par_and_sons[self.pop_num+2*s+1].A = self.son2.A.copy()\n self.par_and_sons[self.pop_num+2*s+2].A = self.son3.A.copy()\n self.par_and_sons[self.pop_num+2*s+3].A = self.son4.A.copy()", "def stochastic_universal_selection(self, fitness, num_parents):\n\n fitness_sum = numpy.sum(fitness)\n if fitness_sum == 0:\n self.logger.error(\"Cannot proceed because the sum of fitness values is zero. Cannot divide by zero.\")\n raise ZeroDivisionError(\"Cannot proceed because the sum of fitness values is zero. 
Cannot divide by zero.\")\n probs = fitness / fitness_sum\n probs_start = numpy.zeros(probs.shape, dtype=float) # An array holding the start values of the ranges of probabilities.\n probs_end = numpy.zeros(probs.shape, dtype=float) # An array holding the end values of the ranges of probabilities.\n\n curr = 0.0\n\n # Calculating the probabilities of the solutions to form a roulette wheel.\n for _ in range(probs.shape[0]):\n min_probs_idx = numpy.where(probs == numpy.min(probs))[0][0]\n probs_start[min_probs_idx] = curr\n curr = curr + probs[min_probs_idx]\n probs_end[min_probs_idx] = curr\n probs[min_probs_idx] = 99999999999\n\n pointers_distance = 1.0 / self.num_parents_mating # Distance between different pointers.\n first_pointer = numpy.random.uniform(low=0.0, \n high=pointers_distance, \n size=1)[0] # Location of the first pointer.\n\n # Selecting the best individuals in the current generation as parents for producing the offspring of the next generation.\n if self.gene_type_single == True:\n parents = numpy.empty((num_parents, self.population.shape[1]), dtype=self.gene_type[0])\n else:\n parents = numpy.empty((num_parents, self.population.shape[1]), dtype=object)\n\n parents_indices = []\n\n for parent_num in range(num_parents):\n rand_pointer = first_pointer + parent_num*pointers_distance\n for idx in range(probs.shape[0]):\n if (rand_pointer >= probs_start[idx] and rand_pointer < probs_end[idx]):\n parents[parent_num, :] = self.population[idx, :].copy()\n parents_indices.append(idx)\n break\n\n return parents, numpy.array(parents_indices)", "def crossover(f,P_c_min,P_c_max,i,D,V,P,U):\n #ADAPTIVE Crossover\n if f[i] < np.mean(f):\n P_c = P_c_min + (P_c_max-P_c_min)*((f[i]-np.mean(f))/(np.max(f)-np.mean(f)))\n else:\n P_c = P_c_min\n\n delta = np.random.randint(0,D-1) \n for j in np.arange(D):\n if np.random.uniform(0,1) <= P_c or delta == j:\n U[i,j] = V[j]\n else:\n U[i,j]=P[i,j]\n\n return U", "def GTreeGPCrossoverSinglePoint(genome, **args):\n # print \"CrossoverAAAAAAAAAAA\"\n sister = None\n brother = None\n\n gMom = args[\"mom\"].clone()\n gDad = args[\"dad\"].clone()\n\n gMom.resetStats()\n gDad.resetStats()\n\n max_depth = gMom.getParam(\"max_depth\", None)\n max_attempt = gMom.getParam(\"max_attempt\", 15)\n\n if max_depth is None:\n Util.raiseException(\"You must specify the max_depth genome parameter !\", ValueError)\n\n if max_depth < 0:\n Util.raiseException(\n \"The max_depth must be >= 1, if you want to use GTreeCrossoverSinglePointStrict crossover !\", ValueError)\n\n momRandom = None\n dadRandom = None\n\n for i in xrange(max_attempt):\n\n dadRandom = gDad.getRandomNode()\n\n if dadRandom.getType() == Consts.nodeType[\"TERMINAL\"]:\n momRandom = gMom.getRandomNode(1)\n elif dadRandom.getType() == Consts.nodeType[\"NONTERMINAL\"]:\n momRandom = gMom.getRandomNode(2)\n\n mD = gMom.getNodeDepth(momRandom)\n dD = gDad.getNodeDepth(dadRandom)\n\n # Two nodes are root\n if mD == 0 and dD == 0:\n continue\n\n mH = gMom.getNodeHeight(momRandom)\n if dD + mH > max_depth:\n continue\n\n dH = gDad.getNodeHeight(dadRandom)\n if mD + dH > max_depth:\n continue\n\n break\n\n if i == (max_attempt - 1):\n assert gMom.getHeight() <= max_depth\n return gMom, gDad\n else:\n nodeMom, nodeDad = momRandom, dadRandom\n\n nodeMom_parent = nodeMom.getParent()\n nodeDad_parent = nodeDad.getParent()\n\n # Sister\n if args[\"count\"] >= 1:\n sister = gMom\n nodeDad.setParent(nodeMom_parent)\n\n if nodeMom_parent is None:\n sister.setRoot(nodeDad)\n else:\n nodeMom_parent.replaceChild(nodeMom, 
nodeDad)\n sister.processNodes()\n assert sister.getHeight() <= max_depth\n\n # Brother\n if args[\"count\"] == 2:\n brother = gDad\n nodeMom.setParent(nodeDad_parent)\n\n if nodeDad_parent is None:\n brother.setRoot(nodeMom)\n else:\n nodeDad_parent.replaceChild(nodeDad, nodeMom)\n brother.processNodes()\n assert brother.getHeight() <= max_depth\n\n return sister, brother", "def genetic_algorithm(population, lamda):\n maxGenerations = 5000\n generations_count = 0\n while generations_count <= maxGenerations:\n new_population = []\n generations_count += 1\n for i in range(0, len(population)):\n x = random_select(population, lamda)\n y = random_select(population, lamda)\n child = cross_over(x, y)\n child = mutate(child)\n new_population.append(child)\n population = new_population\n # Test for result\n conflicts = find_conflicts(population[i])\n if conflicts == 0:\n return True, population[i], generations_count\n return False, None, maxGenerations", "def feature_selection(x_train, y_train, nb_feats=150):\n cs = np.zeros(x_train.shape[1])\n for f in range(x_train.shape[1]):\n if np.isclose(np.sum(x_train[:, f]), 0):\n cs[f] = 0\n continue\n\n cs[f], p = spearmanr(x_train[:, f], np.mean(y_train, axis=1))\n select = np.argsort(np.abs(cs))[np.max([-nb_feats, -len(cs)]):]\n return select", "def selection(self,parents,popSize):\n for i in range(popSize):\n idx1 = np.random.randint(0,popSize)\n idx2 = np.random.randint(0,popSize)\n if parents.individuals[idx1].violationSum < parents.individuals[idx2].violationSum:\n self.individuals[i] = parents.individuals[idx1]\n elif parents.individuals[idx1].violationSum > parents.individuals[idx2].violationSum:\n self.individuals[i] = parents.individuals[idx2]\n elif parents.individuals[idx1].objectiveFunction[0] < parents.individuals[idx2].objectiveFunction[0]:\n self.individuals[i] = parents.individuals[idx1]\n else:\n self.individuals[i] = parents.individuals[idx2]\n \"\"\"\n print(\"Offsprings(self) Impresso dentro de selection (FIM).\")\n self.printPopulation(popSize)\n print(\"Parents Impresso dentro de selection (FIM).\")\n parents.printPopulation(popSize)\n \"\"\"", "def aux_best_percentile_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor, used_features):\n\tpercentile_score = 0\n\tpercentiles = [25, 35, 45, 50, 55, 65, 75]\n\t# percentiles = [45]\n\tpercentile_selector = None\n\tpercentile_train_features_selected = None\n\tpercentile_test_features_selected = None\n\n\tfor percentile in percentiles:\n\t\tprint(percentile)\n\t\ttemp_percentile_selector = SelectPercentile(score_func=f_regression, percentile=percentile)\n\t\ttemp_percentile_selector.fit(train_features, train_similarity_target)\n\t\ttemp_percentile_train_features_selected = temp_percentile_selector.transform(train_features)\n\t\ttemp_percentile_test_features_selected = temp_percentile_selector.transform(test_features)\n\n\t\tregressor.fit(temp_percentile_train_features_selected, train_similarity_target)\n\n\t\ttemp_score = regressor.score(temp_percentile_test_features_selected, test_similarity_target)\n\t\tprint(\"The score on the selected features (Percentile Selector): %.3f\" % temp_score)\n\n\t\tif temp_score > percentile_score:\n\t\t\tpercentile_score = temp_score\n\t\t\tpercentile_selector = temp_percentile_selector\n\t\t\tpercentile_train_features_selected = temp_percentile_train_features_selected\n\t\t\tpercentile_test_features_selected = temp_percentile_test_features_selected\n\n\tpercentile_mask = 
percentile_selector.get_support()\n\tprint(\"This is the percentile mask: \")\n\tprint(percentile_mask)\n\n\tpercentile_mask = build_mask(percentile_mask, used_features)\n\tmask_save_path = os.path.join('feature_selection_masks', 'assin2_percentile_based_mask.txt')\n\tdebug_data(percentile_mask, mask_save_path)\n\n\treturn percentile_train_features_selected, percentile_test_features_selected, percentile_selector", "def _selection(self) -> None:\n # The size of the new population must be the same as the prev. one\n max_size_of_pop = self._pop_size\n\n # Copy 50% of best chromosomes to the next generation\n num_of_pop_to_next_gen = round(self._pop_size / 2)\n max_size_of_pop -= num_of_pop_to_next_gen\n self._population = self._population[0:num_of_pop_to_next_gen]\n\n # Mutate 25% of the prev. population and add to the next generation\n num_of_mutated_to_next_gen = round(max_size_of_pop / 2)\n max_size_of_pop -= num_of_mutated_to_next_gen\n for i in range(num_of_mutated_to_next_gen):\n # Mutate one member from the prev. generation\n img, _ = self._population[i]\n new_mutated_member = self._mutate(img)\n\n # Apply more mutation to one chromosome(from 0 to 100)\n for i in range(rand.randint(0, 100)):\n new_mutated_member = self._mutate(new_mutated_member)\n\n # Evaluate the goodness of obtained chromosome\n fitval = self._fit_test(new_mutated_member)\n # Add the mutated chromosome to the next generation\n self._population.append((new_mutated_member, fitval))\n\n # For remaining 25% of the prev. population do crossing overs\n num_of_crossing_overs_to_next_gen = max_size_of_pop\n max_size_of_pop -= num_of_crossing_overs_to_next_gen\n\n for i in range(num_of_crossing_overs_to_next_gen):\n # Choose 2 chromosomes, then do one crossing over\n img_ext_1, _ = self._population[i]\n img_ext_2, _ = self._population[rand.randint(0, num_of_pop_to_next_gen)]\n\n new_mutated_member = self._crossing_over(img_ext_1, img_ext_2)\n # Evaluate the goodness of obtained chromosome\n fitval = self._fit_test(new_mutated_member)\n # Add the derived chromosome to the next generation.\n # Form of 1 element of the population: (member, fitness value)\n self._population.append((new_mutated_member, fitval))\n\n # Sort the new generation in increasing order based on the fitness value of each chromosome\n self._population.sort(key=lambda x: x[1])\n print(f'Best chromosome fit value: {self._population[0][1]}')", "def selectTourney(population, fitnessFunction, nrOfContenders = 2, highIsGood = True):\n populationSize = len(population)\n selectedPopulation = []\n\n #select until original populationSize is reached\n while len(selectedPopulation) < populationSize:\n\n if highIsGood:\n bestFitness = -math.inf\n else:\n bestFitness = math.inf\n\n pastContenders = []\n\n for i in range(nrOfContenders):\n #choose a contender randomly. 
Make sure that contenders are not equal\n while True:\n contenderNr = round(random.uniform(0, populationSize - 1))\n if contenderNr in pastContenders:\n continue\n pastContenders.append(contenderNr)\n break\n\n contender = population[contenderNr]\n fitnessOfContender = fitnessFunction(contender)\n\n if (highIsGood and fitnessOfContender > bestFitness) or \\\n (not highIsGood and fitnessOfContender < bestFitness):\n winner = contender\n bestFitness = fitnessOfContender\n\n selectedPopulation.append(copy.deepcopy(winner))\n\n return selectedPopulation", "def random_select(population, lamda):\n fitness_population = []\n for i in population:\n f_i = fitness_function(i, lamda)\n fitness_population.append(f_i)\n pList = selection_probability(fitness_population)\n rnd_indices = np.random.choice(len(population), p=pList)\n choice = population[rnd_indices]\n return choice", "def selection(self):\n\n # sort the generation according to fitness.\n self.sortByFitness()\n # get the fitness sum.\n fitnessSum = 0\n for outfit in self.currentGeneration:\n fitnessSum += self.applyFitness(outfit)\n # generate a random number\n stop = random.uniform(0, 1)\n accumulated = 0\n offset = 0\n for outfit in self.currentGenerationSorted:\n fitness = self.applyFitness(outfit) + offset\n probability = fitness / fitnessSum\n accumulated += probability\n\n if stop <= accumulated:\n return outfit", "def test_uniform_search_produces_forest(graph):\n g = graph()\n \n if hasattr(g, 'edge_weight'):\n edge_weight = g.edge_weight\n else:\n edge_weight = defaultdict(int)\n\n # Create a visitor that will produce a forest\n class ForestVisitor(TraversalVisitor):\n def __init__(self):\n TraversalVisitor.__init__(self)\n self.forest = yaupon.Forest()\n \n def tree_edge(self, e):\n # This will throw from inside \"traverse\" if a cycle is created\n self.forest.add_edge(e[0],e[1])\n\n forest_visitor = ForestVisitor()\n traverse(g.vertices(), forest_visitor, \n uniform_cost_generator(g, edge_weight))", "def get_selected_subsamples(sample_func, clusters, trajs_dict, visit_profile, Nsample, false_rate=80):\n print('The desired false rate is %f'%(false_rate/Nsample))\n crter = 0\n done_first_round = False\n nclusters = len(clusters)\n \n print('Start the first selection until the number of potential profiles is more than Nsample')\n while crter < Nsample:\n i = np.random.choice(range(nclusters))\n if len(clusters[i]) > Nsample*5 or len(clusters[i]) < Nsample: continue\n # try sampling\n selected_spl, plist_spl = sample_func(trajs_dict, plist=None, usrs=clusters[i])\n # do the deterministic attack\n a2 = get_trick_mat(clusters[i] , selected_spl, visit_profile)\n nonzero_list = [np.sum(np.count_nonzero(ai))>=1 for ai in make_sym_mat(a2)] \n crter = np.sum(nonzero_list)\n \n print('Finish the first round selection, %d candidates are selected from cluster %d'%(crter, i))\n round_one_usrs = np.array(clusters[i])[nonzero_list]\n \n crter2 = 0; len_rone = len(round_one_usrs)\n print('Start the second selection until false rate %f'%(false_rate/Nsample))\n while crter2 < false_rate:\n final_selected_usrs = round_one_usrs[np.random.choice(len_rone, Nsample, replace=False)]\n tmp = get_trick_mat(final_selected_usrs, selected_spl, visit_profile)\n crter2 = np.sum([np.sum(np.count_nonzero(ai))>=1 for ai in make_sym_mat(tmp)])\n print('Final false rate for deterministic attack%f'%(crter2/Nsample))\n return selected_spl, final_selected_usrs, plist_spl", "def fitness_proportionate_selection(random, population, args):\r\n num_selected = 
args.setdefault('num_selected', 1)\r\n len_pop = len(population)\r\n psum = [i for i in range(len_pop)]\r\n pop_max_fit = (max(population)).fitness\r\n pop_min_fit = (min(population)).fitness\r\n \r\n # If we're actually doing minimimization,\r\n # fitness proportionate selection is not defined.\r\n if pop_max_fit < pop_min_fit:\r\n raise ValueError('Fitness proportionate selection is not valid for minimization.')\r\n \r\n # Set up the roulette wheel\r\n if pop_max_fit == pop_min_fit:\r\n psum = [(index + 1) / float(len_pop) for index in range(len_pop)]\r\n elif (pop_max_fit > 0 and pop_min_fit >= 0) or (pop_max_fit <= 0 and pop_min_fit < 0):\r\n population.sort(reverse=True)\r\n psum[0] = population[0].fitness\r\n for i in range(1, len_pop):\r\n psum[i] = population[i].fitness + psum[i-1]\r\n for i in range(len_pop):\r\n psum[i] /= float(psum[len_pop-1])\r\n \r\n # Select the individuals\r\n selected = []\r\n for _ in range(num_selected):\r\n cutoff = random.random()\r\n lower = 0\r\n upper = len_pop - 1\r\n while(upper >= lower):\r\n mid = (lower + upper) // 2\r\n if psum[mid] > cutoff: \r\n upper = mid - 1\r\n else: \r\n lower = mid + 1\r\n lower = max(0, min(len_pop-1, lower))\r\n selected.append(population[lower])\r\n return selected", "def population_selection(population, sack, max_weight):\n sorted_population = population_performance(population, sack, max_weight)\n new_gen = []\n \n for fit_member in range(len(sorted_population) - 2): #killing two weakest\n new_gen.append(sorted_population[fit_member][0])\n\n return new_gen", "def randomforest_cv(self, nsplits: int = 5) -> (float, float, float):\r\n params = {\r\n \"n_estimators\": [20, 50, 100, 200],\r\n \"max_depth\": [2, 3, 5, 8, 10, 15, 20],\r\n }\r\n model = RandomForestClassifier()\r\n gridcv = GridSearchCV(model, params, cv=nsplits)\r\n gridcv.fit(self.x, self.y)\r\n best_params = gridcv.best_params_\r\n cv = KFold(n_splits=nsplits)\r\n acc_result = []\r\n for train, test in cv.split(self.x):\r\n x_train = self.x[train, :]\r\n x_test = self.x[test, :]\r\n y_train = self.y[train]\r\n y_test = self.y[test]\r\n model = RandomForestClassifier(**best_params).fit(x_train, y_train)\r\n y_predict = model.predict(x_test)\r\n acc_result.append(binary_acc(y_test, y_predict))\r\n return np.mean(acc_result), np.std(acc_result), best_params", "def RForest(x, y, s):\n usx = np.array(x)\n usy = np.array(y)\n\n # split data into train and validation set\n x_train, x_test, y_train, y_test = train_test_split(usx, usy, test_size=s)\n clf = RandomForestClassifier(n_estimators=100, max_depth=2, random_state=0)\n clf.fit(x_train, y_train)\n y_predict = clf.predict(x_test)\n\n # select only the probabilities of being fraud\n y_pred_prob = clf.predict_proba(x_test)[:, 1]\n return y_predict, y_test, y_pred_prob", "def feature_select(T_train, y_train, k=3):\r\n from sklearn.metrics import roc_auc_score\r\n\r\n T_principle_index = list() # The index of Principle features\r\n AUC = list()\r\n\r\n # Forward Stage-wise algorithm\r\n for j in range(k):\r\n auc = list()\r\n Dist = list()\r\n\r\n for i in range(len(T_train.T)):\r\n if i in T_principle_index:\r\n auc.append(0)\r\n Dist.append(0)\r\n continue\r\n T_temp_index = T_principle_index + [i]\r\n T0 = T_train.T[T_temp_index].T # transient statistics\r\n C0 = gen_center(T0, y_train)\r\n dist = util.distance(T0, C0) # distance\r\n Dist.append(dist)\r\n auc.append(roc_auc_score(y_train, -dist))\r\n\r\n m = np.argmax(auc)\r\n T_principle_index = T_principle_index + [m]\r\n AUC.append(auc[m])\r\n\r\n dist = 
Dist[m]\r\n T_principle_index = np.array(T_principle_index)\r\n T_principle = T_train.T[T_principle_index].T\r\n\r\n return T_principle, T_principle_index, np.array(dist), AUC", "def Genetic_Algorithm(Population, Lambda, l, data):\n if Population.Population_size == 1: # Used in case of different population sizes\n picked_individuals = np.random.permutation(Population.Population_size)[:4].tolist()*4\n else:\n # Selecting 4 different individuals from the population\n picked_individuals = np.random.permutation(Population.Population_size)[:4].tolist()\n\n # Initializing child of the selected individuals\n child_assets = []\n child_proportions = []\n child_weights = np.zeros(N)\n l = 0\n\n #Pool_1\n pair_1_assets = [Population.population_assets[picked_individuals[0]], Population.population_assets[picked_individuals[1]]]\n pair_1_fitness = [Population.fitness[picked_individuals[0]], Population.fitness[picked_individuals[1]]]\n pair_1_proportions = [Population.population_proportions[picked_individuals[0]], Population.population_proportions[picked_individuals[1]]]\n\n # Pool_2\n pair_2_assets = [Population.population_assets[picked_individuals[2]], Population.population_assets[picked_individuals[3]]]\n pair_2_fitness = [Population.fitness[picked_individuals[2]], Population.fitness[picked_individuals[3]]]\n pair_2_proportions = [Population.population_proportions[picked_individuals[2]], Population.population_proportions[picked_individuals[3]]]\n\n # Selecting parents for the uniform crossover\n parent_1_assets = pair_1_assets[pair_1_fitness.index(min(pair_1_fitness))]\n parent_1_proportions = pair_1_proportions[pair_1_fitness.index(min(pair_1_fitness))]\n\n parent_2_assets = pair_2_assets[pair_2_fitness.index(min(pair_2_fitness))]\n parent_2_proportions = pair_2_proportions[pair_2_fitness.index(min(pair_2_fitness))]\n\n # Looking for same assets in parents and inputting them into child\n common_assets = []\n for i in parent_1_assets:\n if i in parent_2_assets:\n common_assets.append(i)\n child_assets += common_assets\n\n # Finding out what are the indexes of those assets in parents\n indexes_1 = []\n indexes_2 = []\n for i in common_assets:\n indexes_1.append(parent_1_assets.index(i))\n indexes_2.append(parent_2_assets.index(i))\n\n # Adding the proportions of same assets to child with 50% chance\n for m, h in zip(indexes_1, indexes_2):\n rand_1 = np.random.rand()\n if rand_1 > 0.5:\n child_proportions.append(parent_1_proportions[m])\n else:\n child_proportions.append(parent_2_proportions[h])\n\n # Creating new lists with assets that each parent don't have in common\n temp_parent_1_assets = []\n temp_parent_2_assets = []\n for m, h in zip(parent_1_assets, parent_2_assets):\n temp_parent_1_assets.append(m)\n temp_parent_2_assets.append(h)\n\n for i in common_assets:\n if i in temp_parent_1_assets:\n temp_parent_1_assets.remove(i)\n\n for i in common_assets:\n if i in temp_parent_2_assets:\n temp_parent_2_assets.remove(i)\n\n # Adding other assets and their corresponding proportions to the child\n for m, h in zip(temp_parent_1_assets, temp_parent_2_assets):\n rand_2 = np.random.rand()\n if rand_2 > 0.5:\n child_assets.append(m)\n child_proportions.append(parent_1_proportions[parent_1_assets.index(m)])\n else:\n child_assets.append(h)\n child_proportions.append(parent_2_proportions[parent_2_assets.index(h)])\n\n # Creating A*\n # A* is a set of assets that are in the parents, but are not in the child (together with their associated values)\n parent_minus_child_assets = []\n 
parent_minus_child_proportions = []\n for m, h in zip(parent_1_assets, parent_2_assets):\n if m not in child_assets:\n parent_minus_child_assets.append(m)\n parent_minus_child_proportions.append(parent_1_proportions[parent_1_assets.index(m)])\n if h not in child_assets:\n parent_minus_child_assets.append(h)\n parent_minus_child_proportions.append(parent_2_proportions[parent_2_assets.index(h)])\n\n # Assets that can be potentially added to the child in case parent_minus_child assets (A*) are empty\n other_assets = np.random.permutation(N).tolist()\n for i in other_assets:\n if i in child_assets:\n other_assets.remove(i)\n\n # Mutation\n mutated_asset = np.random.choice(child_proportions)\n rand_3 = np.random.rand()\n if rand_3 > 0.5:\n child_proportions[child_proportions.index(mutated_asset)] = (0.9 * (data.epsilon + mutated_asset) - data.epsilon) # m=1\n else:\n child_proportions[child_proportions.index(mutated_asset)] = (1.1 * (data.epsilon + mutated_asset) - data.epsilon) # m=2\n mutated_child_proportions = child_proportions\n\n # Making sure the child does not have two identical assets\n for i in child_assets:\n if child_assets.count(i) > 1:\n mutated_child_proportions.remove(mutated_child_proportions[child_assets.index(i)])\n child_assets.remove(i)\n\n # Making sure all child proportion are between 0 and 1 (if not they get excluded)\n for i in mutated_child_proportions:\n if i < 0 or i > 1:\n child_assets.remove(child_assets[mutated_child_proportions.index(i)])\n mutated_child_proportions.remove(i)\n\n # Ensure that child has exactly 10 assets and proportions\n while len(child_assets) > data.K and len(mutated_child_proportions) > data.K:\n child_assets.remove(child_assets.index(min(mutated_child_proportions)))\n mutated_child_proportions.remove(min(mutated_child_proportions))\n\n # Add assets from A* to child\n while len(child_assets) < data.K and len(mutated_child_proportions) < data.K:\n if len(parent_minus_child_assets) != 0:\n rand_4 = np.random.choice(parent_minus_child_assets)\n child_assets.append(rand_4)\n mutated_child_proportions.append(parent_minus_child_proportions[parent_minus_child_assets.index(rand_4)])\n parent_minus_child_proportions.remove(parent_minus_child_proportions[parent_minus_child_assets.index(rand_4)])\n parent_minus_child_assets.remove(rand_4)\n for i in mutated_child_proportions:\n if i < 0 or i > 1:\n child_assets.remove(child_assets[mutated_child_proportions.index(i)])\n mutated_child_proportions.remove(i)\n for i in child_assets:\n if child_assets.count(i) > 1:\n mutated_child_proportions.remove(mutated_child_proportions[child_assets.index(i)])\n child_assets.remove(i)\n\n else: #In case A* is empty\n rand_5=np.random.choice(other_assets)\n child_assets.append(rand_5)\n other_assets.remove(rand_5)\n mutated_child_proportions.append(0)\n for i in mutated_child_proportions:\n if i < 0 or i > 1:\n child_assets.remove(child_assets[mutated_child_proportions.index(i)])\n mutated_child_proportions.remove(i)\n for i in child_assets:\n if child_assets.count(i) > 1:\n mutated_child_proportions.remove(mutated_child_proportions[child_assets.index(i)])\n child_assets.remove(i)\n\n # Given large amount of iterations and randomness all child proportions could be 0 hence set 1 at random to 0.01\n # Does not influence the overall result as it ist immediately replaced by a stronger individual\n if sum(mutated_child_proportions) == 0:\n mutated_child_proportions[mutated_child_proportions.index(np.random.choice(mutated_child_proportions))]= 0.01\n\n # Evaluating child\n 
mutated_child_proportions = np.array(mutated_child_proportions)\n L = mutated_child_proportions.sum()\n w_temp = data.epsilon + mutated_child_proportions * data.F / L\n is_too_large = (w_temp > data.delta)\n while is_too_large.sum() > 0:\n is_not_too_large = np.logical_not(is_too_large)\n L = mutated_child_proportions[is_not_too_large].sum()\n F_temp = 1.0 - (data.epsilon * is_not_too_large.sum() + data.delta * is_too_large.sum())\n w_temp = data.epsilon + mutated_child_proportions * F_temp / L\n w_temp[is_too_large] = data.delta\n is_too_large = (w_temp > data.delta)\n\n # Assigning weights to child\n child_weights[:] = 0\n child_weights[child_assets] = w_temp\n mutated_child_proportions = w_temp - data.epsilon\n\n # Calculating child fitness\n obj1 = np.sum((child_weights * child_weights.reshape((child_weights.shape[0], 1))) * data.sigma)\n obj2 = np.sum(child_weights * data.mu)\n child_fitness = Lambda[l] * obj1 - (1 - Lambda[l]) * obj2\n\n # Checking whether child is valid\n Population.check_valid_solution(child_weights, mutated_child_proportions, child_assets, data)\n\n # Substituting child into the population and removing the weakest member\n index_worst_member = np.argmax(Population.fitness)\n Population.fitness[index_worst_member] = child_fitness\n Population.population_proportions[index_worst_member] = mutated_child_proportions\n Population.population_weights[index_worst_member] = child_weights\n Population.population_assets[index_worst_member] = child_assets\n Population.Obj1[index_worst_member] = obj1\n Population.Obj2[index_worst_member] = obj2\n\n # Finding the best member of the population\n index_best_member = np.argmin(Population.fitness)\n Population.best_fitness = Population.fitness[index_best_member]\n Population.best_proportions = Population.population_proportions[index_best_member]\n Population.best_weights = Population.population_weights[index_best_member]\n Population.best_assets = Population.population_assets[index_best_member]\n Population.best_covariance = Population.Obj1[index_best_member]\n Population.best_return = Population.Obj2[index_best_member]\n\n\n return Population.best_fitness, Population.best_proportions, Population.best_assets, Population.best_weights, Population.best_covariance, Population.best_return", "def sub_select_features(features, strategy):\n\n def extract_one_index(y_val):\n index_ones = []\n y_prev = 0\n start_stop = []\n if y_val[-1] == 1:\n y_val = y_val.tolist() + [0]\n for i, y in enumerate(y_val):\n if y_prev == 0 and y == 1:\n start_stop = [i]\n if y_prev == 1 and y == 0:\n start_stop.append(i)\n index_ones.append(start_stop)\n y_prev = y\n return index_ones\n\n def wrapper(start_stop, maxi):\n size = start_stop[1] - start_stop[0]\n bound = (size+1)//2\n return [max(0, start_stop[0]-bound), min(maxi, start_stop[1]+bound)]\n\n def deduce_index_to_keep(one_index, maxi):\n wrapped = [wrapper(start_stop, maxi) for start_stop in one_index]\n to_keep = [idx for idx in range(wrapped[0][0], wrapped[0][1])]\n for start_stop in wrapped[1:]:\n to_keep += [idx for idx in range(start_stop[0], start_stop[1]) if idx > to_keep[-1]]\n return to_keep\n\n if strategy == 0:\n new_features = features # We do nothing\n\n else:\n new_features = dict()\n for which in ['train', 'test']:\n one_id = extract_one_index(features['y_'+which])\n true_idx = deduce_index_to_keep(one_id, len(features['y_'+which]))\n try:\n new_features['x_'+which] = features['x_'+which][true_idx]\n new_features['y_'+which] = features['y_'+which][true_idx]\n except IndexError as e:\n 
print(which)\n print(features['x_'+which].shape)\n print(features['y_'+which].shape)\n print(one_id)\n raise e\n\n return new_features", "def evaluate_random_forest(y_test, y_pred):", "def selectFeatures(k_features=5, *args):\n X, y = args\n skb = SelectKBest(k=k_features)\n return skb.fit_transform(X, y)", "def select(self, solutions):\r\n solutions = self.sort_solutions(solutions)\r\n # define coordinates for the two groups\r\n elitists_coords = [x for x in range(self.breeding_rules.elitist_candidates)]\r\n first_discarded_solution = int(len(solutions) - (len(solutions) * self.breeding_rules.discard_rate))\r\n crossover_coords = [x for x in range(first_discarded_solution)]\r\n # fill each breeding group with its possible participants, based on the coordinates defined above\r\n elitists = [solutions[x] for x in elitists_coords]\r\n crossover = [solutions[x] for x in crossover_coords]\r\n return elitists, crossover", "def member_crossover(population):\n gene1 = population[random.randint(0, int(len(population) - 1))]\n gene2 = population[random.randint(0, int(len(population) - 1))]\n split = random.randint(1, int(len(population[0]) - 1))\n new_gene1 = gene1[:split] + gene2[split:]\n new_gene2 = gene2[:split] + gene1[split:]\n\n return new_gene1, new_gene2", "def findRFBestN():\n resultList = []\n BestScore = 0\n nList = [ n for n in range(1,200) if n%10 == 0]\n for n in nList:\n rforest = ensemble.RandomForestClassifier(max_depth=5, n_estimators=n)\n trainng_score = []\n testing_score = []\n # run 10 different cross-validation\n for index in range(10):\n # split into cross-validation sets.\n cv_data_train, cv_data_test, cv_target_train, cv_target_test = \\\n cross_validation.train_test_split(X_train, y_train, test_size=0.1)\n\n # fit the model using the cross-validation data\n # and tune parameter, such as max_depth here\n rforest = rforest.fit(cv_data_train, cv_target_train)\n trainng_score += [rforest.score(cv_data_train,cv_target_train)]\n testing_score += [rforest.score(cv_data_test,cv_target_test)]\n\n # Compute the average score for both traning and testing data\n trainng_avgScore = 1.0 * sum(trainng_score)/len(trainng_score)\n testing_avgScore = 1.0 * sum(testing_score)/len(testing_score)\n\n # find the best score\n if testing_avgScore > BestScore:\n BestScore = testing_avgScore\n best_n = n\n resultList += [[n, trainng_avgScore, testing_avgScore]]\n print ('The best average score and the corresponding n_estimator is: ')\n return BestScore, best_n", "def selection_profiles_by_chance(fits_path, dataset1, dataset2):\n fits = h5py.File(fits_path, 'r')\n true = np.median(fits[dataset1]['coupling_coefs'][:], axis=0)\n compare = np.median(fits[dataset2]['coupling_coefs'][:], axis=0)\n\n n_neurons, M = true.shape\n probabilities = np.zeros(n_neurons)\n\n for neuron in range(n_neurons):\n n = np.count_nonzero(true[neuron])\n N = np.count_nonzero(compare[neuron])\n rv = hypergeom(M=M, n=n, N=N)\n\n overlap = np.count_nonzero(true[neuron] * compare[neuron])\n probabilities[neuron] = 1 - rv.cdf(x=overlap)\n\n return probabilities", "def crossover(new_pop, k):\n shuffle(new_pop)\n for i in range(len(new_pop) // 2):\n points = random.sample(range(1, len(new_pop[i])), k)\n points.sort()\n for fold in range(k):\n x = points[fold]\n tmp = new_pop[2 * i][:x].copy()\n new_pop[2 * i][:x], new_pop[2 * i + 1][:x] = new_pop[2 * i +\n 1][:x], tmp\n return new_pop", "def cross_over(self, father: Tour, mother: Tour) -> Tour:\n startGene = int(random.random() * father.size())\n endGene = int(random.random() * 
father.size())\n\n # Swap 2 position if start > end\n if startGene > endGene:\n startGene, endGene = endGene, startGene\n\n # Init child as list\n child = [None for i in range(father.size())]\n\n # print(startGene, endGene)\n for i in range(startGene, endGene):\n child[i] = father.tour_ids[i]\n \n mother_idx = 0\n for i in range(0, father.size()):\n # Fill only empty positions by genes of mother in order\n if child[i] == None:\n while mother_idx < mother.size():\n if mother.tour_ids[mother_idx] not in child:\n child[i] = mother.tour_ids[mother_idx]\n mother_idx += 1\n break\n mother_idx += 1\n\n \n return Tour(self.map.size(), child, self.map)", "def steady_state_selection(self, fitness, num_parents):\n\n fitness_sorted = sorted(range(len(fitness)), key=lambda k: fitness[k])\n fitness_sorted.reverse()\n\n # Selecting the best individuals in the current generation as parents for producing the offspring of the next generation.\n if self.gene_type_single == True:\n parents = numpy.empty((num_parents, self.population.shape[1]), dtype=self.gene_type[0])\n else:\n parents = numpy.empty((num_parents, self.population.shape[1]), dtype=object)\n\n for parent_num in range(num_parents):\n parents[parent_num, :] = self.population[fitness_sorted[parent_num], :].copy()\n\n return parents, numpy.array(fitness_sorted[:num_parents])", "def one_point_crossover(graph_first: Any, graph_second: Any, max_depth: int) -> Any:\n pairs_of_nodes = equivalent_subtree(graph_first, graph_second)\n if pairs_of_nodes:\n node_from_graph_first, node_from_graph_second = choice(pairs_of_nodes)\n\n layer_in_graph_first = \\\n graph_first.root_node.distance_to_primary_level - node_from_graph_first.distance_to_primary_level\n layer_in_graph_second = \\\n graph_second.root_node.distance_to_primary_level - node_from_graph_second.distance_to_primary_level\n\n replace_subtrees(graph_first, graph_second, node_from_graph_first, node_from_graph_second,\n layer_in_graph_first, layer_in_graph_second, max_depth)\n return graph_first, graph_second", "def apply_tournament_selection(individuals, tot_rounds: int):\r\n winner = random.choice(individuals)\r\n\r\n for i in range(0, tot_rounds-1):\r\n ind = random.choice(individuals)\r\n\r\n if ind.fitness.dominates(winner.fitness):\r\n winner = ind\r\n elif not winner.fitness.dominates(ind.fitness):\r\n if ind.fitness.crowding_dist < winner.fitness.crowding_dist:\r\n winner = ind\r\n\r\n return winner", "def fitness_sharing(self):\n for gene in self.population:\n gene.fitness = gene.raw_fitness\n return\n def dist(gene1, gene2):\n \"\"\"Return distence between two gene\"\"\"\n return abs(len(gene1.goal) - len(gene2.goal))\n for gene in self.population:\n raw_fitnesses = [e.raw_fitness for e in self.population if dist(e, gene) <= 5]\n gene.fitness = sum(raw_fitnesses) / len(raw_fitnesses)", "def select_leader(self):\n\n if self.leaders.size() == 1:\n return self.leaders.rand_choice()\n\n candidates = self.leaders.rand_sample(2)\n\n # randomly favourize one of them\n # best_global = choice(candidates)\n\n # should select those which has bigger fitness\n # # if one of them dominates, it will be selected as global best\n # dom = self.dominance.compare(candidates[0].costs_signed, candidates[1].costs_signed)\n #\n # if dom == 1:\n # best_global = candidates[0]\n #\n # if dom == 2:\n # best_global = candidates[1]\n\n if candidates[1].features['crowding_distance'] > candidates[0].features['crowding_distance']:\n best_global = candidates[1]\n else:\n best_global = candidates[0]\n return best_global", 
"def select_leader(self):\n\n if self.leaders.size() == 1:\n return self.leaders.rand_choice()\n\n candidates = self.leaders.rand_sample(2)\n\n # randomly favourize one of them\n # best_global = choice(candidates)\n\n # should select those which has bigger fitness\n # # if one of them dominates, it will be selected as global best\n # dom = self.dominance.compare(candidates[0].costs_signed, candidates[1].costs_signed)\n #\n # if dom == 1:\n # best_global = candidates[0]\n #\n # if dom == 2:\n # best_global = candidates[1]\n\n if candidates[1].features['crowding_distance'] > candidates[0].features['crowding_distance']:\n best_global = candidates[1]\n else:\n best_global = candidates[0]\n return best_global", "def tournament_selection(self, population: List[IndividualType]) -> List[IndividualType]:\n survivors: List[IndividualType] = []\n for _ in range(self.configuration.n_survivors):\n # Choose participants\n rooster: List[IndividualType] = random.sample(population, self.configuration.rooster_size)\n # Select fittest of participants as survivor\n fittest_individual_of_rooster = self.get_best_individual(rooster)\n population.remove(fittest_individual_of_rooster)\n survivors.append(fittest_individual_of_rooster)\n return survivors", "def cross_below_cross_rate(self):\n p1_index = randint(0, floor(self.population_size * self.cross_rate)-1)\n p2_index = randint(0, floor(self.population_size * self.cross_rate)-1)\n gene_of_p1 = self.population[p1_index]\n gene_of_p2 = self.population[p2_index]\n cross_point = randint(0, int_min(len(gene_of_p1), len(gene_of_p2))-1)\n new_chromosome = []\n new_chromosome += gene_of_p1.chromosome[:cross_point]\n new_chromosome += gene_of_p2.chromosome[cross_point:]\n if (self.tactics.is_unrepeatable(new_chromosome[cross_point])\n and cross_point < len(new_chromosome)-1):\n if new_chromosome[cross_point] == new_chromosome[cross_point+1]:\n del new_chromosome[cross_point]\n return Gene(chromosome=new_chromosome)", "def crossover(NN1, NN2, p_c, p_m):\n if np.random.choice([0, 1], p=[1-p_c, p_c]):\n return nn.mate_neural_nets(NN1, NN2, p_m)\n else:\n return np.random.choice([NN1, NN2])", "def crossOver(self):\n # copy all the chromosomes from the current generation to a regular python list\n # start with an empty list\n lstChromosomes = []\n # loop through all the items in the queue\n while not self.generation.empty():\n # take a chromosome off the queue\n chromosome = self.generation.get()\n # append the chromosome to the list\n lstChromosomes.append(chromosome)\n # create an empty priority queue for the new generation\n newGeneration = PriorityQueue()\n # cross-over all chromosomes in turn - start with the beginning of the list\n for chrom1Index in range(0, len(lstChromosomes)-1):\n # cross-over with all chromosomes that come after it\n for chrom2Index in range(chrom1Index, len(lstChromosomes)):\n # get the chromosomes we are crossing over\n chrom1 = lstChromosomes[chrom1Index]\n chrom2 = lstChromosomes[chrom2Index]\n # perform the cross-over operation\n xOver = chrom1.crossOver(chrom2)\n # create two new chromosome objects\n newChrom1 = self.chromosomeClass()\n newChrom2 = self.chromosomeClass()\n # set their genes to the values created by crossover operation\n newChrom1.genes = xOver[0]\n newChrom2.genes = xOver[1]\n # save the new chromosomes we just created\n newGeneration.put(newChrom1)\n newGeneration.put(newChrom2)\n # save all the original chromosomes\n for chromosome in lstChromosomes:\n newGeneration.put(chromosome)\n # keep track of all the chromosomes we 
create\n lstChromosomes = []\n # keep track of how many we are keeping\n chromosomesKept = 0\n # as long as we haven't added more chromosomes than the population is supposed to have\n # and we have more chromosomes to add...\n while chromosomesKept < self.populationSize and not newGeneration.empty():\n # take a chromosome off the new generation queue\n newChromosome = newGeneration.get()\n # have we seen this chromosome before?\n if (not newChromosome in lstChromosomes):\n # store it in our list of chromosomes\n lstChromosomes.append(newChromosome)\n # store it in the queue in the chromosome\n self.generation.put(newChromosome)\n # increase our count of chromosomes kept\n chromosomesKept += 1\n # as long as we haven't added more chromosomes than the population is supposed to have, create\n # random chromosomes\n while chromosomesKept < self.populationSize:\n # create a random chromosome\n newChromosome = self.chromosomeClass()\n # have we seen this chromosome before?\n if (not newChromosome in lstChromosomes):\n # store it in our list of chromosomes\n lstChromosomes.append(newChromosome)\n # store it in the queue in the chromosome\n self.generation.put(newChromosome)\n # increase our count of chromosomes kept\n chromosomesKept += 1", "def Crossover_Function(data1, data2):\n\n # for this function, I modified the uniform crossover function to take care of duplicates after crossover.\n\n data1[1] = 0\n data2[1] = 0\n chromosome1 = list.copy(data1[0])\n chromosome2 = list.copy(data2[0])\n\n #print(\"\\nChromosomes before crossover - \")\n #print(chromosome1)\n #print(chromosome2)\n\n # for each index in both chromosomes, use a coin toss to determine which index is crossed over\n for i in range(len(chromosome1)):\n\n cointoss = random.randrange(2)\n if cointoss == 0:\n chromosome1[i], chromosome2[i] = chromosome2[i], chromosome1[i]\n\n # find duplicates after crossing over\n dupes_in_ch1 = list(duplicates(chromosome1))\n dupes_in_ch2 = list(duplicates(chromosome2))\n\n\n # handle duplicates if any are found\n for i in dupes_in_ch1:\n if i in chromosome1: chromosome1.remove(i)\n chromosome2.append(i)\n \n for i in dupes_in_ch2:\n if i in chromosome2: chromosome2.remove(i)\n chromosome1.append(i)\n\n # replaced the modified chromosomes in the data\n data1[0] = chromosome1\n data2[0] = chromosome2\n\n #print(\"\\nChromsomes after crossover - \")\n #print(data1[0])\n #print(data2[0])\n\n return [data1, data2]", "def uniform_crossover(\n self, mating_pop_dict, test=False, pairs=[], crossover_prob={}\n ):\n\n print('Performing crossovers')\n\n # Initialises dictionary of child networks\n crossover_pop_dict = OrderedDict()\n\n if test is False:\n # Selects pairs of networks at random to crossover with each other\n network_num = list(mating_pop_dict.keys())\n random.shuffle(network_num)\n network_num = iter(network_num) # Do not merge with line below,\n # and do not introduce any lines of code between them!\n network_num = list(zip(network_num, network_num))\n else:\n network_num = pairs\n\n # Performs uniform crossover\n for index, network_pair in enumerate(network_num):\n network_num_1 = network_pair[0]\n network_num_2 = network_pair[1]\n mate_1 = copy.deepcopy(mating_pop_dict[network_num_1])\n mate_2 = copy.deepcopy(mating_pop_dict[network_num_2])\n\n for node in list(mate_1.nodes):\n type_1 = mate_1.nodes()[node]['type']\n type_2 = mate_2.nodes()[node]['type']\n if type_1 != type_2:\n raise TypeError(\n 'Difference between type of {} in {} ({} = {}; {} ='\n ' {}) - should be identical'.format(node, 
network_pair,\n network_num_1, type_1, network_num_2, type_2)\n )\n if type_1 == 'loop':\n continue\n\n if test is False:\n random_number = random.uniform(0, 1)\n else:\n random_number = crossover_prob[index][node]\n\n if random_number <= self.crossover_prob:\n # Copy to prevent these dictionaries from updating when the\n # node attributes are updated in the code below (otherwise\n # both nodes will be assigned the same identity as the node\n # in mate_1, instead of the node identities being crossed\n # over)\n mate_1_node_attributes = copy.deepcopy(mate_1.nodes()[node])\n mate_2_node_attributes = copy.deepcopy(mate_2.nodes()[node])\n # mate_1.nodes()[node] = {} does not work, get\n # TypeError: 'NodeView' object does not support item assignment\n for attribute in list(mate_1.nodes()[node].keys()):\n del mate_1.nodes()[node][attribute]\n for attribute in list(mate_2.nodes()[node].keys()):\n del mate_2.nodes()[node][attribute]\n nx.set_node_attributes(mate_1, values={node: mate_2_node_attributes})\n nx.set_node_attributes(mate_2, values={node: mate_1_node_attributes})\n\n crossover_pop_dict[network_num_1] = mate_1\n crossover_pop_dict[network_num_2] = mate_2\n\n return crossover_pop_dict", "def select_features(self):\r\n \r\n features_list = list(self.feed_data.columns.values)\r\n features_list.remove(\"min_time\")\r\n thisrace = self.config.race_to_predict\r\n\r\n #if never ran race before, don't include these variables in feature\r\n #selection, they're just 0's anyway\r\n if self.config.first_time_running_race == True:\r\n unuseable_columns = [('min_time', thisrace),('std', thisrace),('num_races', thisrace),\r\n ('rainfall', thisrace),\r\n ('temp', thisrace),\r\n ('wind', thisrace),\r\n ('metersup', thisrace), \r\n 'sex_W']\r\n else:\r\n #drop this column...probs should have removed it earlier. 
\r\n unuseable_columns = ['sex_W']\r\n #print(features_list)\r\n for element in unuseable_columns:\r\n features_list.remove(element)\r\n data_with_all_feats = self.feed_data.drop(unuseable_columns,axis=1)\r\n colstodrop = features_list\r\n thiscols = []\r\n data_with_current_feats = data_with_all_feats.drop(features_list,axis=1)\r\n checkfit=100.0\r\n scores = []\r\n dropped_cols = []\r\n loopgain =True\r\n #mymod = RandomForestRegressor(n_estimators=80, oob_score = True, max_depth=10,\r\n # min_samples_split = 25, criterion='mse')\r\n thisloopfeatures_list = features_list\r\n curcols = data_with_current_feats.columns\r\n countgain=0\r\n #print(\"cc\",curcols)\r\n while loopgain == True:\r\n thisloopscore=100.0\r\n for fet in thisloopfeatures_list:\r\n data_with_current_feats[fet] = data_with_all_feats[fet]\r\n etrain=data_with_current_feats.sample(frac=0.8,random_state=200)\r\n etest=data_with_current_feats.drop(etrain.index)\r\n y = etrain.pop('min_time')\r\n ytest = etest.pop('min_time')\r\n #print(y)\r\n model = RandomForestRegressor(n_estimators=80, oob_score = True, max_depth=15,\r\n min_samples_split = 12, criterion='mse')\r\n model.fit(etrain,y)\r\n\r\n PRED = model.predict(etrain)\r\n predscore = self.mean_absolute_percentage_error(y,PRED)#= r2_score(y,PRED)\r\n oobs = self.mean_absolute_percentage_error(y,model.oob_prediction_)\r\n scores.append(oobs)\r\n if ((thisloopscore - oobs) > 0.0):\r\n thisloopscore = oobs\r\n fetwinner = fet\r\n data_with_current_feats.drop(fet,axis=1,inplace=True)\r\n etrain.drop(fet,axis=1,inplace=True)\r\n\r\n data_with_current_feats[fetwinner] = data_with_all_feats[fetwinner]\r\n etrain=data_with_current_feats.sample(frac=0.8,random_state=200)\r\n etest=data_with_current_feats.drop(etrain.index)\r\n y = etrain.pop('min_time')\r\n ytest = etest.pop('min_time')\r\n #print(y)\r\n model = RandomForestRegressor(n_estimators=80, oob_score = True, max_depth=30,\r\n min_samples_split = 12,min_samples_leaf =7, criterion='mse')\r\n model.fit(etrain,y)\r\n\r\n PRED = model.predict(etrain)\r\n predscore = self.mean_absolute_percentage_error(y,PRED)#= r2_score(y,PRED)\r\n #print(fetwinner,predscore)\r\n oobs = self.mean_absolute_percentage_error(y,model.oob_prediction_)\r\n scores.append(oobs)\r\n #print(fetwinner,\"~\",oobs)\r\n thisloopfeatures_list.remove(fetwinner)\r\n if ((checkfit-oobs)>0.0001):\r\n checkfit = oobs\r\n curcols = data_with_current_feats.columns\r\n #print(curcols)\r\n else:\r\n break\r\n\r\n\r\n self.final_df = self.feed_data[data_with_current_feats.columns]\r\n self.Xtrain=self.final_df.sample(frac=0.8,random_state=200)\r\n self.Xtest=self.final_df.drop(self.Xtrain.index)#\r\n self.ytrain = self.Xtrain.pop('min_time')\r\n self.ytest = self.Xtest.pop('min_time')\r\n self.model= RandomForestRegressor(n_estimators=80, oob_score = True, max_depth=30,\r\n min_samples_split = 12,min_samples_leaf =7, criterion='mse')\r\n self.model.fit(self.Xtrain,self.ytrain)\r\n #print(y)\r\n return", "def segmented_crossover(\n self, mating_pop_dict, test=False, pairs=[], crossover_prob={}\n ):\n\n print('Performing crossovers')\n\n # Initialises dictionary of child networks\n crossover_pop_dict = OrderedDict()\n\n if test is False:\n # Selects pairs of networks at random to crossover with each other\n network_num = list(mating_pop_dict.keys())\n random.shuffle(network_num)\n network_num = iter(network_num) # Do not merge with line below,\n # and do not introduce any lines of code between them!\n network_num = list(zip(network_num, network_num))\n else:\n 
network_num = pairs\n\n # Performs segmented crossover\n for index, network_pair in enumerate(network_num):\n network_num_1 = network_pair[0]\n network_num_2 = network_pair[1]\n mate_1 = copy.deepcopy(mating_pop_dict[network_num_1])\n mate_2 = copy.deepcopy(mating_pop_dict[network_num_2])\n\n swap = False\n for node in list(mate_1.nodes):\n type_1 = mate_1.nodes()[node]['type']\n type_2 = mate_2.nodes()[node]['type']\n if type_1 != type_2:\n raise TypeError(\n 'Difference between type of {} in {} ({} = {}; {} ='\n ' {}) - should be identical'.format(node, network_pair,\n network_num_1, type_1, network_num_2, type_2)\n )\n if type_1 == 'loop':\n continue\n\n if test is False:\n random_number = random.uniform(0, 1)\n else:\n random_number = crossover_prob[index][node]\n\n if swap is False:\n if random_number <= self.swap_start_prob:\n swap = True\n else:\n swap = False\n elif swap is True:\n if random_number <= self.swap_stop_prob:\n swap = False\n else:\n swap = True\n\n if swap is True:\n # Copy to prevent these dictionaries from updating when the\n # node attributes are updated in the code below (otherwise\n # both nodes will be assigned the same identity as the node\n # in mate_1, instead of the node identities being crossed\n # over)\n mate_1_attributes = copy.deepcopy(mate_1.nodes()[node])\n mate_2_attributes = copy.deepcopy(mate_2.nodes()[node])\n # mate_1.nodes()[node] = {} does not work, get\n # TypeError: 'NodeView' object does not support item assignment\n for attribute in list(mate_1.nodes()[node].keys()):\n del mate_1.nodes()[node][attribute]\n for attribute in list(mate_2.nodes()[node].keys()):\n del mate_2.nodes()[node][attribute]\n nx.set_node_attributes(mate_1, values={node: mate_2_attributes})\n nx.set_node_attributes(mate_2, values={node: mate_1_attributes})\n\n crossover_pop_dict[network_num_1] = mate_1\n crossover_pop_dict[network_num_2] = mate_2\n\n return crossover_pop_dict", "def crossover(population, kw=None, **kwargs):\n future_population = []\n while len(future_population) < len(population):\n p1, p2 = random.choice(population)['notes'], random.choice(population)['notes']\n split = random.randint(1, len(p1) - 1)\n map(future_population.append, [p1[:split] + p2[split:], p2[:split] + p1[split:]])\n return future_population", "def random_forest_classifier(train_x, train_y, valid_x, valid_y):\n clf = RandomForestClassifier(n_estimators=25)\n clf.fit(train_x, train_y)\n sig_clf = CalibratedClassifierCV(clf, method=\"sigmoid\", cv=\"prefit\")\n sig_clf.fit(valid_x, valid_y)\n return clf, sig_clf", "def test_shap_interaction_values_2(self):\n for model in [ske.RandomForestRegressor(n_estimators=1), ske.RandomForestClassifier(n_estimators=1)]:\n self.x_df = self.x_df.astype(float)\n model.fit(self.x_df, self.y_df)\n print(model)\n explainer = shap.TreeExplainer(model)\n interaction_values = get_shap_interaction_values(self.x_df, explainer)\n assert interaction_values.shape[0] == self.x_df.shape[0]\n assert interaction_values.shape[1] == self.x_df.shape[1]\n assert interaction_values.shape[2] == self.x_df.shape[1]", "def CrossoverOX1(p1,p2):\n countryNo=len(p1)\n [start,end] = sorted(random.sample(range(1,countryNo),2))\n ch1 = [0]+[-1 for i in range(1,len(p1))]\n ch2 = [0]+[-1 for i in range(1,len(p1))]\n for i in range(1,countryNo):\n if i>=start and i<=end:\n ch1[i]=p1[i]\n ch2[i]=p2[i]\n for i in range(1,countryNo):\n if p2[i] not in ch1:\n ch1[ch1.index(-1)]=p2[i]\n for i in range(1,countryNo):\n if p1[i] not in ch2:\n ch2[ch2.index(-1)]=p1[i]\n return ch1, ch2", "def 
get_forest(self, verbose):\n _antecessors = []\n for key, cluster in self.clusters.items():\n if cluster.leaf_cluster is True:\n _antecessors.append(cluster.antecessor)\n _antecessors = remdup_preserve_order(_antecessors)\n _antecessors = sorted(_antecessors, key=get_cluster_idx, reverse=True)\n\n _tree_idx = 0\n\n print('Generating forest...')\n print('')\n count= 0.0\n if verbose:\n progress_bar = progress_bar = AnimatedProgressBar(end=len(_antecessors), width=50, \\\n fill='=', blank='.')\n for antecessor in _antecessors:\n if verbose and (count % 1 == 0):\n progress_bar + 1\n progress_bar.show_progress()\n tree = Tree(antecessor, idx = _tree_idx, acorns=self)\n self.forest[_tree_idx] = tree\n _tree_idx += 1\n\n if verbose:\n progress_bar.progress = 100 # Done\n progress_bar.show_progress()\n print('')\n print('')\n\n return", "def _cross_parents(self):\n while len(self.children_population) < self.children_count:\n parent_1, parent_2 = random.sample(self.population, k=2)\n self.children_population.extend(self.crossover.cross(parent_1, parent_2))", "def step(self):\n # amt_selected = \\\n # int(self.population_size * self.part_selected) \n\n # spawning_pool = [] # list of dna selected for reproduction\n new_data =[]\n \n sorted_dna = sorted(self.data, \n key=lambda dna: dna.fitness_function(dna),\n reverse=True)\n \n \n \n\n # mutation\n for dna in sorted_dna:\n dna.mute(self.mutation_probability)\n\n # crossover\n while len(new_data) < \\\n self.population_size - (self.population_size % 2):\n\n d1 = copy.copy(self.pick())\n d2 = copy.copy(self.pick())\n times = 2\n for i in range(times):\n d1.crossover(d2)\n\n new_data += [d1, d2]\n\n\n\n\n\n if (self.population_size % 2) == 1:\n new_data.append(copy.deepcopy(self.pick()))\n\n assert(len(self.data) == len(new_data))\n\n for i in range(len(new_data)):\n self.data[i].data = new_data[i]", "def crossover(chromosome_1, chromosome_2):\n (x1, y1) = (randrange(col_count), randrange(row_count))\n (x2, y2) = (randrange(x1+1, col_count+1), randrange(y1+1, row_count+1))\n def mate(chromosome_1, chromosome_2):\n used = set(chromosome_1[x+y*col_count] for x in range(x1, x2) for y in range(y1, y2))\n not_used = (allele for allele in chromosome_2 if allele not in used)\n return [chromosome_1[x+y*col_count] if x1 <= x < x2 and y1 <= y < y2 else next(not_used) for y in range(row_count) for x in range(col_count)]\n return (mate(chromosome_1, chromosome_2), mate(chromosome_2, chromosome_1))", "def evaluate(train, train_labels, test, test_labels):\n \n # Use the same model for each training set for now\n model = RandomForestClassifier(n_estimators = 100, \n random_state = 50, n_jobs = -1)\n \n train = train.replace({np.inf: np.nan, -np.inf: np.nan})\n test = test.replace({np.inf: np.nan, -np.inf:np.nan})\n \n feature_names = list(train.columns)\n \n # Impute the missing values\n imputer = Imputer(strategy = 'median', axis = 1)\n train = imputer.fit_transform(train)\n test = imputer.transform(test)\n \n cv_score = 1 * cross_val_score(model, train, train_labels, \n scoring = \"f1\", \n cv = 5)\n \n # Fit on the training data and make predictions\n model.fit(train, train_labels)\n preds = model.predict(test)\n \n # Calculate the performance\n f1 = f1_score(test_labels, preds)\n print('5-fold CV F1: {:.2f} with std: {:.2f}'.format(cv_score.mean(),cv_score.std()))\n print('Test F1: {:.2f}.'.format(f1))\n \n feature_importances = pd.DataFrame({'feature': feature_names, \n 'importance': model.feature_importances_})\n \n return preds, feature_importances", "def 
selection(probs):\n # pick 2 parents out of this distribution\n t = [i for i in range(len(probs))]\n draw = choice(t, 2, p=probs, replace=False)\n return draw", "def crossover(self, gene2):\r\n assert self.key == gene2.key\r\n\r\n \r\n new_gene = self.__class__(self.key)\r\n for a in self._gene_attributes:\r\n if random() > 0.5:\r\n setattr(new_gene, a.name, getattr(self, a.name))\r\n else:\r\n setattr(new_gene, a.name, getattr(gene2, a.name))\r\n\r\n return new_genes", "def choose_mother(self, index):\n\n candidates = []\n while not candidates:\n tgt_age = int(sample_table(self.fertility_age_probs[index], self.rng)[0])\n tgt_prev_min = 0; tgt_prev_max = 100\n if self.params['use_parity']:\n # old\n # tgt_prev_min = int(sample_table(\n # self.fertility_parity_probs[(tgt_age-15)/5], self.rng)[0])\n tgt_prev_min = int(sample_table(\n self.fertility_parity_probs[floor((tgt_age-15)/5)], self.rng)[0])\n # effectively transform 5 into 5+\n tgt_prev_max = tgt_prev_min if tgt_prev_min < 5 else 20\n tgt_set = self.P.individuals_by_age(tgt_age, tgt_age)\n candidates = [x\n for x in tgt_set \\\n if x.sex == 1 \\\n and x.can_birth() \\\n and not x.with_parents \\\n and tgt_prev_min <= len(x.children) <= tgt_prev_max\n ]\n # TODO ejw: consider updating parity prob usage to `len(x.children) - 1`\n # the `tgt_prev_min` and `tgt_prev_max` seems to be based on the probability that a mother of age `y` should\n # have `x` children at time period t. Say `x=1` children is chosen, then the mother should have one child.\n # Why should mothers with len(x.children)=1 then be considered as candidates? Shouldn't it be\n # `len(x.children) - 1 = 1`? Meaning, a mother without a child is chosen to have a child?\n # Unless, the parity table is restructured to mean the probability of a women with 0 children having a child\n # This actually makes more sense since the mother's age is chosen based on fertility rates, implying that a\n # mother in this age should have a child. 
If x=0 means no, then the probability of the mother actually\n # having a child is way too low: P(women of age y have a child) x (1 - P(x=0)).\n # Consider the actual probability tables for ages up-to 19:\n # 0.856 0\n # 0.125 1\n # 0.017 2\n # 0.001 3\n # 0.001 4\n # 0 5\n # The above either means the probability of mother of zero children having a child is quite high.\n # Or a mother not having a child is quite high.\n # From the above, x=5 is zero, but the above logic can assign a mother aged 18 with x=4 to a new child,\n # this making x=5 when she is 19, which should not be possible with the above.\n return self.rng.choice(candidates)", "def tournament_selection(population, board):\n t = len(population)\n best = replace(np.random.choice(population), board)\n for _ in (1, t):\n beside = replace(np.random.choice(population), board)\n if len(beside) < len(best):\n best = beside\n return best, beside", "def dynamic_crossover(nn1, nn2):\n # Lists for respective weights\n nn1_weights = get_weights(nn1.layers)\n nn2_weights = get_weights(nn2.layers)\n child_weights = []\n\n # Iterate through all weights from all layers for crossover\n for index, _ in enumerate(nn1_weights):\n # Get single point to split the matrix in parents based on # of cols\n coulmns = np.shape(nn1_weights[index])[1]-1\n split = random.randint(0, coulmns)\n # Iterate through after a single point and set the remaing cols to nn_2\n for j in range(split, coulmns):\n nn1_weights[index][:, j] = nn2_weights[index][:, j]\n\n # After crossover add weights to child\n child_weights.append(nn1_weights[index])\n\n # Add a chance for mutation\n mutation(child_weights)\n\n # Create and return child object\n return NeuralNetwork(child_weights)", "def default_random_forest(features, df): \n X= features\n y = df['Severity'].values\n\n classify = RandomForestClassifier(n_estimators = 100)\n classify.fit(X, y)\n y_pred = classify.predict(X)\n \n return classification_report(y, y_pred, target_names=['Non-Severe', 'Severe'])", "def _selection(self, evaluations, selection, method=\"truncated\", best_rate=0.2):\n\n if selection:\n end_range_for_parents = max(1, int(self.population_size * best_rate))\n evaluations_sorted = torch.sort(evaluations)\n population_sorted = self.population[evaluations_sorted[1]]\n\n if self.best_individual is None:\n self.best_individual = population_sorted[0]\n self.best_eval = evaluations_sorted[0][0]\n elif self.best_eval > evaluations_sorted[0][0]:\n self.best_individual = population_sorted[0]\n self.best_eval = evaluations_sorted[0][0]\n best_population = torch.zeros([end_range_for_parents, len(self.population[0])], device=self.device)\n if method == \"truncated\":\n \"\"\"\n returns best individuals\n \"\"\"\n best_population = population_sorted[:end_range_for_parents]\n elif method == \"fitness_based\":\n \"\"\"\n probability of each individual to be selected is proportional to its fitness value\n \"\"\"\n tot = sum(evaluations)\n probabilities = evaluations / tot\n for i in range(end_range_for_parents):\n best_idx = torch.distributions.categorical.Categorical(\n probabilities.clone().detach()).sample()\n best_population[i] = self.population[best_idx]\n # avoid repetitions\n probabilities[best_idx] = 0\n elif method == \"rank_based\":\n \"\"\"\n probability of each individual to be selected is proportional to its rank value\n \"\"\"\n tot = ((1 + len(evaluations)) / 2) * len(evaluations)\n ranks = torch.linspace(1, len(evaluations), steps=len(evaluations), device=self.device)\n sorted_probabilities = 1 - ranks / 
tot\n for i in range(end_range_for_parents):\n best_idx = torch.distributions.categorical.Categorical(\n sorted_probabilities).sample()\n best_population[i] = population_sorted[best_idx]\n # avoid repetitions\n sorted_probabilities[best_idx] = 0\n if self.elitism:\n best_population[end_range_for_parents - 1] = self.best_individual\n else:\n best_population = self.population\n return best_population", "def finish_sensitivity(self):\n # do at most 1000 features\n idx = torch.randperm(self._features.shape[1])[:100]\n self._features = self._features[:, idx]\n\n weight = self.module.weight.data\n num_features_in = weight.shape[1]\n selected_in = torch.zeros(num_features_in).bool()\n\n # greedy approach to rank in features\n for rank in reversed(range(num_features_in)):\n error_best = torch.Tensor([np.Inf])\n best = None\n\n # loop through remaining features to see which to add next\n for idx_in in range(num_features_in):\n # it's already in the set, no need trying to add it...\n if selected_in[idx_in]:\n continue\n\n # try adding in feature j and compute error\n selected_in[idx_in] = 1\n error_with_j = (\n self._features[selected_in].sum(dim=0) ** 2\n ).sum()\n\n # see if it's better than previous best\n if error_with_j < error_best:\n error_best = error_with_j\n best = idx_in\n\n # remove j from selectedIn for now\n selected_in[idx_in] = 0\n\n # add best one from this round to selectedIn\n selected_in[best] = 1\n\n # also note the rank of best in the sensitivities\n self.sensitivity_in[best] = rank", "def crude_policy_selection(actor, clusters):\n # read in results from optimisation\n results = []\n\n for _, case in the_cases(actor).items():\n temp = pd.read_csv(\"simulation/optimisation/\" + actor + \"/results_\" + case + \".csv\")\n temp_ = pd.read_csv(\"simulation/optimisation/\" + actor + \"/convergence_\" + case + \".csv\")\n results.append([temp, temp_])\n\n # collapse in 1 dataframe\n opt_df = pd.DataFrame()\n for i, (result, convergence) in enumerate(results):\n result[\"scenario\"] = i\n opt_df = pd.concat([opt_df, result], axis=0)\n\n # clean up\n opt_df.reset_index(inplace=True, drop=True)\n opt_df.drop_duplicates(inplace=True)\n\n # select policies + add scenario back\n policies = opt_df.iloc[:, :-4]\n policies = pd.concat([policies, opt_df[\"scenario\"]], axis=1)\n\n kmeans = KMeans(n_clusters=clusters, random_state=0).fit(policies.iloc[:, :-1])\n\n # get all policies in each cluster\n policies['cluster'] = kmeans.labels_\n groups = policies.groupby(by=\"cluster\")\n groups = groups.obj.sort_values(\"cluster\", ascending=True)\n\n # assign values to each policy in each cluster\n groups[\"value\"] = 0\n for i in range(clusters):\n group = groups.loc[groups[\"cluster\"] == i]\n group = group.iloc[:, :-3]\n scaler = preprocessing.MinMaxScaler().fit(group)\n data_scaled = scaler.transform(group)\n groups.at[group.index.values, 'value'] = data_scaled.sum(axis=1)\n\n # get the most extreme two per cluster\n idx = []\n for cluster in range(clusters):\n idx.extend(groups.loc[groups[\"cluster\"] == cluster].sort_values(by=\"value\", ascending=False)[:2].index.values.tolist())\n\n return opt_df.iloc[idx]", "def crossover(parent1, parent2):\n child = parent1.clone()\n for k in range(parent1.num_input + parent1.num_output):\n if np.random.randint(2) == 1:\n child.identifiers[k] = parent2.identifiers[k]\n child.inhibitors[k] = parent2.inhibitors[k]\n child.enhancers[k] = parent2.enhancers[k]\n\n child.identifiers = child.identifiers[:(child.num_input +\n child.num_output)]\n child.inhibitors = 
child.inhibitors[:(child.num_input + child.num_output)]\n child.enhancers = child.enhancers[:(child.num_input + child.num_output)]\n\n p1range = list(range(parent1.num_input + parent1.num_output,\n parent1.size()))\n random.shuffle(p1range)\n p2range = list(range(parent2.num_input + parent2.num_output,\n parent2.size()))\n random.shuffle(p2range)\n\n p1remaining = deepcopy(p1range)\n\n # Crossing regulatory\n p1_gene_count = 0\n p2_gene_count = 0\n for p1idx in p1range:\n min_dist = config.CROSSOVER_THRESHOLD\n paired_idx = None\n for p2idx in p2range:\n gdist = parent1.protein_distance(parent2, p1idx, p2idx)\n if gdist < min_dist:\n min_dist = gdist\n paired_idx = p2idx\n if paired_idx is not None:\n if np.random.randint(2) == 0:\n chosen_parent = parent1\n chosen_idx = p1idx\n p1_gene_count += 1\n else:\n chosen_parent = parent2\n chosen_idx = p2idx\n p2_gene_count += 1\n child.identifiers = np.append(\n child.identifiers, chosen_parent.identifiers[chosen_idx])\n child.inhibitors = np.append(\n child.inhibitors, chosen_parent.inhibitors[chosen_idx])\n child.enhancers = np.append(\n child.enhancers, chosen_parent.enhancers[chosen_idx])\n # Remove from consideration again\n p2range = list(set(p2range) - set([p2idx]))\n p1remaining = list(set(p1remaining) - set([p1idx]))\n\n # Add remaining material\n if child.size() == (child.num_input + child.num_output):\n prob = 0.5\n else:\n prob = p1_gene_count / (p1_gene_count + p2_gene_count)\n\n chosen_parent = parent2\n chosen_range = p2range\n if np.random.random() < prob:\n chosen_parent = parent1\n chosen_range = p1remaining\n\n for idx in chosen_range:\n child.identifiers = np.append(child.identifiers,\n chosen_parent.identifiers[idx])\n child.inhibitors = np.append(child.inhibitors,\n chosen_parent.inhibitors[idx])\n child.enhancers = np.append(child.enhancers,\n chosen_parent.enhancers[idx])\n\n child.num_regulatory = child.size() - (child.num_input + child.num_output)\n\n # Cross dynamics\n if np.random.random() < 0.5:\n child.beta = parent1.beta\n else:\n child.beta = parent2.beta\n\n if np.random.random() < 0.5:\n child.delta = parent1.delta\n else:\n child.delta = parent2.delta\n\n return child", "def crossover(self, pop):\n intDiscID = self.iID + self.dID\n varID = self.cID\n goldenRatio = (1.0 + sqrt(5)) / 2.0\n dx = np.zeros_like(pop[0])\n children = []\n used = []\n for i in range(0, int(self.fracElite * len(pop)), 1):\n r = int(rand() * self.population)\n while r in used or r == i:\n r = int(rand() * self.population)\n\n used.append(i)\n children.append(cp.deepcopy(pop[r]))\n dx = abs(pop[i] - children[i]) / goldenRatio\n children[i] = children[i] + dx * varID + np.round(dx * intDiscID)\n children[i] = simple_bounds(children[i], self.lb, self.ub)\n\n return (\n children, used)", "def _select(self):\n # The order of adding mutants and babies is important.\n # Later the statistics are computed assuming this addition order.\n self._add_newpop(self._mutants, self._babies)\n last_idx = min(self.pop_size, len(self._curr_pop))\n # Make sure the best and the worst chromos\n # after the selection are correctly placed.\n self._selects = np.argpartition(self._errors, 0)\n self._selects = self._selects[0:last_idx]\n self._curr_pop = self._curr_pop[self._selects]\n self._diffs = self._diffs[self._selects]\n self._errors = self._errors[self._selects]\n best = self._errors[0]\n if best < self._best_error:\n self._best_error = best\n self._best_gen = self._gen_idx", "def single_crossover(self, original1, original2):\n 
point=self.r.uniform(0.1,0.6)\n cut1=int(point*len(original1))\n cut2=int(point*len(original2))\n child1=original1[:cut1]+original2[cut2:]\n child2=original2[:cut2]+original1[cut1:]\n return child1, child2", "def one_point_crossover(self, another_individual):\n size = len(another_individual.get_genotype())\n genotype1 = numpy.zeros(size, dtype=another_individual.get_genotype().dtype)\n genotype2 = numpy.zeros(size, dtype=another_individual.get_genotype().dtype)\n idx = numpy.random.randint(1, size)\n numpy.put(genotype1, range(0, idx), another_individual.get_genotype()[0:idx])\n numpy.put(genotype1, range(idx, size), self.get_genotype()[idx:size])\n numpy.put(genotype2, range(0, idx), self.get_genotype()[0:idx])\n numpy.put(genotype2, range(idx, size), another_individual.get_genotype()[idx:size])\n\n return optimization.Individual(genotype1, self.fitness_evaluator, self.crossover_method, self.mutation_method), optimization.Individual(genotype2, self.fitness_evaluator, self.crossover_method, self.mutation_method)", "def _backward_best_subset(X, y, nbest=8, beamwidth=40, score=\"bic\"):\n \n assert nbest > 0, \"nbest must be positive\"\n beamwidth = max(beamwidth, nbest)\n \n # Add constant\n Xc = add_constant(X).rename(columns={'const': '(Intercept)'})\n \n def get_bic(feature_subset):\n return -OLS(y, Xc[feature_subset]).fit().bic\n\n def get_aic(feature_subset):\n return -OLS(y, Xc[feature_subset]).fit().aic\n\n get_score = get_bic if score == \"bic\" else get_aic\n \n features = X.columns\n \n heap = []\n visited = set()\n \n def get_pair(k):\n return get_score(['(Intercept)', *k]), k\n \n k = tuple(features)\n heapq.heappush(heap, get_pair(k))\n \n while True:\n modified = False\n min_score = heap[0][0]\n for _, k in heap:\n for f in features:\n if f not in k:\n continue\n candidate_features = tuple([x for x in k if x != f])\n if candidate_features in visited:\n continue\n visited.add(candidate_features)\n new_pair = get_pair(candidate_features)\n if new_pair[0] > min_score:\n modified = True\n heapq.heappush(heap, get_pair(candidate_features))\n if len(heap) > beamwidth:\n heapq.heappop(heap)\n min_score = heap[0][0]\n if not modified:\n break\n \n return heapq.nsmallest(nbest, [(-x, ['(Intercept)', *y]) for x, y in heap])", "def cross_validation(features, target, n_neighbors=5, n_folds=5):\n clf = KNeighborsClassifier(n_neighbors=n_neighbors)\n cv_scores = cross_val_score(clf, features, target, cv=n_folds)\n # print each cv score (accuracy) and average them\n print('Cross Validation Scores Mean: %.2f' % (np.mean(cv_scores) * 100))", "def evolve(population, targetSum, targetProduct, retain=0.2, random_select=0.05, mutate=0.01):\n\n graded = [ ( fitness(x, targetSum,targetProduct), x ) for x in population]\n graded = [ x[1] for x in sorted(graded) ]\n retain_length = int(len(graded) * retain)\n parents = graded[:retain_length]\n\n # randomly add other individuals to promote genetic\n # diversity\n for individual in graded[retain_length:]:\n if random_select > random.random():\n parents.append(individual)\n\n # crossover parents to create offspring\n #print(\"starting on crossover\")\n desired_length = len(population) - len(parents)\n children = []\n while len(children) < desired_length:\n male = randint(0, len(parents) - 1)\n female = randint(0, len(parents) -1)\n if male != female:\n male = parents[male]\n female = parents[female]\n half = int(len(male) / 2)\n child = male[: half] + female[half:]\n children.append(child)\n\n # mutate some individuals\n #print(\"starting on 
mutation\")\n for individual in children:\n if mutate > random.random():\n half = int(len(individual) / 2 )\n pos_geneSum = randint(0, (half - 1))\n pos_geneProd = randint(half, (len(individual) - 1))\n tmp = individual[pos_geneSum]\n individual[pos_geneSum] = individual[pos_geneProd]\n individual[pos_geneProd] = tmp\n\n parents.extend(children)\n return parents" ]
[ "0.65794104", "0.6549801", "0.6248398", "0.6199317", "0.6146603", "0.5895406", "0.5845301", "0.5820917", "0.57894474", "0.574185", "0.567809", "0.5662331", "0.56227505", "0.5615345", "0.5599909", "0.55788946", "0.55743223", "0.556973", "0.5551336", "0.5547457", "0.55375916", "0.5511015", "0.5462419", "0.54469216", "0.5443067", "0.5428418", "0.54224104", "0.54042494", "0.5390942", "0.5379511", "0.5365957", "0.5335839", "0.5319202", "0.5312444", "0.5311286", "0.53093016", "0.5299422", "0.52980983", "0.5293322", "0.5291587", "0.52597976", "0.5255993", "0.5252462", "0.5233227", "0.52217484", "0.5214232", "0.52131104", "0.5211197", "0.52109665", "0.5208387", "0.51791394", "0.51790726", "0.51611674", "0.5157308", "0.51515657", "0.5150673", "0.5148026", "0.51309097", "0.51262754", "0.51225454", "0.5120186", "0.5118354", "0.5114808", "0.50986785", "0.5090003", "0.5090003", "0.5073801", "0.507052", "0.5069514", "0.5054269", "0.504911", "0.50443804", "0.5042119", "0.50350803", "0.5032943", "0.50275093", "0.5024635", "0.5022904", "0.50148624", "0.5014259", "0.5008707", "0.500662", "0.49995455", "0.4995664", "0.49921873", "0.49867576", "0.49797168", "0.49756262", "0.49687347", "0.4958747", "0.49552745", "0.49534112", "0.4947548", "0.49387994", "0.49310023", "0.4928599", "0.49285045", "0.49264345", "0.49250793", "0.49234572" ]
0.63705814
2
Just mutating every forest in the collection.
def mutate(self): for forest in self._forests: forest.mutate(self._fullInput)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unifyPreviewNodes(self):\n\n self.leaves.update(self.forced)\n self.forced = set()", "def update(self):\n map(lambda x: x.update(), self._children.values())", "def update (self) :\n for met in self.gene :\n met(self)", "def update(self, list_of_sets):\n for s in list_of_sets:\n self.add(s)", "def mutate(self):\n num_leafs_before = self.num_leafs()\n non_leafs = [v for v, d in self.out_degree() if d > 0]\n box = non_leafs[np.random.choice(len(non_leafs))]\n children = list(self[box])\n for child in children:\n self.remove_subtree(child)\n num_leafs_after = self.num_leafs()\n num_removed = num_leafs_before - num_leafs_after\n self.generate(num_removed)", "def update(self, iterable):\n self._update_nodes(iterable)", "def _reset(base: pymongo.database.Database) -> None:\n if base:\n for collection in base.list_collection_names():\n _reset_collection(base, collection)", "def resetWeights(T):\n T.children = [(t,0) for t in T.children]\n for t,w in T.children:\n resetWeights(t)", "def reassignWeights(self,weights):\n\t\n\t\tbranches = self.collectAllBranches()\n\n\t\tfor i in range(self.nBranches):\n\n\t\t\tbranches[i].weight = weights[i]", "def _forest_nodes(self):\n\n self.arbor._grow_tree(self)\n root = self.root\n for link in root._links:\n yield self.arbor._generate_tree_node(self.root, link)", "def move_to_collection(self, destination_collection):\n for entity in self:\n entity.move_to_collection(destination_collection)", "def reset(self):\n for c in self.children:\n c.reset()\n self.marked = False", "def select(self):\n\n def truncate(self):\n \"\"\" Truncates forest to maximum number of trees. \"\"\"\n\n self.population = self.population[:self.max_number_trees]\n\n def SortOnItem(list_, item_loc):\n \"\"\" Sorts based on a given item. \"\"\"\n\n templist = [elmt[item_loc] for elmt in list_]\n index = np.argsort(templist)\n return [list_[i] for i in index]\n\n # adds current seedlings to forest\n for tree in self.seedlings:\n\n # if tree does not competes with another existing one, adds it\n if tree not in self.population:\n self.population.append(tree)\n\n # sorts the trees of the forest in ascending values - minimization\n self.population = SortOnItem(self.population, item_loc=0)\n\n # removes unfit trees from forest\n truncate(self)", "def _mutate(self, individuals):\n for cur in individuals:\n if random.random() < self.mutation_probability:\n self.op.mutate(cur['individual'])\n cur['fitness'] = None", "def _reset_traversal_state(self):\n for n in self.nodes.values():\n n.reset_traversal_state()", "def __iter__(self):\n new_set = self._clone()\n new_set.tree.iterator = self.tree.traverse()\n return new_set", "def sync_territories(self):\n for territory_state in self.territory.all():\n territory_state.sync()", "def update(self) -> None:\n\t\t# Clear attributes that will be updates\n\t\tself.node_names: List[str] = []\n\t\tself.subnode_names: Dict[str, Set[str]] = {}\n\t\t# Iterate over RootNodes\n\t\tname: str\n\t\ts_name: str\n\t\tfor rootnode in self.root_nodes:\n\t\t\t# Iterate over Nodes\n\t\t\tfor node in rootnode.nodes:\n\t\t\t\tself._update_with_node(node)\n\t\t\tif len(rootnode.subnodes):\n\t\t\t\t# Create Set in subnode_names for the RootNode's SubNodes\n\t\t\t\tself.subnode_names[rootnode.name] = set()\n\t\t\t\t# Iterate over SubNodes\n\t\t\t\tfor subnode in rootnode.subnodes:\n\t\t\t\t\tself.subnode_names[rootnode.name].add(subnode.name)", "def update_all_readings(self):\n\n # update the reading of all nodes\n for node_name in self.nodes:\n\n # update the readings of 
all nodes\n self.nodes[node_name].reading()\n\n # once all nodes have updated, they can be stabilized\n for node_name in self.nodes:\n\n self.nodes[node_name].stabilize()", "def _mutate(self, p_mutate, mutation):\n self.children = mutation(self.children, p_mutate)", "def update_all(self, request):\n\n schema = self.session.info['schema']\n\n for item in self.query().filter_by(schema=schema):\n self.session.delete(item)\n\n for item in ElectionCollection(self.session).query():\n self.update(item, request)\n\n for item in ElectionCompoundCollection(self.session).query():\n self.update(item, request)\n\n for item in VoteCollection(self.session).query():\n self.update(item, request)", "def grow_trees(self, regrow=False):\n if self.forest == [] or regrow:\n mtry = int(math.floor(math.sqrt(len(self.variables))))\n data, trees, var, pred_index = self.data, self.trees, self.variables, self.prediction_index\n attr_fn, dist_classes, order, imp = self.attr_fn, self.dist_classes, len(self.data), self.importance_fn\n self.forest = random_forest.RandomForest(data, trees, mtry, var, pred_index, attr_fn, dist_classes, order, imp)\n print self.trees, ' have been grown using a set of ', len(self.variables), ' variables.'\n else:\n print \"Already a forest in place, add regrow=True to override.\"", "def unselectAll(self):\n\t\tself.tree.UnselectAll()", "def setSubtreeBF(self, index, subtree):\n if index == 0:\n try:\n self[:] = subtree\n except TypeError:\n del self[1:]\n self[0] = subtree\n return\n \n queue = deque(izip(repeat(self, len(self[1:])), count(1)))\n for i in xrange(index):\n elem = queue.popleft()\n parent = elem[0]\n child = elem[1]\n if isinstance(parent[child], Tree):\n tree = parent[child]\n queue.extend(izip(repeat(tree, len(tree[1:])), count(1)))\n parent[child] = subtree", "def clean(self):\n\t\tfor v in self:\n\t\t\tv.reset_distance()\n\t\t\tv.reset_predecessor()\n\t\t\tv.reset_visited()", "def randomize(self):\n for network in self.networks.values():\n network.database = []\n self.env = Environment(self.networks)", "def copy_many_to_temp(self,\r\n sourcerange=None):\r\n\r\n if sourcerange is None:\r\n sourcerange = []\r\n\r\n for a_temp in sourcerange:\r\n\r\n self.copy_to_temp(a_temp,\r\n self.tempobject)", "def copy_relations(self, oldinstance):\n for image in oldinstance.images.all():\n image.pk = None\n image.gallery = self\n image.save()", "def _update(self, features: DataFrameLike) -> None:\n # add features\n self._features = (\n pd.concat([self._features, features], axis=1, sort=True)\n # fill nans resulting from concatenation where features does not\n # contain neighborless nodes (out-degree=0) on its axis\n .fillna(0)\n )\n # prune redundant features\n pruner = FeaturePruner(self._final_features, self._feature_group_thresh)\n features_to_drop = pruner.prune_features(self._features)\n self._features = self._features.drop(features_to_drop, axis=1)\n # save features that remain after pruning and that\n # have not previously been saved as final features\n retained = features.columns.difference(features_to_drop)\n feature_dict = as_frame(self._features[retained]).to_dict()\n self._final_features[self.generation_count] = feature_dict", "def reset_bag(self):", "def resetTree(self):\n for fila in self.verDatos.get_children():\n self.verDatos.delete(fila)", "def reset(self):\n for parent in self.GetParents():\n parent.reset()", "def update(self,x,y):\n if self.ncores > 1:\n # parallel updates\n pass # FIXME\n else:\n # sequential updates\n for tree in self.forest:\n tree.update(x,y)", 
"def update_all_agent(self):\n for a in self.agents:\n soft_update(a.target_actor, a.actor, self.tau)\n soft_update(a.target_critic, a.critic, self.tau)\n self.num_iteration += 1", "def normalise(self):\n total = 0\n for feat_set in self.values():\n for value in feat_set.values():\n total += value\n norm = 1/total\n for feat_set in self.values():\n for feat in feat_set:\n feat_set[feat] *= norm\n return self", "def commit(self):\n for node in self.dep_graph.nodes_iter():\n role = self.roles[node]\n role.cur_rep = role.new_rep\n role.cur_hosts = list(role.new_hosts)\n for edge in self.dep_graph.edges_iter():\n edge_data = self.dep_graph.get_edge_data(*edge)\n edge_data['cur_weight'] = edge_data['new_weight']", "def load_all_groups(self):\n for _, group in self.scopes.items():\n group.update()", "def replace_self_references(self) -> None:\n for child in self.get_children_typed_dicts():\n if child is self:\n child.replace_with_dict.add(self.name)\n continue\n for sub_child in child.get_children_typed_dicts():\n if sub_child.replace_with_dict:\n continue\n if sub_child is self:\n sub_child.replace_with_dict.add(child.name)\n continue", "def subtrees(self):\n yield from subtrees(self)", "def get_forest(self, verbose):\n _antecessors = []\n for key, cluster in self.clusters.items():\n if cluster.leaf_cluster is True:\n _antecessors.append(cluster.antecessor)\n _antecessors = remdup_preserve_order(_antecessors)\n _antecessors = sorted(_antecessors, key=get_cluster_idx, reverse=True)\n\n _tree_idx = 0\n\n print('Generating forest...')\n print('')\n count= 0.0\n if verbose:\n progress_bar = progress_bar = AnimatedProgressBar(end=len(_antecessors), width=50, \\\n fill='=', blank='.')\n for antecessor in _antecessors:\n if verbose and (count % 1 == 0):\n progress_bar + 1\n progress_bar.show_progress()\n tree = Tree(antecessor, idx = _tree_idx, acorns=self)\n self.forest[_tree_idx] = tree\n _tree_idx += 1\n\n if verbose:\n progress_bar.progress = 100 # Done\n progress_bar.show_progress()\n print('')\n print('')\n\n return", "def set_values(self, new_values):\n for name, value in new_values.items():\n self.nodes_db.loc[name][\"node\"].set_value(value)", "def _mutate_file(self, node, visited = set([])):\n for ch in self._get_children(node):\n\n if ch not in visited:\n visited.add(ch)\n\n try:\n self._mutate_node(ch)\n except Exception as e:\n print(e)\n\n # Recursion is a bitch\n self._mutate_file(ch, visited)", "def __init__(self, forest):\n self.forest = forest", "def _reset_cache(self):\n self._cache = None\n for child in self.children: # pylint: disable=E1101\n child._reset_cache()", "def _exchange_ghosts_local(self):\n for d in xrange(self._dim):\n self._exchange_ghosts_local_d(d)", "def flip_subtree_colours(subtree):\n subtree.colour = not subtree.colour\n subtree.left.colour = not subtree.left.colour\n subtree.right.colour = not subtree.right.colour", "def update(self):\n\n for node in self.nodes:\n for edge in node.edges:\n for i, edge_node in enumerate(edge.nodes):\n if edge_node.id != node.id:\n edge_node.add_edge(edge)\n\n return self", "def update(self, status):\n\n for name, c in self.children.items():\n c.update(status.child(name))", "def apply_migration (self, migration) :\n scope = self.home_scope\n for k in (\"Account\", \"Group\", \"Person\", \"links\") :\n for epk, db_attrs in sorted (pyk.iteritems (migration [k])) :\n ET = scope [epk [-1]]\n obj = ET.instance (* epk, raw = True)\n if obj is None :\n obj = ET (* epk, raw = True, ** dict (db_attrs))\n elif k == \"Account\" :\n 
obj.set_raw (** dict (db_attrs))", "def reset_all(self) -> None:\n for metric in self:\n metric.reset()", "def set_all_from_json(self, value:list):\n self.clear()\n for item in value:\n relation_id = item['relation_id']\n members = item['members']\n self[relation_id] = members", "def reinitialize(self):\n if self.is_leaf():\n self.__hash__(reinit=True)\n return {self}, {self}\n else:\n children_leaves = set()\n children_nodes = {self}\n # iterating over the children\n for child in self.child_nodes:\n cur_child_leaves, cur_child_nodes = self.child_nodes[child].reinitialize()\n children_leaves = children_leaves.union(cur_child_leaves)\n children_nodes = children_nodes.union(cur_child_nodes)\n # storing the sets for later use\n self.__hash__(reinit=True)\n self.leaves = children_leaves\n self.nodes = children_nodes\n return children_leaves, children_nodes", "def unnest_collection(collection, df_list):\n for item in collection['link']['item']:\n if item['class'] == 'dataset':\n df_list.append(Dataset.read(item['href']).write('dataframe'))\n elif item['class'] == 'collection':\n nested_collection = request(item['href'])\n unnest_collection(nested_collection, df_list)", "def reset(self):\n self.entities = set()\n self.frozen = False", "def updateMatingPool(self):\n self.matingPool = []\n for ind_i in self.population:\n self.matingPool.append( ind_i.copy() )", "def normalize(self):\n blocks = set(self.blocks)\n queue = set([self.entry_point])\n visited = set()\n while queue:\n root = queue.pop()\n visited.add(root)\n for child in root.children:\n if child not in visited:\n queue.add(child)\n unreachable = blocks - visited\n for block in unreachable:\n block.detach()\n visited.remove(self.entry_point)\n for block in visited:\n if block.empty():\n for parent in block.parents: # Re-parent\n for child in block.children:\n parent.add_child(child)\n block.detach()\n unreachable.add(block)\n blocks -= unreachable\n self.blocks = [block for block in self.blocks if block in blocks]", "def make_looped(self) -> None:\n self.most_right.right_node = self.most_left\n self.most_left.left_node = self.most_right", "def square_tree(t):\n t.entry = square(t.entry)\n for branch in t.branches:\n square_tree(branch)", "def update(self, edges) -> None:\n for v1, v2 in edges:\n self.add(v1, v2)", "def normalize(self):\n queue = {self.entry_point}\n visited = set()\n while queue:\n root = queue.pop()\n visited.add(root)\n for child in root.children:\n if child not in visited:\n queue.add(child)\n unreachable = self.blocks - visited\n for block in unreachable:\n block.detach()\n visited.remove(self.entry_point)\n for block in visited:\n if block.empty():\n for parent in block.parents: # Re-parent\n for child in block.children:\n parent.add_child(child)\n block.detach()\n unreachable.add(block)\n self.blocks -= unreachable", "def UpdateSet(self, dataset):\r\n for data in dataset:\r\n self.UpdateOddsRatioVsNoNorm(data)", "def collect_all(self):\r\n self.clear()\r\n self._process_lines(self._collect_all())", "def expunge_all(self) -> None:\n\n all_states = self.identity_map.all_states() + list(self._new)\n self.identity_map._kill()\n self.identity_map = identity.WeakInstanceDict()\n self._new = {}\n self._deleted = {}\n\n statelib.InstanceState._detach_states(all_states, self)", "def sync_all_children_to_redis(self):\n conn = get_redis_connection()\n key = GoogleMapsAddressComponent.get_redis_all_children_key(self.pk)\n # First, we make sure the key gets destroyed if it exists\n conn.delete(key)\n # Now we add the keys of 
the children to the list\n children = self.get_all_children_seq()\n for child in children:\n conn.lpush(key, child.pk)", "def reset_all(self):\n for i, stop in enumerate(self):\n stop._map = self\n stop.reset()", "def set_universe(self, i):\n self.universe.set(i)", "def ResetMatchList(self):\n for eachApplicant in self.matchList:\n eachApplicant.match = self", "def save(self, **kwargs):\n self.changeset.change_original_collection()\n\n # Adding sub-features will change the MPTT tree through direct SQL.\n # Load the new tree data from the database before parent serializer\n # overwrites it with old values.\n tree_attrs = ('lft', 'rght', 'tree_id', 'level', 'parent')\n db_feature = Feature.objects.only(*tree_attrs).get(id=self.feature.id)\n for attr in tree_attrs:\n setattr(self.feature, attr, getattr(db_feature, attr))\n\n # Adding sub-features will make cached properties invalid\n cached_params = (\n 'row_descendant_pks', 'descendant_pks', 'descendant_count',\n 'row_children', 'row_children_pks', 'page_children_pks',\n '_child_pks_and_is_page')\n for attr in cached_params:\n try:\n delattr(self.feature, attr)\n except AttributeError:\n pass # cached_property was not accessed during serialization", "def untie_everything(self):\r\n self.tied_indices = []", "def _Restore(self) -> None:\n self._SetNodes(self._nodes)", "def updateTree(self):\n self.reset()\n self.resetTree() \n self.read()", "def update_animal_weight_age(self):\n for species in self.fauna_list:\n for animal in self.fauna_list[species]:\n animal.animal_grows()", "def featurize_all(self):\n for point in self.points:\n if not point.is_featurized():\n point.featurize()\n\n return self", "def CLRALL(self):", "def reset(self):\n for layer in self.network:\n layer.clean()", "def flush(self):\n for db in self.values():\n db.flush()", "def _initialize_trees(self):", "def make_forest (self, doclets):\n\n for o in doclets:\n if o.memberof is None:\n continue\n if o.memberof in self.longnames:\n o.parent = self.longnames[o.memberof]\n o.parent.children.append (o)\n continue\n if o.doc ():\n if o.memberof == '<anonymous>':\n o.error (\"\"\"Could not link up object %s to %s.\n Try giving the anonymous object an @alias.\"\"\"\n % (o.longname, o.memberof))\n else:\n o.error (\"Could not link up object %s to %s\" % (o.longname, o.memberof))", "def _rewrite_project(self, node: saldag.Project):\n\n selected_cols = node.selected_cols\n\n for in_col, out_col in zip(selected_cols, node.out_rel.columns):\n out_col.coll_sets |= copy.deepcopy(in_col.coll_sets)", "def sync(self):\n if not self.is_readonly():\n deser = self._deserialize()\n orig = getattr(self.model, self.name)\n if (orig != deser):\n if isinstance(orig, list):\n # first remove the original triples, instead of doing sophisticated\n # set manipulations\n setattr(self.model, self.name, [])\n setattr(self.model, self.name, deser)", "def revision(self) -> \"Collection\":\n revision_collection = clone(\n self.db_object, primary_key=dict(id=self.id, visibility=CollectionVisibility.PRIVATE)\n )\n self.session.add(revision_collection)\n for link in self.links:\n self.session.add(clone(link, collection_id=self.id, collection_visibility=CollectionVisibility.PRIVATE))\n self.session.commit()\n for dataset in self.datasets:\n Dataset(dataset).create_revision()\n return Collection(revision_collection)", "def cleanup(child):\n children = child.get('children', [])\n for childchild in children:\n cleanup(childchild)\n cleaned = {u'title': child['Title'], u'name': child['id'],\n u'children': children}\n 
child.clear()\n child.update(cleaned)", "def _reset_collection(base: pymongo.database.Database, collection: str) -> None:\n logger.info(f'Resetting all data related to \"{collection}\" collection...')\n nb_removed = base[collection].delete_many({}).deleted_count\n logger.info(f\"{nb_removed} records deleted.\")\n\n logger.info(f'Resetting counters.\"{collection}\".')\n nb_removed = base[\"counters\"].delete_many({\"_id\": collection}).deleted_count\n logger.info(f\"{nb_removed} counter records deleted\")", "def _add_all_to_tree(elms, trie):\n for elm in elms:\n tokens = tokenize(elm.name)\n for token in tokens:\n trie.add(token, elm)", "def _autoplace(self, nodes):\n for node in nodes:\n node.autoplace()", "def update_level(self):\n level = 1\n assigned_levels = set([])\n just_assigned = set([])\n for root in self.roots:\n for child in root.children:\n if child in just_assigned:\n continue\n child.level = level\n if len(child.children) == 0:\n continue\n just_assigned.add(child)\n assigned_levels = assigned_levels.union(just_assigned)\n\n level += 1\n leaves = [c for c in self.collectors if len(c.children) == 0]\n len_non_leaves = len(self.collectors) - len(leaves)\n self.update_level_for_non_leaves(\n level, assigned_levels, just_assigned, len_non_leaves\n )", "def calculateAllSupport(self):\n\n for key, value in self._sets[self._currentSet].items():\n val = self.getSupport(key)\n self._sets[self._currentSet][key] = self.getSupport(key)", "def reset(self):\n self._clusters = {}\n self._clusters_val = {}\n self._centroids = {}\n self.store()", "def copy_children(self):\n\n # Create a group\n self.fileh.create_group('/', 'agroup')\n # Create several objects there\n for i in range(10):\n # Create a new array\n self.fileh.create_array('/agroup', 'array' + str(i), self.a1)\n # Excercise copy_children\n for i in range(self.nobjects):\n # Create another group for destination\n self.fileh.create_group('/', 'anothergroup' + str(i))\n # Copy children from /agroup to /anothergroup+i\n self.fileh.copy_children('/agroup', '/anothergroup' + str(i))\n # Put a mark\n self.fileh.mark()\n # Unwind all marks sequentially\n for i in range(self.niter):\n t1 = clock()\n for i in range(self.nobjects):\n self.fileh.undo()\n if verbose:\n print(\"u\", end=' ')\n if verbose:\n print()\n undo = clock() - t1\n # Rewind all marks sequentially\n t1 = clock()\n for i in range(self.nobjects):\n self.fileh.redo()\n if verbose:\n print(\"r\", end=' ')\n if verbose:\n print()\n redo = clock() - t1\n\n print((\"Time for Undo, Redo (copy_children):\", undo, \"s, \",\n redo, \"s\"))", "def reload(self):\n self._populate(self.hierarchy[-1])", "def flushCaches(self):\n self.rehabTreeCache = {} \n self.frailRehabTreeCache = {} \n self.frailTreeCache = {}", "def update_temporal_edges(self):\n for parse in self:\n try:\n self.model.graph.bump_temporal_edge(parse[-2], parse_set[-1])\n except IndexError:\n pass", "def update(knowledge, fact):\n sit, tar, val = fact\n fdepth = depth(fact)\n\n same_depth = [e for e in knowledge if depth(e) == fdepth]\n old_facts_val = get_facts(same_depth, sit, tar)\n old_facts = [(sit, tar, of) for of in old_facts_val]\n\n if fdepth == 1:\n for f in old_facts:\n knowledge.remove(f)\n else:\n to_del = _update(old_facts_val, val)\n for td in to_del:\n knowledge.remove((sit, tar, td))\n knowledge.append(fact)", "def update_scenes(self) -> None:\n self.scenes.update(\n {\n f\"{group.id}_{scene.id}\": scene\n for group in self.groups.values() # type: ignore\n for scene in group.scenes.values()\n if 
f\"{group.id}_{scene.id}\" not in self.scenes\n }\n )", "def copy(self):\n new = self.__class__()\n do_not_copy_by_ref = {\"alleles\", \"strains\", \"base_cobra_model\", \"notes\",\n \"annotation\"}\n for attr in self.__dict__:\n if attr not in do_not_copy_by_ref:\n new.__dict__[attr] = self.__dict__[attr]\n new.notes = deepcopy(self.notes)\n new.annotation = deepcopy(self.annotation)\n\n new.alleles = DictList()\n do_not_copy_by_ref = {\"_strains\", \"_model\"}\n for allele in self.alleles:\n new_allele = allele.__class__()\n for attr, value in iteritems(allele.__dict__):\n if attr not in do_not_copy_by_ref:\n new_allele.__dict__[attr] = copy(\n value) if attr == \"formula\" else value\n new_allele._model = new\n new.alleles.append(new_allele)\n\n new.strains = DictList()\n do_not_copy_by_ref = {\"_model\", \"_alleles\", \"_base_cobra_model\"}\n for strain in self.strains:\n new_strain = strain.__class__()\n for attr, value in iteritems(strain.__dict__):\n if attr not in do_not_copy_by_ref:\n new_strain.__dict__[attr] = copy(value)\n new_strain._model = new\n new.strains.append(new_strain)\n # update awareness\n for allele, stoic in iteritems(strain._alleles):\n new_allele = new.alleles.get_by_id(allele.id)\n new_strain._alleles[new_allele] = stoic\n new_allele._strain.add(new_strain)\n # it doesn't make sense to retain the context of a copied model so\n # assign a new empty context\n new._contexts = list()", "def preprocess(self):\n for texgroup in self.textureGroups.itervalues():\n texgroup.dirty = True", "def __resetLocal__(self,featureVals):\n self.amITrained = False\n self._amplitudes = {}\n self._eigs = {}\n self._modes = {}\n self.__Atilde = {}\n self.pivotValues = None\n self.KDTreeFinder = None\n self.featureVals = None", "def clear(self):\n del self.__tree\n self.__tree = AVLTree()\n print(\"Set is empty now\")", "def refresh(self):\n for budget in self.budgets:\n budget.refresh()\n self._budgets = None", "def clean():\n new_tree = None" ]
[ "0.59149694", "0.5712289", "0.54862785", "0.54687923", "0.5425373", "0.54242694", "0.5323184", "0.5317251", "0.52714866", "0.5256046", "0.525473", "0.524264", "0.52372867", "0.5176812", "0.51745", "0.5156172", "0.51512945", "0.51447666", "0.50903946", "0.5075218", "0.5069291", "0.50478506", "0.5037829", "0.5032198", "0.5025653", "0.5021753", "0.5008239", "0.50028604", "0.5002316", "0.50009376", "0.4988562", "0.4988246", "0.49838924", "0.49762136", "0.4970385", "0.49556962", "0.49515566", "0.4946977", "0.49311954", "0.49275878", "0.49269608", "0.4915272", "0.49040946", "0.48980615", "0.48940822", "0.48940143", "0.4892308", "0.48922685", "0.48909348", "0.48901242", "0.48851946", "0.48822215", "0.48779306", "0.48763463", "0.48732632", "0.48731014", "0.48724228", "0.48719877", "0.4870473", "0.4865603", "0.48617107", "0.48506063", "0.48496467", "0.48440564", "0.48373523", "0.48369646", "0.48306823", "0.48230654", "0.4818386", "0.48164818", "0.4814436", "0.4814132", "0.48057148", "0.48012745", "0.4795947", "0.4792446", "0.47780412", "0.47736084", "0.47733366", "0.47714084", "0.4768224", "0.47638205", "0.47603664", "0.47564143", "0.47538006", "0.4751523", "0.4748167", "0.47435227", "0.4743251", "0.47411102", "0.473533", "0.47342214", "0.47319898", "0.47254637", "0.47221223", "0.471259", "0.47113085", "0.4710358", "0.46980524", "0.46956873" ]
0.7783784
0
Query the SGL cloud
def query_ensor(sensorURI, fromTime, toTime, valueName):
    s = f"https://smartgardalake.snap4.eu/ServiceMap/api/v1/?serviceUri={sensorURI}&fromTime={fromTime}&toTime={toTime}&valueName={valueName}"
    print(s)
    response = requests.get(s)
    data = response.json()
    values = []
    try:
        values = data["realtime"]["results"]["bindings"]
    except KeyError:
        print("[WARN] empty dataset")
    values.reverse()
    result = {
        "measuredTime": [],
        valueName: [],
    }
    print(len(values))
    for i in range(len(values)):
        v = values[i]
        result["measuredTime"].append(v["measuredTime"]["value"])
        try:
            float_measure = float(v[valueName]["value"])
            if valueName == "CO2" and float_measure > 2000:
                result[valueName].append(np.nan)
            else:
                result[valueName].append(float_measure)
        except ValueError:
            result[valueName].append(np.nan)
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def query3() :", "def query(self):", "def query(output, query):\n gqlapi = gql.get_api()\n print_output(output, gqlapi.query(query))", "def query(self, query):", "def query(self, **kwargs):", "def soql_query(self, query):\n self.builtin.log(\"Running SOQL Query: {}\".format(query))\n return self.cumulusci.sf.query_all(query)", "def query(self):\n pass", "def solr_query(config, solr_host, fq, solr_collection_name):\n # solr_collection_name = config['solr_collection_name']\n\n getVars = {'q': '*:*',\n 'fq': fq,\n 'rows': 300000}\n\n url = f'{solr_host}{solr_collection_name}/select?'\n response = requests.get(url, params=getVars)\n return response.json()['response']['docs']", "def query(url):", "def query(self):\r\n raise NotImplementedError", "def query(monitorPoint) :\n return s.query(monitorPoint)", "def search(self, cp, min_surf, max_price, ad_type, nb_room_min, raw=True):\n _cp = []\n if type(cp) is list:\n for c in cp:\n _cp.append(self.get_location(c))\n else:\n _cp.append(self.get_location(cp))\n \n SEARCH_PAYLOAD = {\n \"pageIndex\": 1,\n \"pageSize\": 50000,\n \"query\": {\n \"bedrooms\": [],\n \"includeNewConstructions\": True,\n \"inseeCodes\": _cp,\n \"maximumPrice\": max_price,\n \"minimumLivingArea\": min_surf,\n \"realtyTypes\": 3,\n \"rooms\": range(nb_room_min, 5),\n \"sortBy\": 0,\n \"transactionType\": self._map_type(ad_type)\n }\n }\n \n SEARCH_URL = \"https://api-seloger.svc.groupe-seloger.com/api/v1/listings/search\"\n \n r = requests.post(SEARCH_URL, data=json.dumps(SEARCH_PAYLOAD), headers=self.headers)\n data = r.json()\n ret = {\n 'id': [],\n 'source': self.website\n }\n if raw:\n ret['raw'] = data\n for i in data['items']:\n ret['id'].append(i['id'])\n return ret", "def _query_catalog(self, version=None, level=None):\n print('WaPOR API: _query_catalog')\n\n if isinstance(version, int):\n if 0 < version < 3:\n self.version = version\n else:\n raise ValueError(\n 'WaPOR API ERROR: _query_catalog: Version \"{v}\"'\n ' is not correct!'.format(v=version))\n\n if isinstance(level, int):\n if 0 < level < 4:\n self.level = level\n else:\n raise ValueError(\n 'WaPOR API ERROR: _query_catalog: Level \"{lv}\"'\n ' is not correct!'.format(lv=level))\n elif level is None:\n self.version = version\n else:\n raise ValueError(\n 'WaPOR API ERROR: _query_catalog: Level \"{lv}\"'\n ' is not correct!'.format(lv=level))\n\n if self.level is None:\n base_url = '{0}{1}/cubes?overview=false&paged=false'\n request_url = base_url.format(\n self.path['catalog'],\n self.workspaces[self.version])\n else:\n base_url = '{0}{1}/cubes?overview=false&paged=false&tags=L{2}'\n request_url = base_url.format(\n self.path['catalog'],\n self.workspaces[self.version],\n self.level)\n\n if self.print_job:\n print(request_url)\n\n # requests\n try:\n resq = requests.get(\n request_url)\n resq.raise_for_status()\n except requests.exceptions.HTTPError as err:\n raise Exception(\"WaPOR API Http Error: {e}\".format(e=err))\n except requests.exceptions.ConnectionError as err:\n raise Exception(\"WaPOR API Error Connecting: {e}\".format(e=err))\n except requests.exceptions.Timeout as err:\n raise Exception(\"WaPOR API Timeout Error: {e}\".format(e=err))\n except requests.exceptions.RequestException as err:\n raise Exception(\"WaPOR API OOps: Something Else {e}\".format(e=err))\n else:\n resq_json = resq.json()\n\n try:\n resp = resq_json['response']\n # print(resp)\n\n if resq_json['message'] == 'OK':\n df = pd.DataFrame.from_dict(resp, orient='columns')\n return df\n # return df.sort_values(['code'], 
ascending=[True])\n else:\n print(resq_json['message'])\n except BaseException:\n print('WaPOR API ERROR: Cannot get {url}'.format(\n url=request_url))", "def call_api(page_num=0):\n base_url = \"http://data.sfgov.org/resource/jjew-r69b.json\"\n query_string = SoQL_query(page_num=page_num).generate_query()\n url = base_url+query_string\n response = requests.get(url)\n return response", "def request(query):", "def make_query(self):", "def _run_query(self):", "def mastQuery(request):\n\n server='mast.stsci.edu'\n\n # Grab Python Version\n version = \".\".join(map(str, sys.version_info[:3]))\n\n # Create Http Header Variables\n headers = {\"Content-type\": \"application/x-www-form-urlencoded\",\n \"Accept\": \"text/plain\",\n \"User-agent\":\"python-requests/\"+version}\n\n # Encoding the request as a json string\n requestString = json.dumps(request)\n requestString = urlencode(requestString)\n\n # opening the https connection\n conn = httplib.HTTPSConnection(server)\n\n # Making the query\n conn.request(\"POST\", \"/api/v0/invoke\", \"request=\"+requestString, headers)\n\n # Getting the response\n resp = conn.getresponse()\n head = resp.getheaders()\n content = resp.read().decode('utf-8')\n\n # Close the https connection\n conn.close()\n\n return head,content", "def search(self, query):", "def fetch_all(): \n client, index_name = connection_es()\n res = client.search(index = index_name+\"*\")\n return res", "def knowledge_query():\n params = flask.request.get_json()\n if params is None or 'sql' not in params:\n return flask.jsonify({'error': 'No SQL specified in request'})\n else:\n knowledge_store = KnowledgeStore(settings['FLATMAP_ROOT'], create=False, read_only=True)\n result = knowledge_store.query(params.get('sql'), params.get('params', []))\n knowledge_store.close()\n if 'error' in result:\n app.logger.warning('SQL: {}'.format(result['error']))\n return flask.jsonify(result)", "def satquery(geojson, date_from=None, date_to=None, platform='Sentinel-2',\n cloud_cover_percentage=95):\n\n api = SentinelAPI(USERNAME, PASSWORD, 'https://scihub.copernicus.eu/dhus')\n\n footprint = geojson_to_wkt(read_geojson(geojson), decimals=6)\n kwargs = dict()\n kwargs['platformname'] = platform\n if platform == 'Sentinel-1':\n # Level-1 Ground Range Detected (GRD) products\n kwargs['producttype'] = 'GRD'\n elif platform == 'Sentinel-2':\n kwargs['cloudcoverpercentage'] = (0, cloud_cover_percentage)\n\n products = api.query(footprint, date=(date_from, date_to),\n area_relation='Contains', **kwargs)\n df = api.to_dataframe(products)\n return df.sort_values(by='beginposition')", "def do_search(arg):\n result = {'count': 0, 'time': 0, 'records': []}\n try:\n uri, q, k, m = arg\n dqp = Pyro.core.getProxyForURI(uri)\n scoresLen,results,indocids,exdocids = dqp.search(q, k, m)\n result=(scoresLen,results,indocids,exdocids)\n except Exception as e:\n print \"Exception:\", e\n return result", "def test_search_qlp():\n search = search_lightcurve(\"TIC 277554109\", author=\"QLP\", sector=11)\n assert len(search) == 1\n assert search.table[\"author\"][0] == \"QLP\"\n lc = search.download()\n assert type(lc).__name__ == \"TessLightCurve\"\n assert lc.sector == 11\n assert lc.author == \"QLP\"", "def query(env):\n if app.config['ENABLE_QUERY']:\n envs = environments()\n check_env(env, envs)\n\n form = QueryForm(meta={\n 'csrf_secret': app.config['SECRET_KEY'],\n 'csrf_context': session})\n if form.validate_on_submit():\n if form.endpoints.data == 'pql':\n query = form.query.data\n elif form.query.data[0] == '[':\n 
query = form.query.data\n else:\n query = '[{0}]'.format(form.query.data)\n result = get_or_abort(\n puppetdb._query,\n form.endpoints.data,\n query=query)\n return render_template('query.html',\n form=form,\n result=result,\n envs=envs,\n current_env=env)\n return render_template('query.html',\n form=form,\n envs=envs,\n current_env=env)\n else:\n log.warn('Access to query interface disabled by administrator..')\n abort(403)", "def search_es(es, query_embeddings, k):\n es_query ={\n \"query\": {\n \"knn\": {\n \"embeddings\": {\n \"vector\": query_embeddings,\n \"k\": k\n }\n }\n }\n }\n \n res = es.search(index=ES_INDEX, body=es_query, size=k)\n uris = [hit['_source']['uri'] for hit in res['hits']['hits']]\n return uris", "def res(ra, dec, ang):\n query = \"\"\"\n SELECT\n s.ra, s.dec,\n s.dered_g as g, s.dered_r as r,\n s.err_g, s.err_r,\n s.flags\n \n FROM\n dbo.fGetNearbyObjEq({}, {}, {}) AS n\n JOIN Star AS s ON n.objID = s.objID\n \n WHERE\n g - r BETWEEN -0.5 AND 2.5\n AND g BETWEEN 14 and 24\n \"\"\".format(ra,dec,ang)\n \n return SDSS.query_sql(query, timeout = 600)", "def query(params: Params):\n return service.query(**params.query, n_neighbors=params.n_neighbors)", "def carto_query(query):\n params = {'q': query, 'api_key': apikey(serv=\"cdb\")}\n d = api_query(api_url=CDB_URL, params=params)['rows']\n logging.info(\"Returned %d rows\" % len(d))\n return d", "def junos_cve_query(version):\n pass", "def gws_q(self, query, attribute, data):\n \n gws_q = query.format(attribute, data) #attribute = SQL table\n \n query_df = db.query(gws_q) \n return query_df", "def swis_query_command(client: Client, args: Dict[str, Any]) -> CommandResults:\n query = args.get('query')\n if not query:\n raise ValueError(ERR_MSG['REQUIRED_ARGUMENT'])\n\n response = client.http_request(method=\"GET\", url_suffix=URL_SUFFIX[\"QUERY\"],\n params={\"query\": query})\n outputs = createContext(response.get(\"results\", []), removeNull=True)\n readable_response = convert_query_output_to_hr(outputs)\n return CommandResults(\n outputs_prefix=\"SolarWinds.Query\",\n outputs=outputs,\n readable_output=readable_response,\n raw_response=response\n )", "def get_smps(client, start, end):\n # load image or load from bigquery\n smps_query_str = \"SELECT * FROM cfog.sharp_smps \" +\\\n f\"WHERE timestamp BETWEEN '{start}' AND '{end}' \" +\\\n \"ORDER BY timestamp ASC\"\n print(f\"Executing bigquery query string: \")\n print(smps_query_str + '\\n')\n\n smps_query_job = client.query(smps_query_str)\n smps_query_job.result()\n smps_data = smps_query_job.to_dataframe()\n\n values = np.array(smps_data['values'].values.tolist()).T\n lowBouDia = np.array(smps_data['lowBouDia'].values.tolist()).T\n highBouDia = np.array(smps_data['highBouDia'].values.tolist()).T\n midDia = np.array(smps_data['midDia'].values.tolist()).T\n smps_data_df = smps_data.drop(columns=['values','lowBouDia','highBouDia','midDia']).set_index('timestamp')\n smps_data_out = dict(values=values,\n lowBouDia=lowBouDia,\n highBouDia=highBouDia,\n midDia=midDia,\n df=smps_data_df)\n\n print(f\"Query complete. 
Total number of data entries: {smps_data_out['df'].shape[0]}.\\n\\n\")\n return smps_data_out", "def query_api(location):\r\n norm_dict={\"0\":-1,\"1\":-0.6,\"2\":-0.2,\"3\":0.2,\"4\":0.6,\"5\":1.0}\r\n myDict={\"deli\":\"deli\",\"delis\":\"deli\",\"gluten\":\"gluten\",\"sushi\":\"sushi\",\"chicken\":\"chicken\",\"cheeseburger\":\"burger\",\"sandwich\":\"sandwich\",\"sandwiches\":\"sandwich\",\"hamburger\":\"hamburger\",\"hamburgers\":\"hamburger\",\"burger\":\"burger\",\"burgers\":\"burger\", \"hotdog\":\"hotdog\",\"hotdogs\":\"hotdog\", \"hot dog\": \"hotdog\", \"hot dogs\":\"hotdog\", \"hot-dog\":\"hotdog\",\"buffalo wing\": \"buffalo wing\", \"buffalo wings\":\"buffalo wing\", \"chicken wing\": \"chicken wing\",\"chicken wings\":\"chicken wing\",\"turkey\":\"turkey\",\"egg\":\"egg\",\"eggs\":\"egg\",\"waffle\":\"waffle\",\"waffles\":\"waffle\",\"corn\":\"corn\",\"milk shake\":\"milk shake\",\"milkshake\":\"milk shake\", \"milkshakes\":\"milk shake\", \"milk shakes\":\"milk shake\", \"donut\":\"donut\",\"donuts\":\"donut\",\"doughnut\":\"donut\",\"doughnuts\":\"donut\", \"steak\":\"steak\",\"steaks\":\"steak\", \"pizza\":\"pizza\", \"pizzas\":\"pizza\", \"mac and cheese\":\"mac and cheese\", \"macandcheese\":\"mac and cheese\", \"mac n cheese\":\"mac and cheese\", \"pasta\":\"pasta\", \"pastry\":\"pastry\",\"pastries\":\"pastry\", \"pastryies\":\"pastry\", \"tacos\":\"tacos\", \"breakfast\":\"breakfast\", \"lunch\":\"lunch\",\"dinner\":\"dinner\",\"brunch\":\"brunch\",\"snack\":\"snack\",\"snacks\":\"snack\", \"bar\":\"bar\",\"bars\":\"bar\", \"chineese\":\"chineese\", \"chines\":\"chineese\",\"chinese\":\"chineese\", \"japanese\":\"japanese\", \"korean\":\"korean\", \"indian\":\"indian\", \"india\":\"indian\", \"mexican\":\"mexican\", \"american\":\"american\", \"italian\":\"italian\", \"cake\":\"cake\", \"cakes\":\"cake\", \"pork\": \"pork\",\"pulled pork\":\"pulled pork\", \"pulledpork\":\"pulled pork\", \"pulled-pork\":\"pulled pork\", \"chicken nuggets\":\"chicken nuggets\", \"chicken nugget\":\"chicken nuggets\", \"beaf\":\"beef\",\"beef\":\"beef\",\"wing\":\"wings\",\"wings\":\"wings\", \"fries\": \"fries\",\"frenchfries\": \"fries\",\"french-fries\": \"fries\",\"pancake\":\"pancake\",\"pancakes\":\"pancake\"}\r\n lst=[\"delis\",\"deli\", \"gluten\",\"sushi\",\"chicken\",\"cheeseburger\",\"sandwich\",\"sandwiches\",\"hamburger\",\"hamburgers\",\"burger\",\"burgers\", \"hotdog\",\"hotdogs\", \"hot dog\", \"hot dogs\", \"hot-dog\",\\\r\n \"buffalo wing\", \"buffalo wings\", \"chicken wing\",\"chicken wings\", \"wing\", \"wings\", \"french fries\",\"frenchfries\", \"turkey\",\\\r\n \"egg\",\"eggs\",\"waffle\",\"waffles\",\"corn\",\"milk shake\",\"milkshake\", \"milkshakes\", \"milk shakes\", \"donut\",\"donuts\",\"doughnut\",\\\r\n \"doughnuts\", \"steak\",\"steaks\", \"pizza\", \"pizzas\", \"mac and cheese\", \"macandcheese\", \"mac n cheese\", \"pasta\", \"pastry\"\\\r\n \"pastries\", \"pastryies\", \"tacos\", \"breakfast\", \"lunch\",\"dinner\",\"brunch\",\"snack\",\"snacks\", \"bar\",\"bars\", \"chineese\", \\\r\n \"chines\",\"chinese\", \"japanese\", \"korean\", \"indian\", \"india\", \"mexican\", \"american\", \"italian\", \"cake\", \"cakes\", \"pork\"\\\r\n \"pulled pork\", \"pulledpork\", \"pulled-pork\", \"chicken nuggets\", \"chicken nugget\", \"nuggets\", \"nugget\", \"beaf\",\"beef\"] \r\n \r\n \r\n \r\n with open (\"state data/\"+location+\".json\", 'r') as fp, open (\"state data/\"+location+\" reviews.json\", 'a+') as fp1 :\r\n ##### logic to get business if from 
file\r\n df= pd.read_json(fp, lines=True)\r\n bid=df['id']\r\n for business_id in bid:\r\n keywords=[]\r\n review_text=[]\r\n \r\n rating=[]\r\n tb_score=[]\r\n norm_rating=[]\r\n \r\n review_response = get_reviews(API_KEY, business_id)\r\n try:\r\n \r\n for i in range (len(review_response)):\r\n temp=set()\r\n flag=0\r\n data = review_response[\"reviews\"][i]\r\n review_text.append(data[\"text\"])\r\n rating.append(data[\"rating\"])\r\n \r\n \r\n s=re.sub(r'[^\\w\\s]','',review_text[i])\r\n a = s.lower()\r\n word=a.split(' ')\r\n \r\n for worda in word:\r\n if (worda in lst):\r\n cusine=myDict[worda]\r\n temp.add(cusine)\r\n flag=1\r\n keywords.append(list(temp))\r\n \r\n \r\n if (flag==1):\r\n tb=TextBlob(review_text[i]).sentiment.polarity\r\n val=round(tb,2)\r\n #print (val)\r\n if (not(-0.2 <= val <= 0.2)) :\r\n tb_score.append(val) \r\n else:\r\n tb_score.append(0.0)\r\n else:\r\n tb_score.append(0.0)\r\n \r\n norm_rating.append(norm_dict[str(rating[i])])\r\n \r\n \r\n \r\n reviewDict = {\"id\":business_id, \"text\":review_text, \"rating\":rating, \\\r\n \"TBscore\": tb_score, \"normalisedRating\": norm_rating ,\"keywords\": keywords}\r\n json.dump(reviewDict,fp1)\r\n fp1.write(\"\\n\") \r\n except:\r\n pass", "def main():\n\n # Setup `pysc` to use BASIC auth, with a username, and password. Also sets the endpoint to use.\n setup_sensorcloud_basic(CONSTS['SC_USERNAME'], CONSTS['SC_PASSWORD'],\n CONSTS['SC_ENDPOINT'], CONSTS['PYSC_DEBUG'])\n\n org_id = CONSTS['ORG_ID']\n\n # Ensure the organisation exists on the SensorCloud endpoint.\n try:\n organisation = pysc.models.Organisation.single(org_id)\n except KeyError:\n raise RuntimeWarning(\"\"\"The organisation named {:s} was not found.\\n\"\"\"\n \"\"\"Although the `pysc` api has functionality to create an organisation, it cannot \"\"\"\n \"\"\"do so on the sensor-cloud.io instance on AWS.\"\"\".format(org_id))\n # Ensure sanity, check we got the organisation that we asked for.\n assert (org_id == organisation.id)\n\n # Here we use the Group.resolve_all helper with organisation_id param to filter groups based on id\n # The resolve_all command is similar to .index() however it also calls .follow() on found link automatically,\n # _and_ it converts the resulting HAL objects into real valid `pysc` Group() objects.\n org_groups = pysc.models.Group.resolve_all(params={'organisation_id': org_id})\n # We are not likely to have more than 1000 groups, so we don't need to do return doc pagination here.\n for g in org_groups:\n group_id = g.id\n print(\"Found group: {:s}\".format(group_id))\n\n print(\"Found a total of {:d} groups for {:s} on that SensorCloud endpoint.\".format(len(org_groups), org_id))", "def query(self) -> None:\n raise NotImplementedError()", "def query_the_api():\n\n box = [57.74, -2.97, 56.4, -1.7]\n strbox = (str(box)[1:-1]).replace(\" \", \"\")\n r = requests.get('https://api.luftdaten.info/v1/filter/box=' + strbox)\n my_json_list = r.json()\n\n return my_json_list", "def query_guide(lat='38.890762', lon='-77.084755:', radius='400'):\n location = f\"{lon}:{lat}\"\n try:\n auth_key = codecs.decode(config['michelin']['api_key'], 'rot-13')\n url = f\"https://secure-apir.viamichelin.com/apir/2/findPoi.json2/RESTAURANT/eng?center={location}&nb=100&dist={radius}&source=RESGR&filter=AGG.provider%20eq%20RESGR&charset=UTF-8&ie=UTF-8&authKey={auth_key}\"\n response = requests.get(url).json()[\"poiList\"]\n #db.get_collection(\"michelin_guide\").delete_many({}) # This needs to be moved into Flask to aggregate results\n 
db.get_collection(\"michelin_guide\").insert_many(response)\n logging.info(\"MongoDB Updated: Database - food_fighters, Collection - michelin_guide\")\n except Exception as e:\n logging.error(e)\n raise", "def query(self, *, sparql: str) -> Result:\n pass", "def _query_scryfall(query_param):\n params = 'q=%s' % query_param\n result = requests.get(consts.QUERY_URL, params)\n try:\n result.raise_for_status()\n except Exception as e:\n print('Bad stuff: ')\n print(result.content)\n raise\n\n return result.json()", "def get(self, request, *args, **kwargs):\n device = Device.objects.get(name=kwargs[\"device_name\"])\n global_settings = GoldenConfigSettings.objects.get(id=\"aaaaaaaa-0000-0000-0000-000000000001\")\n status_code, data = graph_ql_query(request, device, global_settings.sot_agg_query)\n data = json.loads(json.dumps(data))\n return Response(GraphQLSerializer(data=data).initial_data, status=status_code)", "def load_catalog_from_sdss(ra, dec, sdss_filter, verbose=False, return_query=False, max_catsize=-1):\n\n\n #import sqlcl\n logger = logging.getLogger(\"GetCatalogFromSDSS\")\n\n #ra = 0\n #print \"# Loading catalog from SDSS online...\"\n \n if (numpy.array(ra).ndim > 0):\n min_ra = ra[0]\n max_ra = ra[1]\n else:\n min_ra = ra - 0.6/math.cos(math.radians(dec))\n max_ra = ra + 0.6/math.cos(math.radians(dec))\n \n if (min_ra < 0):\n ra_query = \"( ra > %(min_ra)f or ra < %(max_ra)f )\" % {\"min_ra\": min_ra+360, \"max_ra\": max_ra,} \n elif (max_ra > 360):\n ra_query = \"( ra > %(min_ra)f or ra < %(max_ra)f )\" % {\"min_ra\": min_ra, \"max_ra\": max_ra-360.,} \n else:\n ra_query = \"ra BETWEEN %(min_ra)f and %(max_ra)f\" % {\"min_ra\": min_ra, \"max_ra\": max_ra,} \n \n if (numpy.array(dec).ndim > 0):\n min_dec = dec[0]\n max_dec = dec[1]\n else:\n min_dec = dec - 0.6\n max_dec = dec + 0.6\n\n #\n # This query is taken from the SDSS website and selects stars with clean photometry\n # --> http://skyserver.sdss3.org/dr8/en/help/docs/realquery.asp#cleanStars\n #\n sql_query = \"\"\"\\\nSELECT ra,dec, u, err_u, g, err_g, r, err_r, i, err_i, z, err_z\nFROM Star \nWHERE \n%(ra_query)s AND dec BETWEEN %(min_dec)f and %(max_dec)f\nAND ((flags_r & 0x10000000) != 0)\n-- detected in BINNED1\nAND ((flags_r & 0x8100000c00a4) = 0)\n-- not EDGE, NOPROFILE, PEAKCENTER, NOTCHECKED, PSF_FLUX_INTERP,\n-- SATURATED, or BAD_COUNTS_ERROR\nAND (((flags_r & 0x400000000000) = 0) or (psfmagerr_r <= 0.2))\n-- not DEBLEND_NOPEAK or small PSF error\n-- (substitute psfmagerr in other band as appropriate)\nAND (((flags_r & 0x100000000000) = 0) or (flags_r & 0x1000) = 0)\n-- not INTERP_CENTER or not COSMIC_RAY\n\"\"\" % {\"filter\": sdss_filter,\n \"min_ra\": min_ra, \"max_ra\": max_ra,\n \"min_dec\": min_dec, \"max_dec\": max_dec,\n \"ra_query\": ra_query,\n }\n\n if (verbose): print(sql_query)\n logger.debug(\"Downloading catalog from SDSS ...\")\n logger.debug(sql_query)\n\n # stdout_write(\"Downloading catalog from SDSS ...\")\n\n # Taken from Tomas Budavari's sqlcl script\n # see http://skyserver.sdss3.org/dr8/en/help/download/sqlcl/default.asp \n import urllib\n # Filter out comments starting with \"--\"\n fsql = \"\"\n for line in sql_query.split('\\n'):\n fsql += line.split('--')[0] + ' ' + os.linesep\n params = urllib.urlencode({'cmd': fsql, 'format': 'csv'})\n url = 'http://skyserver.sdss3.org/dr8/en/tools/search/x_sql.asp'\n sdss = urllib.urlopen(url+'?%s' % params)\n # Budavari end\n\n\n answer = []\n for line in sdss:\n if (max_catsize > 0 and len(answer) >= max_catsize):\n break\n answer.append(line)\n 
if (((len(answer)-1)%10) == 0):\n if (verbose): stdout_write(\"\\rFound %d stars so far ...\" % (len(answer)-1))\n #answer = sdss.readlines()\n if (answer[0].strip() == \"No objects have been found\"):\n stdout_write(\" nothing found\\n\")\n if (return_query):\n return numpy.zeros(shape=(0,12)), fsql #sql_query\n return numpy.zeros(shape=(0,12))\n\n # stdout_write(\" found %d stars!\\n\" % (len(answer)-1))\n logger.debug(\" found %d stars!\\n\" % (len(answer)-1))\n\n if (verbose):\n print(\"Returned from SDSS:\")\n print(\"####################################################\")\n print(''.join(answer))\n print(\"####################################################\")\n\n # If we are here, then the query returned at least some results.\n # Dump the first line just repeating what the output was\n del answer[0]\n\n \n # if (verbose): print \"Found %d results\" % (len(answer))\n results = numpy.zeros(shape=(len(answer),12))\n # Results are comma-separated, so split them up and save as numpy array\n for i in range(len(answer)):\n items = answer[i].split(\",\")\n for col in range(len(items)):\n results[i, col] = float(items[col])\n #ra, dec = float(items[0]), float(items[1])\n #mag, mag_err = float(items[2]), float(items[3])\n #results[i, :] = [ra, dec, mag, mag, mag_err, mag_err]\n \n if (return_query):\n return results, fsql #sql_query\n return results", "def query_DB_satellites(outputpath=\"../data/\", user=\"anonimo\", passwd=\"secreto\"):\n #define the output file\n outputfile=outputpath+\"milky_way_satellites.csv\"\n # Build the SQL query\n \n query = \"with milky_way_halos as (select * from Bolshoi..BDMW where snapnum=416 and Mvir > 5.0E11 and Mvir < 6.0E11 ) select sub.* from milky_way_halos mwh, Bolshoi..BDMW sub where sub.snapnum = 416 and sub.hostFlag = mwh.bdmId\"\n\n # Build the wget command to query the database\n website = \"http://wget.multidark.org/MyDB?action=doQuery&SQL=\"\n username = user\n password = passwd\n \n wget_options=\" --content-disposition --cookies=on --keep-session-cookies --save-cookies=cookie.txt --load-cookies=cookie.txt --auth-no-challenge\" \n wget_options=wget_options+\" -O \"+outputfile +\" \"\n wget_command=\"wget --http-user=\"+username+\" --http-passwd=\"+password+\" \"+wget_options \n command=wget_command + \"\\\"\"+ website + query+\"\\\"\"\n print \"\"\n print query\n print \"\"\n print command\n print \"\"\n # execute wget in shell\n retcode = call(command,shell=True)", "def _gen_cat_query(self,query_fields=None):\n if query_fields is None:\n object_id_fields = ['decals_id','brick_primary','brickid','ra','dec','gaia_pointsource']\n mag_fields = ['mag_g','mag_r','mag_z','mag_w1','mag_w2','mag_w3','mag_w4']\n snr_fields = ['snr_g','snr_r','snr_z','snr_w1','snr_w2','snr_w3','snr_w4']\n query_fields = object_id_fields+mag_fields+snr_fields\n \n database = \"ls_dr7.tractor\"\n self.query = dlsurvey._default_query_str(query_fields, database, self.coord, self.radius)", "def retrieve_all (self, user, pwd, vector_file, tiles, product, startdate, enddate, cloud_max) :\n q_param = (SciHubMetadataExtractor.\n __compose_q_param(vector_file,tiles, product,startdate,enddate,cloud_max))\n if (q_param=='') :\n print (\"ERROR: can't compose query string\")\n return list()\n\n start = 0\n list_result = list()\n while True :\n query_base = SciHubMetadataExtractor.base_url\n query_base+='&start='+str(start) + '&rows='+str(SciHubMetadataExtractor.page_num)\n r = requests.post(query_base,{\"q\":q_param},auth=(user,pwd))\n if (r.status_code!=200) :\n print ('ERROR: ' + 
str(r.status_code))\n return ''\n json_response = json.loads(r.text)\n total = int(json_response[\"feed\"][\"opensearch:totalResults\"])\n if (total == 0) :\n return list_result\n \n raw_entities = json_response[\"feed\"][\"entry\"]\n if total == 1:\n t = list()\n t.append(raw_entities)\n raw_entities = t.copy()\n\n for re in raw_entities :\n list_result.append(SciHubMetadataExtractor.__convert_raw_entity(re)) \n \n if (start + SciHubMetadataExtractor.page_num >= total) :\n break\n else :\n start+=SciHubMetadataExtractor.page_num\n \n return list_result", "def query():\n data = {'version': config.API_VERSION}\n args = flask.request.args\n limit = args.get('limit', config.DEFAULT_QUERY_LIMIT)\n offset = args.get('offset', 0)\n q = args.get('q', '')\n table = args.get('table')\n filter_params = {'filter': args.get('filter')}\n try:\n total, result = db_client.search(table, q,\n limit, offset,\n **filter_params)\n data['result_count'] = total\n data['results'] = result\n except db.InvalidTable:\n data['error'] = 'Invalid table:'+str(table)\n\n return flask.jsonify(data)", "def get_queries() -> List[str]:\n\n logging.info(\"getting query files\")\n gcs = storage.Client(project=GCP_PROJECT)\n bucket = gcs.bucket(GCS_BUCKET)\n blobs = bucket.list_blobs(prefix='layered_gis')\n queries = {}\n for blob in blobs:\n blob_name = blob.name\n if '.sh' in blob_name:\n continue\n filename = blob_name.replace('layered_gis/', '')\n layer, _ = filename.split('/')\n sql_query = blob.download_as_string().decode('utf-8')\n full_query = queries.get(layer, '')\n if full_query:\n full_query += 'UNION ALL \\n'\n full_query += sql_query + '\\n'\n queries[layer] = full_query\n return queries.values()", "def search():\n # q is the name of the http parameter\n request.args.get(\"q\")\n\n #check for missing arguments\n if not(request.args.get(\"q\")):\n raise RuntimeError(\"Missing geo!\")\n\n #\"%\":match any number of characters\n q=request.args.get(\"q\") + \"%\"\n\n #retrieve data from database\n rows=db.execute(\"SELECT * from places WHERE postal_code LIKE :pc OR place_name LIKE :city OR admin_name1 LIKE :state\", pc=q,city=q,state=q)\n\n return jsonify(rows)", "def search(self):\n premium = self.config.get('premium', False)\n\n self.params[self.opts['keyword']['query_key']] = self.config[self.opts['keyword']['config_key']] # keyword\n # Selection params\n self.append_param('tag_mode', 'selection')\n if premium:\n self.append_param('order_premium', 'selection')\n else:\n self.append_param('order_not_premium', 'selection')\n\n self.append_param('type', 'selection')\n self.append_param('tool', 'selection')\n self.append_param('ratio', 'selection')\n self.append_param('mode', 'selection')\n\n # Number params\n self.append_param('min_width', 'number')\n self.append_param('max_width', 'number')\n self.append_param('min_height', 'number')\n self.append_param('max_height', 'number')\n if premium:\n self.append_param('min_bookmark', 'number')\n self.append_param('max_bookmark', 'number')\n else:\n self.set_bookmark_filter()\n\n # Date params\n self.append_param('start_time', 'date')\n self.append_param('end_time', 'date')\n\n # multi work filter\n self.filters['multi'] = self.config.get('download_multi', False)\n\n for i in range(self.config['start_page'], self.config['end_page'] + 1):\n self.params['p'] = i\n self.headers['Referer'] = 'https://www.pixiv.net/'\n url ='https://www.pixiv.net/search.php'\n html = self.session.get(url, headers = self.headers, params = self.params, timeout = 10, proxies = self.proxies)\n\n soup 
= BeautifulSoup(html.text, 'lxml')\n data_items = json.loads(soup.find('input', id = 'js-mount-point-search-result-list')['data-items'])\n\n return self.extract_work_info(data_items)", "def fusion_api_get_lsg(self, uri=None, param='', api=None, headers=None):\n return self.lsg.get(uri=uri, param=param, api=api, headers=headers)", "def product_search(obj, query):\n client = get_client(obj)\n\n pgs = client.product_list(q=query)\n\n print(json.dumps(pgs, indent=4))", "def get_data(kgs: int, credentials_db: dict) -> pd.DataFrame:\n main_query = f\"\"\"\n SELECT rectangle, radius\n FROM \n `kgs22_coordinates`\n WHERE \n `KGS22` = {kgs}\n \"\"\"\n\n conn = pymysql.connect(host=credentials_db['HOST'], user=credentials_db['USERNAME'],\n port=credentials_db['PORT'], passwd=credentials_db['PASSWORD'], db=credentials_db['DB'],\n connect_timeout=100000)\n df = pd.read_sql(main_query, conn)\n conn.close()\n return df", "def query_loggings(self, query: str) -> Tuple[List[dict], list]:\n query_data = {'query': self.add_instance_id_to_query(query),\n 'language': 'csql'}\n demisto.debug('Query being executed in CDL: {}'.format(str(query_data)))\n query_service = self.initial_query_service()\n response = query_service.create_query(query_params=query_data, enforce_json=True)\n query_result = response.json()\n\n if not response.ok:\n status_code = response.status_code\n try:\n # For some error responses the messages are in 'query_result['errors'] and for some they are simply\n # in 'query_result\n errors = query_result.get('errors', query_result)\n error_message = ''.join([message.get('message') for message in errors])\n except AttributeError:\n error_message = query_result\n\n raise DemistoException(f'Error in query to Cortex Data Lake XSOAR Connector [{status_code}] - {error_message}')\n\n try:\n raw_results = [r.json() for r in query_service.iter_job_results(job_id=query_result.get('jobId'),\n result_format='valuesDictionary',\n max_wait=2000)]\n except exceptions.HTTPError as e:\n raise DemistoException(f'Received error {str(e)} when querying logs.')\n\n extended_results: List[Dict] = []\n for result in raw_results:\n page = result.get('page', {})\n data = page.get('result', {}).get('data', [])\n if data:\n extended_results.extend(data)\n\n return extended_results, raw_results", "def test_query_hits(config):\n psp = PostgreSQLProvider(config)\n results = psp.query(resulttype=\"hits\")\n assert results[\"numberMatched\"] == 14776\n\n results = psp.query(\n bbox=[29.3373, -3.4099, 29.3761, -3.3924], resulttype=\"hits\")\n assert results[\"numberMatched\"] == 5\n\n results = psp.query(properties=[(\"waterway\", \"stream\")], resulttype=\"hits\")\n assert results[\"numberMatched\"] == 13930", "def query_datacube(product,latitude,longitude,time,measurements):\r\n\r\n dc = datacube.Datacube(app=\"Query\")\r\n\r\n xarr = dc.load(\r\n product=product, \r\n longitude=longitude, \r\n latitude=latitude,\r\n # Time format YYYY-MM-DD\r\n time=time, \r\n measurements=measurements\r\n )\r\n\r\n return xarr", "def mast_query(request):\n\n # Base API url\n request_url = 'https://mast.stsci.edu/api/v0/invoke'\n # Grab Python Version\n version = \".\".join(map(str, sys.version_info[:3]))\n # Create Http Header Variables\n headers = {\"Content-type\": \"application/x-www-form-urlencoded\",\n \"Accept\": \"text/plain\",\n \"User-agent\": \"python-requests/\" + version}\n # Encoding the request as a json string\n req_string = json.dumps(request)\n req_string = urlencode(req_string)\n # Perform the HTTP request\n resp = 
requests.post(request_url, data=\"request=\" + req_string, headers=headers)\n # Pull out the headers and response content\n head = resp.headers\n content = resp.content.decode('utf-8')\n return head, content", "def search_results():\n skip = int(flask.request.args.get(\"skip\", \"0\"))\n limit = int(flask.request.args.get(\"limit\", \"20\"))\n\n obj = {}\n\n # query : will be event kit in case of triage information\n uidstr = flask.request.args.get(\"query\", None)\n\n if uidstr == None:\n obj[\"error\"] = \"Missing search ID\"\n\n uidstr = json.loads(uidstr)\n\n obj[\"query\"] = {}\n obj[\"query\"][\"uid\"] = uidstr\n obj[\"clips\"] = []\n states = backend.get_search_sessions()\n obj[\"sessions\"] = []\n for astate in states:\n obj[\"sessions\"].append(str(astate))\n try:\n uid = uuid.UUID(uidstr)\n state = backend.get_iqr_search_state(uid)\n # use the uid of the state and get the information from the database\n col = str(state.uuid)\n obj[\"collection\"] = col\n searchdb[col].ensure_index([(\"model_id\", pymongo.ASCENDING),(\"probability\", pymongo.DESCENDING) ])\n # Force probabilities\n obj[\"positives\"] = list(state.positives)\n obj[\"negatives\"] = list(state.negatives)\n log = \"\"\n for id in state.positives:\n # log = log + \"Found %d\"%(searchdb[col].find({\"model_id\" : \"FUSION\", \"clip_id\" : id}).count()) + \", \"\n # res = searchdb[col].update({\"model_id\" : \"FUSION\", \"clip_id\" : id}, {\"$set\" : { \"probability\" : 1.0}})\n # log = log + \"Done %d\"%id + \", \"\n news = searchdb[col].find_one({\"model_id\" : \"FUSION\", \"clip_id\" : id})\n news[\"probability\"] = 1.0001\n searchdb[col].save(news)\n log = log + \"Now : \" + str(news)\n\n\n for id in state.negatives:\n # log = log + \"Found %d\"%(searchdb[col].find({\"model_id\" : \"FUSION\", \"clip_id\" : id}).count()) + \", \"\n # res = searchdb[col].update({\"model_id\" : \"FUSION\", \"clip_id\" : id}, {\"$set\" : { \"probability\" : 0.0}})\n # log = log + \"Done %d\"%id + \", \"\n news = searchdb[col].find_one({\"model_id\" : \"FUSION\", \"clip_id\" : id})\n news[\"probability\"] = 0.0\n searchdb[col].save(news)\n log = log + \"Now : \" + str(news)\n\n obj[\"log\"] = log\n\n allres = searchdb[col].find({\"model_id\" : \"FUSION\"}).sort([(\"probability\", pymongo.DESCENDING)]).skip(skip).limit(limit)\n rank = skip + 1\n for one in allres:\n aclip = {}\n aclip[\"score\"] = one[\"probability\"]\n aclip[\"id\"] = \"HVC\" + str(one[\"clip_id\"]).zfill(6)\n clipobj = db[\"clips\"].find_one({\"id\" : \"HVC\" + str(one[\"clip_id\"]).zfill(6)},{\"duration\" : 1})\n aclip[\"duration\"] = clipobj[\"duration\"]\n aclip[\"rank\"] = rank\n rank = rank + 1\n obj[\"clips\"].append(aclip)\n obj[\"count\"] = len(obj[\"clips\"])\n\n except Exception as e:\n obj[\"error\"] = str(type(e)) + \": \" + str(e)\n return jsonify(obj)\n\n obj[\"next\"] = \"http://localhost:5003/iqr/search_results?\" + urllib.urlencode({\"uid\" : uid, \"skip\" : skip+limit } )\n return jsonify(obj)", "def query(self, query, request_type=None):\n\n #encode to UTF-8\n try: query = query.encode(\"utf-8\")\n except: query = query.decode('raw_unicode_escape').encode(\"utf-8\")\n\n lowercase_query = query.lower()\n if lowercase_query.startswith(\"select\") or \\\n lowercase_query.startswith(\"describe\") or \\\n lowercase_query.startswith(\"show\") or \\\n request_type==\"GET\":\n\n return self._get(urllib.urlencode({'sql': query}))\n\n else:\n return self._post(urllib.urlencode({'sql': query}))", "def search():\n\n # no search query retrieved\n if not 
request.args.get(\"q\"):\n raise RuntimeError(\"missing search parameter q\")\n\n # store search query\n q = request.args.get(\"q\")\n\n # remove any punctuation\n for punc in string.punctuation:\n q = q.replace(punc, '')\n\n # prevents http 500 error when string started with punctuation\n if q == \"\":\n q = \"xyz\"\n\n # split multi-word query\n elements = []\n for word in q.split():\n # add to array, concat with SQL wildcard\n elements.append(word + '%')\n\n if len(elements) == 1:\n # assuming: city // state\n station_list = Station.query.join(Place).\\\n filter(db.or_(Place.city.like(elements[0]), Place.state.like(elements[0]))).all()\n\n # assuming: name // call\n station_list += Station.query.\\\n filter(db.or_(Station.name.like(elements[0]), Station.call.like(elements[0]))).all()\n\n elif len(elements) == 2:\n # assuming: city city\n station_list = Station.query.join(Place).\\\n filter(Place.city.like(elements[0]+elements[1])).all()\n\n # assuming: city, state\n station_list += Station.query.join(Place).\\\n filter(db.and_(Place.city.like(elements[0]), Place.state.like(elements[1]))).all()\n\n # assuming: name / call, city / state\n station_list += Station.query.join(Place).\\\n filter(db.and_(\n db.or_(Station.name.like(elements[0]), Station.call.like(elements[0])),\n db.or_(Place.city.like(elements[1]), Place.state.like(elements[1])))).all()\n\n elif len(elements) == 3:\n # assuming: city city, state\n station_list = Station.query.join(Place).\\\n filter(db.and_(Place.city.like(elements[0]+elements[1]), Place.state.like(elements[2]))).all()\n\n # assuming: name / call, city city\n station_list += Station.query.join(Place).\\\n filter(db.and_(\n db.or_(Station.name.like(elements[0]), Station.call.like(elements[0])),\n Place.city.like(elements[1]+elements[2]))).all()\n\n # assuming: name / call, city, state\n station_list += Station.query.join(Place).\\\n filter(db.and_(\n db.or_(Station.name.like(elements[0]), Station.call.like(elements[0])),\n db.and_(Place.city.like(elements[1]), Place.state.like(elements[2])))).all()\n\n elif len(elements) == 4:\n # assuming: name / call, city city, state\n station_list = Station.query.join(Place).\\\n filter(db.and_(\n db.or_(Station.name.like(elements[0]), Station.call.like(elements[0])),\n db.and_(Place.city.like(elements[1]+elements[2]), Place.state.like(elements[3])))).all()\n\n # serialize thequery set\n result = geo_stations.dump(station_list)\n\n return jsonify(result.data)", "def _generate_es_query_external():\n\n internal_ips = [net[0] for service in Service.objects.all() for net in get_internal_ips(service)]\n\n query_object = {\n \"size\": 0,\n \"query\": {\n \"constant_score\": {\n \"filter\": {\n \"and\": [\n {\"term\": {\"direction\": \"out\"}},\n {\"range\": {\"@timestamp\": {\"gt\": \"now-10m\"}}},\n {\n \"not\": {\n \"terms\": {\"ip\": internal_ips}\n }\n }\n ]\n }\n }\n }\n }\n\n query_object[\"aggregations\"] = {\n \"external_services\": {\n \"terms\": {\n \"field\": \"ip\"\n },\n \"aggregations\": {\n \"clients\": {\n \"terms\": {\n \"field\": \"beat.hostname\"\n },\n \"aggregations\": {\n \"redis\": {\n \"terms\": {\n \"field\": \"type\",\n \"include\": \"redis\"\n },\n \"aggregations\": {\n \"oks\": {\n \"missing\": {\"field\": \"redis.error\"}\n },\n \"errors\": {\n \"filter\": {\"exists\": {\"field\": \"redis.error\"}}\n }\n }\n },\n \"mysql\": {\n \"terms\": {\n \"field\": \"type\",\n \"include\": \"mysql\"\n },\n \"aggregations\": {\n \"oks\": {\n \"filter\": {\"term\": {\"mysql.iserror\": \"false\"}}\n },\n 
\"errors\": {\n \"filter\": {\"term\": {\"mysql.iserror\": \"true\"}}\n }\n }\n },\n \"pgsql\": {\n \"terms\": {\n \"field\": \"type\",\n \"include\": \"pgsql\"\n },\n \"aggregations\": {\n \"oks\": {\n \"filter\": {\"term\": {\"pgsql.iserror\": \"false\"}}\n },\n \"errors\": {\n \"filter\": {\"term\": {\"pgsql.iserror\": \"true\"}}\n }\n }\n }\n }\n }\n }\n }\n }\n\n return query_object", "def wikidata_query(request, str):\n url_head = 'https://query.wikidata.org/sparql?query=PREFIX%20entity:%20<http://www.wikidata.org/entity/>%20SELECT%20?propUrl%20?propLabel%20?valUrl%20?valLabel%20?picture%20WHERE%20{%20hint:Query%20hint:optimizer%20%27None%27%20.%20{%20BIND(entity:';\n url_second = '%20AS%20?valUrl)%20.%20BIND(\"N/A\"%20AS%20?propUrl%20)%20.%20BIND(\"identity\"@en%20AS%20?propLabel%20)%20.%20}%20UNION%20{%20entity:';\n url_tail = '%20?propUrl%20?valUrl%20.%20?property%20?ref%20?propUrl%20.%20?property%20a%20wikibase:Property%20.%20?property%20rdfs:label%20?propLabel%20}%20?valUrl%20rdfs:label%20?valLabel%20FILTER%20(LANG(?valLabel)%20=%20%27en%27)%20.%20OPTIONAL{%20?valUrl%20wdt:P18%20?picture%20.}%20FILTER%20(lang(?propLabel)%20=%20%27en%27%20)%20}&format=json'\n\n if request.method == 'GET':\n r = requests.get(url_head+str+url_second+str+url_tail);\n return Response(r.json()['results']['bindings'])\n #print r", "def run_me(query_string):\n \n start = time.time()\n\n sparql.setQuery(query_string)\n #sparql.setReturnFormat(TURTLE) #this won't make a difference for this function but useful to know\n results = sparql.query()\n \n end = time.time()\n timepass = end - start\n print \"time_taken: \"+str(timepass)+\" seconds\"\n\n #these are like the table \"column headings\" in SQL; an array of dictionaries with the full bindings\n bindings = results.variables\n print(bindings)\n \n try:\n datas = results[bindings]\n for d in datas :\n for b in bindings :\n subj = d[b].value\n print subj+\"\\t\",\n print(\"\\n\")\n except IndexError:\n print \"No hits!\"\n return", "def query(q,epr,f='application/sparql-results+json'):\n\n try:\n params = {'query': q}\n params = urllib.urlencode(params)\n opener = urllib2.build_opener(urllib2.HTTPHandler)\n request = urllib2.Request(epr+'?'+params)\n request.add_header('Accept', f)\n request.get_method = lambda: 'GET'\n url = opener.open(request)\n return url.read()\n except Exception, e:\n traceback.print_exc(file=sys.stdout)\n raise e", "def __call__(self, query=None, limit=1, uid=None):\n\n # Query prefix\n prefix = \"select id, score, questionuser, question, tags, date, answeruser, object answer, reference from txtai where\"\n\n if uid is not None:\n # ID query\n query = f\"{prefix} id = '{uid}'\"\n elif self.embeddings.scoring:\n # Use custom tokenizer for word vector models\n query = Tokenizer.tokenize(query)\n\n # Run search and build id query\n result = self.embeddings.search(query, 1)[0] if query else {}\n query = f\"\"\"\n select id, {result.get('score')} score, questionuser, question, tags, date, answeruser, object answer, reference\n from txtai\n where id = '{result.get('id')}'\n \"\"\"\n else:\n # Default similar clause query\n query = f\"{prefix} similar('{query}')\"\n\n # Render results\n for result in self.embeddings.search(query, limit):\n # Show result\n self.result(result, limit)\n\n self.console.print()", "def query(self, queries):\n times = []\n for q in queries:\n # print(\"Starting \" + q)\n t_start = time.time()\n self.solr.search(\"text:\" + q, rows=self.n_rows)\n times.append(time.time()-t_start)\n return {\"times_query\": 
np.mean(times)}", "def icos_stations(*args):\n\n if len(args) != 4:\n filterstr = \" \"\n else:\n filterstr = \"\"\"\n filter(\n ?lat >= %s && ?lat <= %s &&\n ?lon >= %s && ?lon <= %s).\"\"\" % (args)\n\n\n query = \"\"\"\n PREFIX cpst: <http://meta.icos-cp.eu/ontologies/stationentry/>\n SELECT\n (IF(bound(?lat), str(?lat), \"?\") AS ?latstr)\n (IF(bound(?lon), str(?lon), \"?\") AS ?lonstr)\n (REPLACE(str(?class),\"http://meta.icos-cp.eu/ontologies/stationentry/\", \"\") AS ?themeShort)\n (str(?country) AS ?Country)\n (str(?sName) AS ?Short_name)\n (str(?lName) AS ?Long_name)\n (GROUP_CONCAT(?piLname; separator=\";\") AS ?PI_names)\n (str(?siteType) AS ?Site_type)\n FROM <http://meta.icos-cp.eu/resources/stationentry/>\n WHERE {\n ?s cpst:hasCountry ?country .\n ?s cpst:hasShortName ?sName .\n ?s cpst:hasLongName ?lName .\n ?s cpst:hasSiteType ?siteType .\n ?s cpst:hasPi ?pi .\n ?pi cpst:hasLastName ?piLname .\n ?s a ?class .\n OPTIONAL{?s cpst:hasLat ?lat } .\n OPTIONAL{?s cpst:hasLon ?lon } .\n OPTIONAL{?s cpst:hasSpatialReference ?spatRef } .\n OPTIONAL{?pi cpst:hasFirstName ?piFname } .\n %s\n }\n GROUP BY ?lat ?lon ?class ?country ?sName ?lName ?siteType\n ORDER BY ?themeShort ?sName\n \"\"\" %filterstr\n\n return query", "def main():\n\n # generate a token, we will be sending several queries off\n token = gen_token()\n # build the query string\n s_d, e_d = prev_quarter_boundaries(datetime.datetime.utcnow())\n s_str = s_d.strftime(\"%Y-%m-%d\")\n e_str = e_d.strftime(\"%Y-%m-%d\")\n query_str = (\n 'filingsource:\"Taiwan TWSE\" AND ' +\n 'enddate:[' + s_str + ' TO ' +\n e_str + ']'\n )\n # pull docs\n docs_res = documents_stringquery(query_str, False, token=token)\n # read out number of hits\n num_filings = docs_res['totalHits']\n # print it out\n print('Filing count from last quarter: ' + str(num_filings))", "def query_all(self, search_query = \"all\", request = -1, batch = 1000):\n\n # send to Arxiv to learn total number of papers complying the filter\n trial_feed = self.send_query(search_query, 0, 1)\n feedtotal = int(trial_feed.feed.opensearch_totalresults)\n print('Total results for this query: %i' % feedtotal)\n\n # create lists which will be converted to parquet files\n al = []\n cl = []\n\n # if request is not provided, set req to feedtotal\n req = feedtotal if request == -1 else min(request,feedtotal)\n t = batch\n totalresults = 0\n\n # send queries until req is fulfilled\n while totalresults < req: #(t == batch and totalresults < req) or (request == -1 and totalresults < feedtotal):\n feed = self.send_query(search_query, totalresults, batch)\n al = self.read_authors(feed, al)\n cl = self.read_collabs(feed, al, cl)\n t = len(feed.entries)\n # al, cl, t, _ = query(search_query, al, cl, start = totalresults, max_results = batch, verbose = False)\n # al = al + a\n # cl = cl + c\n totalresults = totalresults + t\n print(\"Query returned: \", t)\n print(\"Total results:%i\" % totalresults)\n # if t == 0: t = batch #potential bug\n\n # create a dataframe containing author ids and names\n schema_a = StructType([\n StructField(\"id\", IntegerType(), True),\n StructField(\"name\", StringType(), True)\n ])\n self.author_df = self.create_df(al, schema_a)\n\n # create a dataframe containing collaborations\n schema_c = StructType([\n StructField(\"src\", IntegerType(), True),\n StructField(\"dest\", IntegerType(), True),\n StructField(\"arxiv\", StringType(), True),\n StructField(\"title\", StringType(), True)\n ])\n self.collab_df = self.create_df(cl, schema_c)\n\n # print results to 
parquet\n print(\"Query completed, length of unique authors: \", self.author_df.count() )\n print(\"Length of collabs: \", self.collab_df.count() )\n self.author_df.write.mode('overwrite') \\\n .parquet(\"Data/authors-%s-total%i.parquet\" % (search_query.replace(\":\",\"\"), totalresults))\n self.collab_df.write.mode('overwrite') \\\n .parquet(\"Data/collab-%s-total%i.parquet\" % (search_query.replace(\":\",\"\"), totalresults))\n print(\"Parquet written.\")", "def peticionesArcGISServer(serviceURl):\r\n params = {\"where\": \"1=1\", \"f\":\"pjson\"}\r\n data = requests.get(serviceURl + \"/query\", params)\r\n count = 0\r\n for feature in data.json()[\"features\"]:\r\n count += 1\r\n\r\n print data.json()\r\n print \"Numero total de entidades: {0}\".format(count)", "def start_query():\n query_mapping = {'sum' : Sum()} #TODO: Fill in query mapping from string to object\n enclaves_in_query = {}\n request_dict = json.loads(request.data.decode('utf-8'))\n query_object = query_mapping[request_dict['query_type']]\n privacy_budget = str(request_dict['privacy_budget'])\n\n controller_map = ['128.32.37.205:2000']\n threads = []\n for controller in controller_map:\n thread = RequestThread(controller, enclaves_in_query, query_object, privacy_budget)\n thread.start()\n threads.append(thread)\n for thread in threads:\n thread.join()\n value = query_object.run_query(query_list)\n noise = query_object.generate_noise(query_list, privacy_budget)\n clear_query_list()\n return str(value + noise)", "def psirt_query(token):\n url = 'https://api.cisco.com/security/advisories/cvrf/latest/10'\n headers = {\n 'Accept': 'application/json',\n 'Authorization': 'Bearer ' + token,\n }\n last_10_vulns = requests.get(url, headers=headers)\n logger.info('query response code = ' + str(last_10_vulns.status_code))\n logger.debug(last_10_vulns)", "def query():\n query = request.json.get('query')\n variables = request.json.get('variables') # Todo: add handling variables\n logger.debug('Query: %s', request.json)\n result = schema.execute(query)\n result_hash = format_result(result)\n return result_hash", "def test_query(rgd):\n data = rgd.query(\"test\")\n assert isinstance(data, pd.DataFrame)\n assert data.iloc[0][\"name\"] == \"vm1\"", "def query(self):\n return self.snowflake_options.query", "def query(args):\n import ruido\n ruido.query('.index', 'find {} return .')\n return \"[]\"", "def query(self):\n query_url = self.get_query_url()\n logging.info('Querying: ' + query_url)\n json_data = request.urlopen(query_url).read().decode()\n logging.debug('Retrieved the following ' + json_data)\n response = json.loads(json_data)\n\n return self.get_docs_from_response(response)", "def query(self, page) -> [str, dict]:\n params = {'size': self.max_page_size,\n 'sort': 'displayLabel', 'page': page, 'query': self.querystring}\n url = '{base_url}/v1/stations/rsql'.format(base_url=self.base_url)\n return [url, params]", "def collect_es(name, config, host, kerberos, tls, uname=None, pword=None):\n try:\n a = None\n if kerberos:\n a = HTTPKerberosAuth()\n s = ''\n if tls:\n s = 's'\n if uname and pword:\n r = requests.get(\"http{}://{}/{}/_search\".format(s, host, config['index']), auth(uname, pword))\n else:\n r = requests.get(\"http{}://{}/{}/_search\".format(s, host, config['index'])\n data = json.dumps({\"query\": config['query'], \"size\": 0}),\n auth = a,\n )\n except:\n raise Exception('Cannot connect to Elasticsearch host: {}'.format(host))\n if r.status_code == 200:\n count = r.json()['hits']['total']\n successful = 
r.json()['_shards']['successful']\n failed = r.json()['_shards']['failed']\n total = r.json()['_shards']['total']\n duration = float(r.json()['took']) / 1000\n timed_out = r.json()['timed_out']\n else: \n raise Exception('Query failed: {}'.format(r.json()))\n\n metrics = {}\n metrics['results'] = Metric('es_search_results_total', 'Number of matching results from Elasticsearch', 'gauge')\n metrics['results'].add_sample('es_search_results_total', value=count, labels={'search': name})\n metrics['successful'] = Metric('es_search_shards_successful_total', 'Number of shards where the query returned successfully', 'gauge')\n metrics['successful'].add_sample('es_search_shards_successful_total', value=successful, labels={'search': name})\n metrics['failed'] = Metric('es_search_shards_failed_total', 'Number of shards where the query failed', 'gauge')\n metrics['failed'].add_sample('es_search_shards_failed_total', value=failed, labels={'search': name})\n metrics['total'] = Metric('es_search_shards_total', 'Number of shards queried', 'gauge')\n metrics['total'].add_sample('es_search_shards_total', value=total, labels={'search': name})\n metrics['timed_out'] = Metric('es_search_timed_out', 'Did the query time out', 'gauge')\n metrics['timed_out'].add_sample('es_search_timed_out', value=timed_out, labels={'search': name})\n metrics['duration'] = Metric('es_search_duration_seconds', 'Time Elasticsearch search took, in seconds', 'gauge')\n metrics['duration'].add_sample('es_search_duration_seconds', value=duration, labels={'search': name})\n\n class Collector():\n def collect(self):\n return metrics.values()\n registry = CollectorRegistry()\n registry.register(Collector())\n return generate_latest(registry)", "def grasspi_query_db(table_name,query,value):\n\n query_entries = []\n conn = sqlite3.connect(grasspi_config.cfg.db_file)\n conn.text_factory = str\n c = conn.cursor()\n val = \"SELECT * FROM \" + table_name + ' WHERE '+ query +' = '+\"'\" + value +\"'\"\n for row in c.execute(val):\n query_entries.append(row)\n c.close()\n return query_entries", "def test_query(config):\n\n p = PostgreSQLProvider(config)\n feature_collection = p.query()\n assert feature_collection.get('type', None) == 'FeatureCollection'\n features = feature_collection.get('features', None)\n assert features is not None\n feature = features[0]\n properties = feature.get('properties', None)\n assert properties is not None\n geometry = feature.get('geometry', None)\n assert geometry is not None", "def gaia_query(ra_deg, dec_deg, rad_deg, maxmag=20, \n maxsources=10000):\n vquery = Vizier(columns=['Source', 'RA_ICRS', 'DE_ICRS', \n 'phot_g_mean_mag'], \n column_filters={\"phot_g_mean_mag\": \n (\"<%f\" % maxmag)}, \n row_limit = maxsources) \n \n field = coord.SkyCoord(ra=ra_deg, dec=dec_deg, \n unit=(u.deg, u.deg), \n frame='icrs')\n return vquery.query_region(field, \n width=(\"%fd\" % rad_deg), \n catalog=\"I/337/gaia\")[0]", "def cone_search(conn, table, center_ra, center_dec, radius, schema='public'):\n cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)\n query = 'SELECT * FROM {}.{} WHERE q3c_join(%s, %s, ra, dec, %s)'\n # This one isn't needed until > 1 million rows\n #query = 'SELECT * FROM {}.{} WHERE q3c_radial_query(ra, dec, %s, %s, %s)'\n cur.execute(psycopg2.sql.SQL(query).format(\n psycopg2.sql.Identifier(schema), psycopg2.sql.Identifier(table)),\n (center_ra, center_dec, radius))\n rows = cur.fetchall()\n cur.close()\n\n return rows", "def search(self, query, maxhits=100):", "def __call__(self, q=None, 
**kwargs):\n from elasticsearch.helpers import scan\n es = self.esindex.es\n index = self.esindex.index\n gscan = scan(es, q=q, index=index, _source=['uid'], **kwargs)\n # FIXME: we need a fetchstart to work around bug in databroker.\n # Remove the fetchstart mapping when fixed.\n fetchstart = self.db.hs.mds.run_start_given_uid\n gstartstop = ((fetchstart(e['_source']['uid']), None) for e in gscan)\n rv = Results(gstartstop, self.db, data_key=None)\n return rv", "def get_glossis_data():\n data_params = DATASETS_VIS['glossis']\n\n r = request.get_json()\n dataset = r['dataset']\n\n assert (dataset in data_params), '{} not in assets. '.format(dataset)\n data_params = data_params[dataset]\n # Get collection based on dataset requested\n collection = ee.ImageCollection(data_params['source'])\n\n if 'date' in r:\n start = ee.Date(r['date'])\n collection = collection.filterDate(start)\n # check that at least one image returned. If not, return error\n n_images = collection.size().getInfo()\n if not n_images:\n msg = 'No images available for time: %s' % (r['date'])\n logger.debug(msg)\n raise error_handler.InvalidUsage(msg)\n\n image = ee.Image(collection.sort('system:time_start', False).first())\n image_date = image.date().format().getInfo()\n # image_id = image.id().getInfo()\n\n # Generate image on dataset requested (characteristic to display)\n band = r.get('band', list(data_params['bandNames'].keys())[0])\n assert band in data_params['bandNames']\n\n\n if dataset in ['wind', 'currents']:\n function = r.get('function', 'magnitude')\n assert function in data_params['function']\n if function == 'magnitude':\n image = image.pow(2).reduce(ee.Reducer.sum()).sqrt()\n vis_params = {\n 'min': data_params['min'][function],\n 'max': data_params['max'][function],\n 'palette': data_params['palette'][function]\n }\n else:\n image = image.unitScale(data_params['min'][function], data_params['max'][function]).unmask(-9999)\n data_mask = image.eq(-9999).select(data_params['bandNames'][band])\n image = image.clamp(0, 1).addBands(data_mask)\n vis_params = {\n 'min': data_params['min'][function],\n 'max': data_params['max'][function]\n }\n else:\n image = image.select(data_params['bandNames'][band])\n vis_params = {\n 'min': data_params['min'][band],\n 'max': data_params['max'][band],\n 'palette': data_params['palette'][band]\n }\n\n if 'min' in r:\n vis_params['min'] = r['min']\n\n if 'max' in r:\n vis_params['max'] = r['max']\n\n if 'palette' in r:\n vis_params['palette'] = r['palette']\n\n info = generate_image_info(image, vis_params)\n info['dataset'] = dataset\n info['date'] = image_date\n # info['imageId'] = image_id\n # info['imageSource'] = data_params['source']\n\n return Response(\n json.dumps(info),\n status=200,\n mimetype='application/json'\n )", "def query(universe: pd.DataFrame, query_string: str) -> List[Stock]:\n scope = list(map(\n lambda stock: Stock(name=stock[2], stock_id=stock[1], price=stock[3]),\n universe.query(query_string).to_numpy()\n ))\n print(\"debug!!!!!\")\n print([(stock.get_id(), stock.get_name(), stock.get_price()) for stock in scope])\n return scope", "def search(self, query):\n launch_gs_app('search',\n self.browser,\n GoogleSuite.SEARCH_URL.format(_urlencode([('q', query)])))", "def query(self) -> dict:\n raise NotImplementedError()", "def query(sql):\n if (sql is None):\n raise Exception(\"SQL not specified\") \n try:\n database = App.instance().environment.database\n connection = psycopg2.connect(host=database.host, dbname=database.database, \n user=database.user, 
password=database.password)\n cursor = connection.cursor()\n cursor.execute(sql)\n fields = [ x[0] for x in cursor.description]\n return (fields, cursor.fetchall())\n except(Exception, psycopg2.DatabaseError) as error:\n print(\"Error connecting to database\", error)\n finally:\n if not connection is None:\n cursor.close()\n connection.close()", "def doQuery(self, s):\n self.setQuery(s)\n\n try:\n rval = self.query()\n g = rval.convert()\n return g['results']['bindings']\n except:\n print \"doQuery failed\"\n traceback.print_exc(file=sys.stdout)", "def query(self, block):\n raise NotImplementedError(\"Querying is an experimental feature\")", "def query(link, payload={}):\r\n response = requests.get(link, params=payload)\r\n if response.status_code != 200:\r\n print('WARNING', response.status_code)\r\n print(response.content)\r\n return response", "def queryComponent(type=None, filter=None, all=0):", "async def summary_census(myquery: UserRequestModel):\n age = myquery.age\n class_of_worker = myquery.class_of_worker\n det_ind_code = myquery.industry_code\n det_occ_code = myquery.occupation_code\n marital_stat = myquery.marital_status\n major_ind_code = myquery.major_industry_code\n major_occ_code = myquery.major_occupation_code\n hisp_origin = myquery.hispanic_origin\n sex = myquery.sex\n age = str(age)\n det_ind_code = str(det_ind_code)\n det_occ_code = str(det_occ_code)\n filter_query = \"\"\"\n WITH data AS (\n WITH data_occ AS (\n WITH data_class AS(\n WITH person_total AS (\n WITH person_edu AS (\n WITH person_sex AS (\n WITH person_race AS (\n WITH person_hisp AS (\n SELECT p1.id_person, p1.age, p1.year, p1.marital_stat, p1.race, \n p1.education, p1.sex, hsp.hisp_origin FROM person_tbl as p1\n INNER JOIN hisp_origin_tbl as hsp ON hsp.id = p1.hisp_origin\n )\n SELECT r.race, p2.id_person, p2.age, p2.year, p2.marital_stat,\n p2.education, p2.hisp_origin, p2.sex FROM race_tbl as r \n INNER JOIN person_hisp as p2 ON p2.race = r.id\n )\n SELECT p3.id_person, p3.race, p3.age, p3.year, p3.education, p3.hisp_origin,\n p3.sex, ms.marital_stat FROM person_race AS p3\n INNER JOIN martial_status_tbl as ms ON ms.id = p3.marital_stat\n )\n SELECT p4.id_person, p4.race, p4.age, p4.year, p4.marital_stat, p4.education, \n p4.hisp_origin, sex_tbl.sex FROM person_sex AS p4\n INNER JOIN sex_tbl ON sex_tbl.id = p4.sex\n )\n SELECT p5.id_person, p5.race, p5.age, p5.year, p5.marital_stat, edu.education,\n p5.hisp_origin, p5.sex FROM person_edu as p5\n INNER JOIN education_tbl as edu ON edu.id = p5.education\n )\n SELECT p.id_person, p.race, p.age, p.year, p.marital_stat, p.education, p.hisp_origin, \n p.sex, e.det_occ_code, e.wage_per_hour, e.union_member, e.unemp_reason,\n e.own_or_self, e.weeks_worked, e.income_50k, e.class_worker FROM person_total AS p\n INNER JOIN employee_tbl as e ON e.id_person=p.id_person\n )\n SELECT dcl.id_person, dcl.race, dcl.age, dcl.year, dcl.marital_stat, dcl.education, dcl.hisp_origin,\n dcl.sex, dcl.wage_per_hour, dcl.union_member, dcl.unemp_reason, dcl.own_or_self,\n dcl.weeks_worked, dcl.income_50k, dcl.det_occ_code, cw.class_worker FROM data_class as dcl\n INNER JOIN class_worker_tbl as cw ON cw.id = dcl.class_worker\n )\n SELECT docc.id_person, docc.race, docc.age, docc.year, docc.marital_stat, docc.education, docc.hisp_origin,\n docc.sex, docc.wage_per_hour, docc.union_member, docc.unemp_reason, docc.own_or_self,\n docc.weeks_worked, docc.income_50k, mo.major_occ_code, mo.det_ind_code, docc.class_worker,\n docc.det_occ_code FROM data_occ as docc\n INNER JOIN 
det_occ_code_tbl as mo ON mo.det_occ_code = docc.det_occ_code\n )\n SELECT data.id_person, data.race, data.age, data.year, data.marital_stat, data.education, data.hisp_origin,\n data.sex, data.wage_per_hour, data.union_member, data.unemp_reason, data.own_or_self, data.class_worker,\n data.weeks_worked, data.income_50k, data.major_occ_code, mi.major_ind_code, \n data.det_ind_code, data.det_occ_code FROM data\n INNER JOIN det_ind_code_tbl as mi ON mi.det_ind_code = data.det_ind_code\n WHERE age = '{}'\"\"\".format(age)\n\n filter_query = filter_query + \" AND class_worker = '{}'\".format(class_of_worker)\n filter_query = filter_query + \" AND data.det_ind_code = '{}'\".format(det_ind_code)\n filter_query = filter_query + \" AND data.det_occ_code = '{}'\".format(det_occ_code) \n\n if None in [marital_stat, major_ind_code, major_occ_code, hisp_origin, sex]:\n if marital_stat is not None:\n filter_query = filter_query + \" AND marital_stat = '{}'\".format(marital_stat)\n if major_ind_code is not None:\n filter_query = filter_query + \" AND major_ind_code = '{}'\".format(major_ind_code)\n if major_occ_code is not None:\n filter_query = filter_query + \" AND major_occ_code = '{}'\".format(major_occ_code)\n if hisp_origin is not None:\n filter_query = filter_query + \" AND hisp_origin = '{}'\".format(hisp_origin)\n if sex is not None:\n filter_query = filter_query + \" AND sex = '{}'\".format(sex) \n\n table_query = filter_query + ';'\n query_to_csv = await database.fetch_all(query=table_query)\n\n # data_file = open('files/filtered_table.csv', 'w', newline='')\n data_file = io.StringIO()\n csv_writer = csv.writer(data_file)\n count = True\n for emp in query_to_csv:\n if count:\n header = emp.keys()\n csv_writer.writerow(header)\n count = False\n csv_writer.writerow(emp.values())\n # data_file.close()\n\n final_block = \"\"\")\n SELECT avg(wage_per_hour) as mean_wage, avg(weeks_worked) as mean_weeks_worked,\n min(wage_per_hour) as min_wage, min(weeks_worked) as min_weeks_worked,\n max(wage_per_hour) as max_wage, max(weeks_worked) as max_weeks_worked,\n sum(income_50k) as person_50k_plus, count(id_person) as num_person\n FROM filter;\"\"\"\n \n filter_query = 'WITH filter AS ( ' + filter_query\n filter_query = filter_query + final_block\n results = await database.fetch_all(query=filter_query)\n\n answer = {}\n for row in results:\n answer.update(dict(row))\n # with open('files/query.json', 'w') as outfile:\n # json.dump(answer, outfile)\n json_writer = json.dumps(answer)#, default=jsonDefault)\n \n # files = ['files/query.json', 'files/filtered_table.csv']\n file_names = ['query.json', 'filtered_table.csv']\n file_objects = [json_writer, data_file.getvalue().encode()]\n files = []\n i = 0\n for f in file_names:\n files.append((f, file_objects[i]))\n i += 1\n\n return zipfiles(files)", "def sql(self, q):\r\n params = base.get_params(None, locals())\r\n url = '{0}/{1}'.format(self.get_url(), 'sql')\r\n\r\n return http.Request('POST', url, params), parsers.parse_json", "def query(\n c7n_config: C7nCfg,\n data_dir: PathLike = Path(\"data\").joinpath(\"query\"),\n telemetry_disabled: bool = True,\n):\n run(\n c7n_config, data_dir=data_dir, telemetry_disabled=telemetry_disabled, dryrun=True,\n )", "def search():\n query = input('Please enter your search query\\n')\n # For now, we will just print the whole database\n #db_actions.display()\n db_actions.search(query)", "def query():\n rows = []\n data = db.get()\n\n for calc in data:\n rows.append({\"ip\" : calc.ip, \"text\":calc.text})\n\n return 
jsonify(rows)", "def feature_layer_query(form):\n\n resource = None\n if \"resource\" in form.vars:\n resource = form.vars.resource\n # Remove the module from name\n form.vars.resource = resource[len(form.vars.module) + 1:]\n\n #if \"advanced\" in form.vars:\n # # We should use the query field as-is\n # pass\n\n #if resource:\n # # We build query from helpers\n # if \"filter_field\" in form.vars and \"filter_value\" in form.vars:\n # if \"deleted\" in db[resource]:\n # form.vars.query = \"(db[%s].deleted == False) & (db[%s][%s] == '%s')\" % (resource, resource, filter_field, filter_value)\n # else:\n # form.vars.query = \"(db[%s][%s] == '%s')\" % (resource, filter_field, filter_value)\n # else:\n # if \"deleted\" in db[resource]:\n # # All undeleted members of the resource\n # form.vars.query = \"(db[%s].deleted == False)\" % (resource)\n # else:\n # # All members of the resource\n # form.vars.query = \"(db[%s].id > 0)\" % (resource)\n if not resource:\n # Resource is mandatory if not in advanced mode\n session.error = T(\"Need to specify a Resource!\")\n\n return", "async def g(self, ctx, *, query):\n if not ctx.channel.is_nsfw:\n return await ctx.send('This command can only be used in nsfw channels due to abuse.')\n await ctx.trigger_typing()\n try:\n card, entries = await self.get_google_entries(query)\n except RuntimeError as e:\n await ctx.send(str(e))\n else:\n if card:\n value = '\\n'.join(f'[{title}]({url.replace(\")\", \"%29\")})' for url, title in entries[:3])\n if value:\n card.add_field(name='Search Results', value=value, inline=False)\n return await ctx.send(embed=card)\n\n if len(entries) == 0:\n return await ctx.send('No results found... sorry.')\n\n next_two = [x[0] for x in entries[1:3]]\n first_entry = entries[0][0]\n if first_entry[-1] == ')':\n first_entry = first_entry[:-1] + '%29'\n\n if next_two:\n formatted = '\\n'.join(f'<{x}>' for x in next_two)\n msg = f'{first_entry}\\n\\n**See also:**\\n{formatted}'\n else:\n msg = first_entry\n\n await ctx.send(msg)", "def query_site(url, params, uid=\"\", fmt=\"json\"):\n params[\"fmt\"] = fmt\n r = requests.get(url + uid, params=params)\n print(\"requesting\"+r.url)\n\n if r.status_code == requests.codes.ok:\n return r.json()\n else:\n r.raise_for_status()" ]
[ "0.6597153", "0.65192515", "0.6470072", "0.6186665", "0.60921586", "0.6054754", "0.6036551", "0.596416", "0.59399474", "0.59136164", "0.5830741", "0.57854694", "0.5752966", "0.57420826", "0.573901", "0.57316273", "0.5722046", "0.57058287", "0.5688149", "0.5679209", "0.56482124", "0.56466484", "0.56441003", "0.5632874", "0.56116414", "0.5605748", "0.55848145", "0.55736476", "0.55676657", "0.5558643", "0.55519426", "0.5534424", "0.55284476", "0.552536", "0.55217385", "0.5502826", "0.55010986", "0.5498547", "0.5481605", "0.54683834", "0.5444652", "0.5439647", "0.5431466", "0.5415598", "0.5388974", "0.5388093", "0.5385771", "0.5382612", "0.53818876", "0.5366139", "0.5361716", "0.5344728", "0.5340976", "0.533351", "0.5329065", "0.5327249", "0.5326622", "0.53117007", "0.53044987", "0.5300432", "0.52993196", "0.52982926", "0.52967113", "0.52856165", "0.5284209", "0.52800435", "0.5265239", "0.5255548", "0.525183", "0.5249336", "0.52481425", "0.5247656", "0.52355987", "0.5231367", "0.5220732", "0.5218727", "0.52043396", "0.52043384", "0.51975316", "0.5194736", "0.5192389", "0.5184892", "0.5182782", "0.5182447", "0.5178925", "0.5178648", "0.51745003", "0.5170805", "0.51687187", "0.5168443", "0.51682925", "0.51662534", "0.51607007", "0.51606226", "0.5154563", "0.515455", "0.5152446", "0.51478094", "0.514564", "0.5144719", "0.5143091" ]
0.0
-1
Query SGL for a traffic sensor. See query_ensor() for sensorURI, fromTime and toTime
def get_traffic_sensor_df(sensorURI: str, fromTime: str, toTime: str, resampleFreq: str = None, remove_outliers=False): values = ["count", "sumSpeed"] result = None for v in values: # data = query_ensor(sensorURI, fromTime, toTime, v) data = multiday_query(sensorURI, fromTime, toTime, v) df = pd.DataFrame(data, columns=["measuredTime", v]) df["measuredTime"] = pd.to_datetime(df["measuredTime"]) df.index = df["measuredTime"] del df["measuredTime"] if remove_outliers: z_scores = np.abs(stats.zscore(df)) print(f"Removed outliers: {df.size - df[(z_scores < 3).all(axis=1)].size}") df = df[(z_scores < 3).all(axis=1)] if resampleFreq is not None: df = df.resample(resampleFreq).sum() if result is not None: result = pd.merge_ordered(result, df, left_on="measuredTime", right_on="measuredTime") result.index = result["measuredTime"] del result["measuredTime"] else: result = df # avg speed result["avgSpeed"] = result["sumSpeed"] / result["count"] result.loc[~np.isfinite(result["avgSpeed"]), "avgSpeed"] = np.nan result["avgSpeed"] = result["avgSpeed"].interpolate() return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def query_ensor(sensorURI, fromTime, toTime, valueName):\n\n s = f\"https://smartgardalake.snap4.eu/ServiceMap/api/v1/?serviceUri={sensorURI}&fromTime={fromTime}&toTime={toTime}&valueName={valueName}\"\n print(s)\n response = requests.get(s)\n data = response.json()\n values = []\n try:\n values = data[\"realtime\"][\"results\"][\"bindings\"]\n except KeyError:\n print(\"[WARN] empty dataset\")\n values.reverse()\n result = {\n \"measuredTime\": [],\n valueName: [],\n }\n print(len(values))\n for i in range(len(values)):\n v = values[i]\n result[\"measuredTime\"].append(v[\"measuredTime\"][\"value\"])\n try:\n float_measure = float(v[valueName][\"value\"])\n if valueName == \"CO2\" and float_measure > 2000:\n result[valueName].append(np.nan)\n else:\n result[valueName].append(float_measure)\n except ValueError:\n result[valueName].append(np.nan)\n return result", "def read_sensors():\n previous_time = datetime.datetime.now()\n while True:\n now = datetime.datetime.now()\n delta = now - previous_time\n if delta.seconds >= sample_frequency:\n previous_time = now\n \n # Read SGP30.\n eCO2_data = sgp30.eCO2\n tvoc_data = sgp30.TVOC\n\n # Read VEML6070 and VEML7700, sample ten times.\n for j in range(10):\n light_data = light.lux\n uv_raw = uv.uv_raw\n uv_data = uv.get_index(uv_raw)\n\n # Read BME280.\n temp_data = bme280.temperature\n # Convert temperature (C->F)\n temp_data = temp_data * 1.8 + 32\n humid_data = bme280.humidity\n pressure_data = bme280.pressure\n\n # Write to database\n conn = sqlite3.connect(db)\n curs = conn.cursor()\n curs.execute(\"INSERT INTO data values(?, ?, ?, ?, ?, ?, ?, ?)\",\n (now, temp_data, humid_data, pressure_data, eCO2_data, tvoc_data,\n light_data, uv_data))\n conn.commit()\n conn.close()", "def get_all_sensors():\n\tquery_url = 'http://localhost:8079/api/query'\n\tquery = \"select *\"\n\tr = requests.post(query_url, query)\n\treturn r.content", "def sensor_history(self, sensor_name, start_time_sec, end_time_sec,\n include_value_ts=False, timeout_sec=0):\n\n if timeout_sec != 0:\n self._logger.warn(\n \"timeout_sec is no longer supported. 
Default tornado timeout is used\")\n\n params = {\n 'sensor': sensor_name,\n 'start_time': start_time_sec,\n 'end_time': end_time_sec,\n 'limit': MAX_SAMPLES_PER_HISTORY_QUERY,\n 'include_value_time': include_value_ts\n }\n\n url = url_concat(\n (yield self.get_sitemap())['historic_sensor_values'] + '/query', params)\n self._logger.debug(\"Sensor history request: %s\", url)\n response = yield self._http_client.fetch(url)\n data_json = json.loads(response.body)\n if 'data' not in data_json:\n raise SensorHistoryRequestError(\"Error requesting sensor history: {}\"\n .format(response.body))\n data = []\n for item in data_json['data']:\n if 'value_time' in item:\n sample = SensorSampleValueTime(item['sample_time'],\n item['value_time'],\n item['value'],\n item['status'])\n else:\n sample = SensorSample(item['sample_time'],\n item['value'],\n item['status'])\n data.append(sample)\n result = sorted(data, key=_sort_by_sample_time)\n raise tornado.gen.Return(result)", "def get_sensor(userid, deviceid, sensorid):\n sensor_response = requests.get(\"http://sensor-access:5600/v1/sensors/{}\".format(sensorid))\n return make_response(sensor_response.content, sensor_response.status_code)", "def get_sensors(userid, deviceid):\n sensor_response = requests.get(\"http://sensor-access:5600/v1/sensors\", json=request.json)\n return make_response(sensor_response.content, sensor_response.status_code)", "def query(monitorPoint) :\n return s.query(monitorPoint)", "def sensors():\n sensor_data = query_db('SELECT * FROM sensors')\n return jsonify(results=sensor_data)", "def get_sondes(client, start, end):\n\n sonde_query_str = \"SELECT * FROM cfog.sharp_radiosonde \" + \\\n f\"WHERE LaunchTime BETWEEN '{start}' AND '{end}' \" + \\\n \"ORDER BY LaunchTime ASC\"\n\n print(f\"Executing bigquery query string: \")\n print(sonde_query_str + '\\n')\n\n sonde_data = {f\"{s['LaunchTime'].strftime('%m-%d_%H')}\":s for s in client.query(query=sonde_query_str)}\n\n print(\"Radiosondes obtained within the queried time bounds: \")\n print(list(sonde_data))\n\n sonde_data_out = {}\n for t in sonde_data:\n # ignored col: SoundingIdPk, RadioRxTimePk, PtuStatus\n sonde_data_out[t] = {}\n sonde_data_out[t]['df'] = pd.DataFrame({\n 'DataSrvTime' : sonde_data[t]['DataSrvTime'],\n 'Pressure' : sonde_data[t]['Pressure'],\n 'Temperature' : sonde_data[t]['Temperature'],\n 'Humidity' : sonde_data[t]['Humidity'],\n 'WindDir' : sonde_data[t]['WindDir'],\n 'WindSpeed' : sonde_data[t]['WindSpeed'],\n 'WindNorth' : sonde_data[t]['WindNorth'],\n 'WindEast' : sonde_data[t]['WindEast'],\n 'Height' : sonde_data[t]['Height'],\n 'WindInterpolated' : sonde_data[t]['WindInterpolated'],\n 'Latitude' : sonde_data[t]['Latitude'],\n 'Longitude' : sonde_data[t]['Longitude'],\n 'North' : sonde_data[t]['North'],\n 'East' : sonde_data[t]['East'],\n 'Up' : sonde_data[t]['Up'],\n 'Altitude' : sonde_data[t]['Altitude'],\n 'Dropping' : sonde_data[t]['Dropping']\n }\n )\n sonde_data_out[t]['LaunchTime'] = sonde_data[t]['LaunchTime']\n sonde_data_out[t]['LaunchLatitude'] = sonde_data[t]['LaunchLatitude']\n sonde_data_out[t]['LaunchLongitude'] = sonde_data[t]['LaunchLongitude']\n\n print(f\"Query complete. 
Total number of data entries: {len(sonde_data_out)}.\\n\\n\")\n\n del sonde_data\n return sonde_data_out", "def get_data(last):\n Table = \"ServerRoom\"\n filter = \"\"\n if last == \"lastone\":\n data = request_meteodata(\"SELECT * from `ServerRoom` ORDER BY id DESC LIMIT 1 \")\n if len(data) == 0:\n return [SensorData(datetime.datetime.now(), 0, 0)]\n res = []\n for d in data:\n res.append(SensorData(d[1], d[2], d[3]))\n return res\n if last != \"All\":\n limit = datetime.datetime.now().astimezone(utz)\n if last == \"24hours\":\n limit -= datetime.timedelta(hours=24)\n else:\n limit = limit.replace(hour=0, minute=0, second=0, microsecond=0)\n if last == \"3days\":\n limit -= datetime.timedelta(days=3)\n elif last == \"7days\":\n limit -= datetime.timedelta(days=7)\n elif last == \"month\":\n limit = limit.replace(day=1)\n elif last == \"30days\":\n limit -= datetime.timedelta(days=30)\n elif last == \"year\":\n limit = limit.replace(day=1, month=1)\n filter = \" WHERE `date` > '\" + str(limit) + \"'\"\n order = \" ORDER BY `date` ASC\"\n req = \"SELECT * FROM `\" + Table + \"`\" + filter + order\n data = request_meteodata(req)\n if len(data) == 0:\n print(\"no data: get all\")\n req = \"SELECT * FROM `\" + Table + \"`\" + order\n data = request_meteodata(req)\n res = []\n for d in data:\n res.append(SensorData(d[1], d[2], d[3]))\n return res", "def _poll_sensors(conn, cursor):\n conn, c = _get_db_connection()\n\n motion_reading = catnanny.motionsensor()\n temp_reading = catnanny.tempreading()\n\n current_timestamp = datetime.now().isoformat()\n # insert a timestamp, the word motion, and the output from catnanny.motionsensor into sensor_data\n c.execute(\"\"\"INSERT INTO sensor_data VALUES (?, ?, ?)\"\"\", (current_timestamp, 'motion', motion_reading))\n # insert a timestamp, the word temperature, and the output from catnanny.tempreading into sensor_data\n c.execute(\"\"\"INSERT INTO sensor_data VALUES (?, ?, ?)\"\"\", (current_timestamp, 'temperature', temp_reading))\n\n conn.commit()", "def GEEviLandsat(ptsFile,metric,timeStep,sensor,buf,poly,username,folderOut, scalePix = 30):\n \n # load required libraries\n import ee\n import math\n \n # Initialize the Earth Engine object, using the authentication credentials.\n ee.Initialize()\n\n ID_field = \"geeID\"\n\n #load pts or poly file\n pts1 = ee.FeatureCollection('users/' + username + '/' + str(ptsFile))\n\n #define dictionary for raster random names\n sensor_d = {}\n sensor_d['L4'] = 'LANDSAT/LT04/C01/T1_SR'\n sensor_d['L5'] = 'LANDSAT/LT05/C01/T1_SR'\n sensor_d['L7'] = 'LANDSAT/LE07/C01/T1_SR'\n sensor_d['L8'] = 'LANDSAT/LC08/C01/T1_SR'\n\n time_d = {}\n time_d['lowest'] = 'rl'\n time_d['month'] = 'rm'\n time_d['year'] = 'ry'\n \n \n #Computes the bits we need to extract.\n def getQABits(image, start, end, newName):\n pattern = 0\n listB = list(range(start, end+1))\n for one in listB:\n pattern += math.pow(2, one)\n pattern = int(pattern)\n \n return (image.select([0], [newName])\n .bitwiseAnd(pattern)\n .rightShift(start))\n \n for sen in sensor:\n LS = ee.ImageCollection(sensor_d[sen])\n #senL = [sen]\n \n def maskbyBits(img):\n QA = img.select('pixel_qa')\n QA1 = getQABits(QA, 3, 3, 'QA')\n QA2 = getQABits(QA, 5, 5, 'QA')\n\n mask = QA1.eq(0).And(QA2.eq(0))\n return img.updateMask(mask)\n \n LSm = LS.map(maskbyBits)\n \n lastImage = ee.Image(ee.ImageCollection(sensor_d[sen])\n .sort('system:time_start',False)\n .first())\n lastImageDate = lastImage.get('system:index').getInfo()\n\n firstImage = 
ee.Image(ee.ImageCollection(sensor_d[sen])\n .sort('system:time_start',True)\n .first())\n firstImageDate = firstImage.get('system:index').getInfo()\n \n startYear = int(firstImageDate[(len(firstImageDate)-8):(len(firstImageDate)-4)])\n endYear = int(lastImageDate[(len(lastImageDate)-8):(len(lastImageDate)-4)])\n startMonth = int(firstImageDate[(len(firstImageDate)-4):(len(firstImageDate)-2)])\n endMonth = int(lastImageDate[(len(lastImageDate)-4):(len(lastImageDate)-2)])-1\n startYearAll = startYear + 1\n endYearAll = endYear - 1\n \n years = list(range(startYear, endYearAll + 1))\n monthsEE = ee.List(list(range(startMonth,(12*len(years)+endMonth))))\n yearsEE = ee.List(list(range(startYearAll, endYearAll + 1)))\n \n for met in metric:\n # metL = [met]\n\n if (sen == 'L8' and met == \"NDVI\"):\n bands = ['B5', 'B4']\n elif (sen != 'L8' and met == \"NDVI\"):\n bands = ['B4', 'B3']\n elif (sen == 'L8' and met == \"NDWI\"):\n bands = ['B5', 'B6']\n elif (sen != 'L8' and met == \"NDWI\"):\n bands = ['B4', 'B5']\n elif (sen == 'L8' and met == \"NBR\"):\n bands = ['B5', 'B7']\n elif (sen != 'L8' and met == \"NBR\"):\n bands = ['B4', 'B7']\n #else:\n #print(\"wrong metric specified\")\n \n def addVI(image):\n vi = (image.normalizedDifference(bands)\n .rename('VI'))\n return image.addBands(vi)\n\n withVI = LSm.map(addVI)\n\n VI_col = withVI.select('VI')\n\n if timeStep == 'year':\n\n def map_m(i):\n i = ee.Number(i).int()\n image2 = (VI_col\n .filter(ee.Filter.calendarRange(i, i, 'year'))\n .first())\n filtered = (VI_col\n .filter(ee.Filter.calendarRange(i, i, 'year'))\n .mean()\n .copyProperties(image2,['system:time_start','system:time_end']))\n return filtered\n\n img_col = ee.ImageCollection(yearsEE.map(map_m).flatten())\n\n elif timeStep == 'month':\n \n def map_m(i):\n i = ee.Number(i)\n y = i.divide(12).add(years[0]).int()\n m = i.mod(12).add(1)\n image2 = (VI_col\n .filter(ee.Filter.calendarRange(m, m, 'month'))\n .filter(ee.Filter.calendarRange(y, y, 'year'))\n .first())\n filtered = (VI_col\n .filter(ee.Filter.calendarRange(m, m, 'month'))\n .filter(ee.Filter.calendarRange(y, y, 'year'))\n .mean()\n .copyProperties(image2,['system:time_start','system:time_end']))\n return filtered\n\n img_col = ee.ImageCollection(monthsEE.map(map_m).flatten())\n\n elif timeStep == 'lowest':\n\n img_col = VI_col\n\n #else:\n #print(\"incorrect time step specified\")\n \n if buf > 0:\n bufL = [buf]\n def bufferPoly(feature):\n return feature.buffer(bufL[0])\n\n ptsB = pts1.map(bufferPoly)\n def table_m(image):\n table = (image\n .select('VI')\n .reduceRegions(collection = ptsB.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = str(time_d[timeStep])+'_'+str(sen)+'_'+str(met)+'_'+str(years[0])+'_'+str(years[len(years)-1])+'_ptsB',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n \n #print ('buffered pts by:' + str(buf) + ' for Landsat: ' + sen + '_' + met)\n\n elif poly > 0:\n \n def table_m(image):\n table = (image\n .select('VI')\n .reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return 
table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = str(time_d[timeStep])+'_'+str(sen)+'_'+str(met)+'_'+str(years[0])+'_'+str(years[len(years)-1])+'_poly1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n \n #print ('spatial mean in poly: no buffer for Landsat: ' + sen + '_' + met)\n\n else:\n def table_m(image):\n table = (image\n .select('VI')\n .reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = str(time_d[timeStep])+'_'+str(sen)+'_'+str(met)+'_'+str(years[0])+'_'+str(years[len(years)-1])+'_pts1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n #print('value at point: no buffer for Landsat: ' + sen + '_' + met)", "def describeSensor(self, graph, uri):\n ret = []\n for p, o in graph.query(\"SELECT ?p ?o WHERE { ?uri ?p ?o }\",\n initBindings=dict(uri=uri)):\n if p in [RDFS.label, RDF.type]:\n continue\n ret.append('%s: <span class=\"value\">%s</span>' % (\n linked(graph.label(p), p),\n linked(graph.label(o), o) if isinstance(o, URIRef) else\n cgi.escape(o)))\n return '; '.join(ret)", "def electricity(osm_path): \n return retrieve(osm_path,'lines',['power','voltage'],**{'voltage':[\" IS NULL\"],})", "def sensor_values(self, filters, components=None, include_value_ts=False):\n if isinstance(components, list):\n components = \",\".join(components)\n else:\n components = \"all\"\n\n url = (yield self.get_sitemap())['monitor'] + '/list-sensors/' + components\n\n if isinstance(filters, basestring):\n filters = [filters]\n\n results_to_return = {}\n\n for filt in filters:\n query_url = url_concat(url, {\"reading_only\": \"1\", \"name_filter\": filt})\n response = yield self._http_client.fetch(query_url)\n try:\n results = json.loads(response.body)\n except ValueError:\n raise InvalidResponseError(\n \"Request to {} did not respond with valid JSON\".format(url))\n\n if len(results) == 0:\n raise SensorNotFoundError(\"No values for filter {} found\".format(filt))\n\n for result in results:\n if include_value_ts:\n results_to_return[result['name']] = SensorSampleValueTime(\n sample_time=result['time'],\n value_time=result['value_ts'],\n value=result['value'],\n status=result['status'])\n else:\n results_to_return[result['name']] = SensorSample(\n sample_time=result['time'],\n value=result['value'],\n status=result['status'])\n\n raise tornado.gen.Return(results_to_return)", "def update(self):\n self.cursor.execute(\"\"\"SELECT * FROM sensors_powersensor\"\"\")\n list = self.cursor.fetchall()\n for sensor in list:\n self.add(sensor[2], sensor[1])", "def main(temp, humid):\n user = 'root'\n password = 'root'\n dbname = 'iot'\n dbuser = 'raspberry'\n dbuser_password = 'password'\n query = 'select temp_value,humid_value from temp_humid;'\n json_body = [\n {\n \"measurement\": \"temp_humid\",\n \"fields\": {\n \"temp_value\": temp,\n \"humid_value\":humid \n\t}\n }\n ]\n\n client = InfluxDBClient('localhost', 8086, user, password, dbname)\n\n #client.create_database(dbname)\n\n print(\"Write points: 
{0}\".format(json_body))\n client.write_points(json_body)\n\n #print(\"Querying data: \" + query)\n #result = client.query(query)\n\n #print(\"Result: {0}\".format(result))\n\n #client.drop_database(dbname)", "def _latest_sensor_glucose_entry_in_range(from_datetime, to_datetime):\n glucose_pages_dict = json.loads(\n _pump_output(\n \"filter_glucose_date\",\n from_datetime.isoformat(),\n to_datetime.isoformat()\n )\n )\n last_page = glucose_pages_dict[\"end\"]\n glucose_history = json.loads(_pump_output(\"read_glucose_data\", str(last_page)))\n glucose_iterator = (x for x in reversed(glucose_history) if x[\"name\"] in (\"GlucoseSensorData\",\n \"CalBGForGH\"))\n\n last_datetime = to_datetime\n\n while from_datetime <= last_datetime:\n try:\n glucose_dict = next(glucose_iterator)\n except StopIteration:\n break\n\n last_datetime = parse(glucose_dict[\"date\"])\n amount = glucose_dict.get(\"sgv\", glucose_dict.get(\"amount\", 0))\n if amount > 0 and from_datetime <= last_datetime <= to_datetime:\n return glucose_dict", "def get(self):\n if now()-self.last_query < 1./self.query_rate:\n return None,None\n self.last_query = now()\n\n # query from saver (an old strategy that may be desired at points): \n #self.saver.query_flag.value = True\n #fr = mp2np(self.saver.query_queue)\n #frts = self.saver.query_queue_ts.value\n \n # query from _PSEye (a newer strategy that is preferable for most uses):\n self.pseye.query_flag.value = True\n while self.pseye.query_flag.value == True:\n pass\n fr = self.pseye.query_queue[0]\n frts = self.pseye.query_queue_ts.value\n\n x,y = self.resolution[self.query_idx]\n return frts,fr.reshape([y,x])", "def update(self):\n url = 'https://airapi.airly.eu/v2/measurements/point' \\\n '?lat={}&lng={}&maxDistanceKM=2'.format(self._latitude,\n self._longitude)\n headers = {'Accept': CONTENT_TYPE_JSON, 'apikey': self._token}\n request = requests.get(url, headers=headers)\n _LOGGER.debug(\"New data retrieved: %s\", request.status_code)\n if request.status_code == HTTP_OK and request.content.__len__() > 0:\n if (request.json()['current']['indexes'][0]['description'] ==\n ATTR_NO_SENSOR_AVAILABLE):\n _LOGGER.error(ATTR_NO_SENSOR_AVAILABLE)\n else:\n self.get_data(request.json())", "def filter_sensor_data(self,request):\n\n data = QueryDict.dict(request.data)\n if data[\"value\"]==\"min\":\n sensor_obj = SensorData.objects.filter( reading_date__gte = data[\"start_date\"],\n reading_date__lte = data[\"end_date\"])\n data_value = sensor_obj.aggregate(Min('reading'))\n data_value = data_value[\"reading__min\"]\n elif data[\"value\"]==\"max\":\n sensor_obj = SensorData.objects.filter(reading_date__gte=data[\"start_date\"],\n reading_date__lte=data[\"end_date\"])\n data_value = sensor_obj.aggregate(Max('reading'))\n data_value = data_value[\"reading__max\"]\n elif data[\"value\"]==\"average\":\n sensor_obj = SensorData.objects.filter(reading_date__gte=data[\"start_date\"],\n reading_date__lte=data[\"end_date\"])\n data_value = sensor_obj.aggregate(Avg('reading'))\n data_value = data_value[\"reading__avg\"]\n serializer = SensorDataSerializer(sensor_obj, many=True)\n context = {\n \"value\":data_value,\n \"data_list\":serializer.data\n }\n return Response(context, 200)", "def get_datapoints(self, rid, t0, t1, nmax = 300):\n self.read_curs.execute(\"SELECT COUNT(*) FROM readings WHERE readout_id = ? AND time >= ? AND time <= ?\", (int(rid), t0, t1))\n if self.read_curs.fetchone()[0] > nmax:\n self.read_curs.execute(\"SELECT avg(time),avg(value) FROM readings WHERE readout_id = ? 
AND time >= ? AND time <= ? GROUP BY round(time/?) ORDER BY time DESC\", (int(rid), t0, t1, (t1-t0)/nmax));\n else:\n self.read_curs.execute(\"SELECT time,value FROM readings WHERE readout_id = ? AND time >= ? AND time <= ? ORDER BY time DESC\", (int(rid), t0, t1))\n return self.read_curs.fetchall()", "def test_get_measurement_history(self):\n device = DeviceFactory(node=Node.objects.first(), external_id='123', type__code=SecureDeviceType.SRT321,\n device_param__type__code=SecureDeviceParameterType.MEASURED_TEMPERATURE)\n d_id_1 = device.external_id\n\n now_loc = datetime.datetime.now(bst)\n ts_loc = now_loc - datetime.timedelta(seconds=30)\n ts_str = ts_loc.strftime('%Y-%m-%dT%H:%M:%S')\n\n data = self.create_secure_server_push_data(d_id_1, ts_str)\n\n SecureClient.process_push_data(data)\n time.sleep(.5)\n\n # get newer timestamp\n ts_str = now_loc.strftime('%Y-%m-%dT%H:%M:%S')\n data = self.create_secure_server_push_data(d_id_1, ts_str, value=\"23.5\")\n\n SecureClient.process_push_data(data)\n\n token = Token.objects.get(user__username=email)\n device_param = device.parameters.first()\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)\n url = reverse('api:device_measurements', kwargs={'device_parameter_id': device_param.id})\n\n time.sleep(.5)\n\n response = client.get(url, format='json')\n\n self.assertTrue(response.status_code == 200)\n self.assertTrue(len(response.data) >= 2)", "def _compute_sensation_(self, name, sensor_window, timestamp_window, index_window):\n index_end = len(sensor_window)\n index_start = index_end - self._obs_history\n self._q_ = np.array([sensor_window[i]['q_actual'][0] for i in range(index_start,index_end)])\n self._qt_ = np.array([sensor_window[i]['q_target'][0] for i in range(index_start,index_end)])\n self._qd_ = np.array([sensor_window[i]['qd_actual'][0] for i in range(index_start,index_end)])\n self._qdt_ = np.array([sensor_window[i]['qd_target'][0] for i in range(index_start,index_end)])\n self._qddt_ = np.array([sensor_window[i]['qdd_target'][0] for i in range(index_start,index_end)])\n\n self._current_ = np.array([sensor_window[i]['i_actual'][0] for i in range(index_start,index_end)])\n self._currentt_ = np.array([sensor_window[i]['i_target'][0] for i in range(index_start,index_end)])\n self._currentc_ = np.array([sensor_window[i]['i_control'][0] for i in range(index_start,index_end)])\n self._mt_ = np.array([sensor_window[i]['m_target'][0] for i in range(index_start,index_end)])\n self._voltage_ = np.array([sensor_window[i]['v_actual'][0] for i in range(index_start,index_end)])\n\n\n self._safety_mode_ = np.array([sensor_window[i]['safety_mode'][0] for i in range(index_start,index_end)])\n\n #TODO: should there be checks for safety modes greater than pstop here, and exit if found?\n\n # Compute end effector position\n x = ur_utils.forward(sensor_window[-1]['q_actual'][0], self._ik_params)[:3, 3]\n np.copyto(self._x_, x)\n\n if self._target_type == 'position':\n self._target_diff_ = self._x_[self._end_effector_indices] - self._target_\n elif self._target_type == 'angle':\n self._target_diff_ = self._q_[-1, self._joint_indices] - self._target_\n\n self._reward_.value = self._compute_reward_()\n if self._reward_type == \"sparse\":\n done = self._reward_.value >= 0\n else:\n done = 0\n # TODO: use the correct obs that matches the observation_space\n return np.concatenate((self._q_[:, self._joint_indices].flatten(),\n self._qd_[:, self._joint_indices].flatten() / self._speed_high,\n self._target_diff_,\n self._action_ / 
self._action_high,\n [self._reward_.value],\n [done]))", "def request_realtime_info(self):\n self.socket_datastream.sendto(b\"!r\", self.ip_port_arduino_datastream)\n self.socket_datastream.sendto(b\"!s\", self.ip_port_arduino_datastream)", "def sensor(self):\n return ProxyList(self, OxfordITC503.Sensor, range(3))", "def query_radar_data(station,product,start,\n minute_delta=0,hour_delta=0,day_delta=0):\n \n end = start+timedelta(days=day_delta, minutes=minute_delta, hours=hour_delta)\n \n print(f\"query start time:{start}\")\n print(f\"query end time:{end}\")\n rs = RadarServer('http://thredds-aws.unidata.ucar.edu/thredds/radarServer/nexrad/level2/S3/')\n query = rs.query()\n rs.validate_query(query)\n print(rs.stations[station])\n\n query.stations(station).time_range(start,end).variables(product)\n catalog = rs.get_catalog(query)\n file_station = str(catalog.datasets[0])\n file_station = file_station[0:4]\n \n file_list = list(catalog.datasets.values())\n for t in file_list: print(t)\n LatLonBox = [rs.stations[station].longitude-3,rs.stations[station].longitude+3,\n rs.stations[station].latitude-2,rs.stations[station].latitude+2]\n \n return file_list,LatLonBox", "async def read(self, sensors):\n\n try:\n timeout = aiohttp.ClientTimeout(total=5)\n async with aiohttp.ClientSession(timeout=timeout,\n raise_for_status=True) as session:\n current_url = self.url_info\n async with session.get(current_url) as response:\n data = await response.text()\n\n if self.wifi:\n csv_data = StringIO(data)\n reader = csv.reader(csv_data)\n\n for row in reader:\n self.serialnumber = row.pop(0)\n else:\n xml = ET.fromstring(data)\n\n find = xml.find(\"SN\")\n if find is not None:\n self.serialnumber = find.text\n\n _LOGGER.debug(\"Inverter SN: %s\", self.serialnumber)\n\n current_url = self.url\n async with session.get(current_url) as response:\n data = await response.text()\n at_least_one_enabled = False\n\n if self.wifi:\n csv_data = StringIO(data)\n reader = csv.reader(csv_data)\n ncol = len(next(reader))\n csv_data.seek(0)\n\n values = []\n\n for row in reader:\n for (i, v) in enumerate(row):\n values.append(v)\n\n for sen in sensors:\n if ncol < 24:\n if sen.csv_1_key != -1:\n try:\n v = values[sen.csv_1_key]\n except IndexError:\n v = None\n else:\n v = None\n else:\n if sen.csv_2_key != -1:\n try:\n v = values[sen.csv_2_key]\n except IndexError:\n v = None\n else:\n v = None\n\n if v is not None:\n if sen.name == \"state\":\n sen.value = MAPPER_STATES[v]\n else:\n sen.value = eval(\n \"{0}{1}\".format(v, sen.factor)\n )\n sen.date = date.today()\n sen.enabled = True\n at_least_one_enabled = True\n else:\n xml = ET.fromstring(data)\n\n for sen in sensors:\n find = xml.find(sen.key)\n if find is not None:\n sen.value = find.text\n sen.date = date.today()\n sen.enabled = True\n at_least_one_enabled = True\n\n if not at_least_one_enabled:\n if self.wifi:\n raise csv.Error\n else:\n raise ET.ParseError\n\n if sen.enabled:\n _LOGGER.debug(\"Got new value for sensor %s: %s\",\n sen.name, sen.value)\n\n return True\n except (aiohttp.client_exceptions.ClientConnectorError,\n concurrent.futures._base.TimeoutError):\n # Connection to inverter not possible.\n # This can be \"normal\" - so warning instead of error - as SAJ\n # inverters are powered by DC and thus have no power after the sun\n # has set.\n _LOGGER.warning(\"Connection to SAJ inverter is not possible. \" +\n \"The inverter may be offline due to darkness. 
\" +\n \"Otherwise check host/ip address.\")\n return False\n except aiohttp.client_exceptions.ClientResponseError as err:\n # 401 Unauthorized: wrong username/password\n if err.status == 401:\n raise UnauthorizedException(err)\n else:\n raise UnexpectedResponseException(err)\n except csv.Error:\n # CSV is not valid\n raise UnexpectedResponseException(\n str.format(\"No valid CSV received from {0} at {1}\", self.host,\n current_url)\n )\n except ET.ParseError:\n # XML is not valid or even no XML at all\n raise UnexpectedResponseException(\n str.format(\"No valid XML received from {0} at {1}\", self.host,\n current_url)\n )", "def get_data(link):\n data = re.get(link)\n jsondata = data.json()\n for weatherstation in jsondata['weatherStations']:\n FetchandStore.sensordict.update({weatherstation[\"id\"]:weatherstation[\"sensorValues\"]})\n for sensorvalue in weatherstation[\"sensorValues\"]:\n FetchandStore.sensors.append({\"id\": sensorvalue[\"roadStationId\"], \"name\": sensorvalue[\"oldName\"],\n \"value\": sensorvalue[\"sensorValue\"], \"unit\": sensorvalue[\"sensorUnit\"],\n \"datetime\": sensorvalue[\"measuredTime\"]})\n return FetchandStore.sensors", "def fetch_data_from_db(sensorName):\n connection = sqlite3.connect('sensordata.db')\n cursor = connection.cursor()\n cursor.execute(\"SELECT * FROM sensordata WHERE name = :name\", {'name': sensorName})\n observedsensor = cursor.fetchall()\n return observedsensor", "def recent_arima_sensors(now=dt.datetime.now(), timerange=dt.timedelta(days=5)):\n dt_from = now - timerange\n query = (\n db.session.query(ModelRunClass.sensor_id)\n .filter(ModelRunClass.time_created >= dt_from)\n .distinct()\n )\n ids = db.session.execute(query).fetchall()\n ids = [i[0] for i in ids]\n return ids", "def query_datacube(product,latitude,longitude,time,measurements):\r\n\r\n dc = datacube.Datacube(app=\"Query\")\r\n\r\n xarr = dc.load(\r\n product=product, \r\n longitude=longitude, \r\n latitude=latitude,\r\n # Time format YYYY-MM-DD\r\n time=time, \r\n measurements=measurements\r\n )\r\n\r\n return xarr", "def get(self, request, *args, **kwargs):\n device = Device.objects.get(name=kwargs[\"device_name\"])\n global_settings = GoldenConfigSettings.objects.get(id=\"aaaaaaaa-0000-0000-0000-000000000001\")\n status_code, data = graph_ql_query(request, device, global_settings.sot_agg_query)\n data = json.loads(json.dumps(data))\n return Response(GraphQLSerializer(data=data).initial_data, status=status_code)", "def sensor(self , sensor_index):\n sensor = obd_sensors.SENSORS[sensor_index]\n try:\n r = self.get_sensor_value(sensor)\n except \"NORESPONSE\":\n r = \"NORESPONSE\"\n return (sensor.name,r, sensor.unit)", "async def async_discover_sensor(device_id):\n client = hass.data[tellduslive.DOMAIN]\n async_add_entities([TelldusLiveSensor(client, device_id)])", "def test_api_sensor(self):\n # load api base\n r = requests.get('{server}/api/0.1/'.format(\n server=self.get_server_url())).json()\n # load sensors from url specified in api base\n r = requests.get(r['sensors']).json()\n r = requests.get(r['sensors'][0]['url']).json()\n self.assertIn('description', r)\n self.assertIn('started', r)\n self.assertIn('maximum', r)\n self.assertIn('recent_sample', r)\n self.assertIn('id', r)\n self.assertIn('type', r)\n self.assertIn('url', r)\n self.assertIn('minimum', r)\n self.assertIn('ended', r)", "def GET_sensors(self):\n self.sensors.GetAll()\n self.sensors.sensors['charging-state'] = \\\n pyrobot.CHARGING_STATES[self.sensors.sensors['charging-state']]\n print 
simplejson.dumps(self.sensors.sensors)", "def get_sensor_consumption(self, service_location_id, sensor_id, start, end, aggregation):\n url = urljoin(URLS['servicelocation'], str(service_location_id), \"sensor\", str(sensor_id), \"consumption\")\n return self._get_consumption(url=url, start=start, end=end, aggregation=aggregation)", "def getDataWithTimeIndex(self, t):\n\n return self.sensorDf.iloc[t,:self.sensorChannels].values", "def fetch_latest_sample(self, sensor_id):\n conn = psycopg2.connect(self.conn)\n query = \"SELECT * FROM steve_sense_sensor_logs WHERE sensor_id = '{}' ORDER BY time DESC LIMIT 1\".format(\n sensor_id)\n cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)\n cur.execute(query)\n data = json.loads(json.dumps(\n cur.fetchall(), use_decimal=True, default=str))\n cur.close()\n conn.close()\n return data", "def query_rest_server(ts1, ts2, data_url, idstr):\n s = requests.Session()\n options = {'t1': int(ts1), 't2': int(ts2), 'flavor': 'raw', 'n': 1}\n uri = \"{}/data/?id={}\".format(data_url, idstr)\n t_start = time.time()\n try:\n resp = s.get(uri, params=options)\n except requests.ConnectionError as e:\n logging.error('ConnectionError: %s', e)\n logging.error('check status of ssh tunnel to trending server')\n if resp.status_code != 200:\n logging.error('invalid response %s from Trending Server',\n resp.status_code)\n return None\n logging.debug('URL=%s', resp.url)\n logging.debug('#---->%s: dt=%.3f', \"channels\", (time.time() - t_start))\n s.close()\n return resp.content", "def _query_data(self, index, tag):\n version, datapoints = yield self.quasar.stream_get(self.name, tag, tag+(15*qdf.MINUTE))\n values = np.empty((BLOCK_SIZE,), dtype=(type(datapoints[0])))\n values[:] = None\n \n for point in datapoints:\n time = float(point.time - tag)\n time_index = int(round(time*SAMPLE_RATE/qdf.SECOND))\n values[time_index] = point\n\n self.cache[index][CACHE_INDEX_TAG] = tag\n self.cache[index][CACHE_INDEX_DATA] = values", "def GetAllLocalSensors(sm_dict, northing, easting, current_time):\n current_SM_dict = {\"sm_sensor\" : [], \"dist\" : [], \"sm_val\" : []}; SM_ind = -99\n for sensor in sm_dict.keys():\n if SM_ind < -1: SM_ind = Precip.GetLastPrecipInd(sm_dict[sensor]['SM_df'], current_time, 'Year', 'DOY') #only need this once \n current_SM_dict['sm_sensor'].append(sensor); current_SM_dict['sm_val'].append(sm_dict[sensor]['SM_df']['SM'][SM_ind])\n current_SM_dict['dist'].append(math.sqrt((sm_dict[sensor]['Northing'] - northing)**2 + (sm_dict[sensor]['Easting'] - easting)**2))\n return pd.DataFrame(current_SM_dict)", "def get_measurement(self, date_and_time: datetime.datetime):\n time = date_and_time.strftime('%Y-%m-%d %H:%M:%S')\n\n self.__database_cursor.execute(f'SELECT sensor_used FROM measurements WHERE measurement_time = \"{time}\"')\n sensor_used = self.__database_cursor.fetchall()\n\n sensor_used = sensor_used[0][0]\n result = None\n\n if sensor_used == 'SLP':\n result = self.__get_measurement_SLP(time)\n elif sensor_used == 'DLP':\n pass\n elif sensor_used == 'HEA':\n pass\n else:\n raise ValueError('Database Error: sensor used for retrieved measurement is invalid')\n\n return result", "def mean_sensor_id_get(sensor_id, start_date=None, end_date=None): # noqa: E501\n try:\n client = InfluxDBClient('influxdb', 8086, 'user', 'user', 'sensor')\n sensor_id = \"laptop_temperature_1\"\n str = \"\"\n if start_date is not None:\n str = f\"WHERE time > '{datetime.fromtimestamp(start_date)}'\"\n if end_date is not None:\n if len(str) > 0:\n str += \" AND \"\n 
else:\n str = \"WHERE \"\n str += f\"time < '{datetime.fromtimestamp(end_date)}'\"\n request = f\"SELECT mean({sensor_id}) from client1 {str} GROUP BY *;\"\n print(request)\n result = client.query(request)\n mean = list(result.get_points())[0]['mean']\n except:\n traceback.print_exc()\n return []\n return [mean]", "def test_source_sensor(self):\n\n url = '/%s/jobs/?source_sensor=%s' % (self.api, self.s_sensor)\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 1)\n self.assertEqual(result['results'][0]['source_sensor'], self.s_sensor)", "def get(self, request, pk):\n sensor_obj = get_object_or_404(SensorData, id=pk)\n serializer = SensorDataSerializer(sensor_obj)\n return Response(serializer.data, 200)", "def connect(self, connection):\n self.conn = connection\n if self.type in (\"current\", \"forecast\"):\n self.sensor = \"https://api.openweathermap.org/data/2.5/onecall?exclude=minutely&%s\" % (str(self.conn))\n elif self.type in (\"historical\"):\n self.sensor = \"https://api.openweathermap.org/data/2.5/onecall/timemachine?%s&dt=\" % (str(self.conn))\n Logger.log(LOG_LEVEL[\"debug\"], 'OwmapiSensor: apicall: ' + str(self.sensor))", "def get_wind_data(start, end):\n\n con = connect_to_db()\n query = f'SELECT rssi FROM rala WHERE id > \"{start}\" AND id <= \"{end}\";'\n df = pd.read_sql_query(query, con)\n return df", "def get(self):\n args = search_parser.parse_args()\n return_status = None\n deviceid = request.args['deviceid']\n start_time=request.args['start_time']\n end_time=request.args['end_time']\n log.debug(request.args)\n result = {}\n try:\n start_time=start_time.replace(\"T\", \" \")\n end_time=end_time.replace(\"T\", \" \")\n log.debug(\"deviceId searched for : \" + deviceid+ \" Start Time:\"+start_time+\" end_time:\"+end_time)\n #get the payload to influx DB\n url = \"http://localhost:8086/query\"\n \n querystring = {\"pretty\": \"true\", \"db\": \"IOT\",\n \"q\": \"SELECT * FROM \\\"ttd_devices\\\" WHERE deviceId=\\'%s\\' AND time >= '%s' AND time <= '%s' \"%(deviceid,start_time,end_time)}\n response = requests.request(\"GET\", url, params=querystring) \n D=json.loads(response.text)\n #log.debug('------------------------------------------')\n #log.debug(D)\n #log.debug('------------------------------------------')\n response_dict=[]\n for element in D['results'][0]['series'][0]['values']:\n temp_dict=dict(zip(D['results'][0]['series'][0]['columns'],element))\n processed_dict=dict()\n for key,value in temp_dict.items():\n if value is not None and value != np.nan:\n if key == 'tStamp':\n timestamp = datetime.fromtimestamp(eval(value))\n value=timestamp.strftime('%Y-%m-%d %H:%M:%S')\n elif key == 'ipAddress':\n value=eval(value)\n elif key == 'time':\n value=str(pd.to_datetime(value, format=\"%Y-%m-%dT%H:%M:%S.%fZ\"))\n processed_dict[key]=value \n response_dict.append(processed_dict)\n #log.debug('------------------------------------------')\n #log.debug(response_dict)\n #log.debug('------------------------------------------')\n result['status'] = 1\n result['message']=response_dict\n return_status = 200\n except ValueError as e:\n result = {}\n log.exception('Value Exception while processing the request for search')\n result['status'] = 0\n return_status = 400\n result['message'] = e.args[0]\n except :\n result = {}\n log.exception('Exception while doing search')\n return_status = 500\n result['status'] = 0\n 
result['message'] = 'Internal Error has occurred while processing the request for search'\n finally:\n resp = Response(json.dumps(result), status=return_status, mimetype=\"application/json\")\n return resp", "def update(self, sensors, first_week, last_week):\n\n # most recent issue\n if last_week is None:\n last_issue = get_most_recent_issue(self.epidata)\n last_week = flu.add_epiweeks(last_issue, +1)\n\n # connect\n with self.database as database:\n\n # update each sensor\n for (name, loc) in sensors:\n\n # update each location\n for location in get_location_list(loc):\n\n # timing\n ew1 = first_week\n if ew1 is None:\n ew1 = database.get_most_recent_epiweek(name, location)\n if ew1 is None:\n # If an existing sensor reading wasn't found in the database and\n # no start week was given, just assume that readings should start\n # at 2010w40.\n ew1 = 201040\n print('%s-%s not found, starting at %d' % (name, location, ew1))\n\n args = (name, location, ew1, last_week)\n print('Updating %s-%s from %d to %d.' % args)\n for test_week in flu.range_epiweeks(ew1, last_week, inclusive=True):\n self.update_single(database, test_week, name, location)", "def start_wireless_sensing(self):\n\n gps_pos = self.dc.read_gps()\n n_samples = 256 # DON'T CHANGE TO 128!!!!! IT CAUSES KERNEL PANIC (unless you change tick or find another fix)\n if IS_SIMULATION:\n dBm = self.get_simulated_dBm()\n # dBm = random.uniform(-1, -10)\n self.dBm = dBm\n self.ContSamples += 1\n time.sleep(0.01)\n else:\n self.ContSamples += 1\n samples = self.sdr.read_samples(n_samples)\n dBm = 10 * np.log10(np.mean(np.power(np.abs(samples), 2)))\n self.dBm = dBm\n\n if self.ContSamples > self.SamplesToDiscard:\n\n wireless_msg0 = HotspotWirelessMessage(\n location=gps_pos,\n sdr=[],\n dBm=dBm,\n )\n self.flight_logger.log(wireless_msg0)\n\n now = time.time()\n # if ((dBm > THRESHOLD_dBm) and (now - self.lastInsert) >= THRESHOLD_Sampling):\n\n if (now - self.lastInsert) >= THRESHOLD_Sampling:\n if self.FLAG == 2:\n wireless_msg = HotspotWirelessMessage(\n location=self.dc.read_gps(),\n sdr=[],\n dBm=self.dBm,\n )\n self.wireless_logger.log(wireless_msg)\n\n if self.FLAG == 1:\n wireless_msg2 = HotspotFilterMessage(\n hotcaltime=self.hottime,\n alepcaltime=self.aleptime,\n survetime=self.surveytime,\n swarmtime=self.swarmtime,\n FLAG=self.FLAG,\n location=self.dc.read_gps(),\n sdr=[],\n dBm=self.dBm,\n )\n\n self.wireless_filter.log(wireless_msg2)\n\n self.wireless_data.append(wireless_msg0)\n\n self.lastInsert = time.time()\n # if len(self.wireless_data) >= SAMPLES_SWARM * self.sentData:\n if len(self.wireless_data) >= SAMPLES_SL:\n # self.sentData +=1\n self.ready_to_send = True", "def new_switch_heating():\n logging.debug(\"Get list of current sensors from need_heating which need switching on\") \n on_sensor_list=select_sql(\"select sensor from need_heat\")\n #on_sensor_list=select_sql(\"select sensors from sensor_master where current_status = 0 and required_status = 1\")\n for i in range(0,len(on_sensor_list)):\n for sensor in on_sensor_list[i]:\n switch_relay(sensor)\n move_sensor(sensor,\"need_heat\",\"heating_on\")\n\n logging.debug(\"Get list of current sensors from hot_enough which need switching off\") \n off_sensor_list=select_sql(\"select sensor from hot_enough\")\n #off_sensor_list=select_sql(\"select sensors from sensor_master where current_status = 1 and required_status = 0\")\n for i in range(0,len(off_sensor_list)):\n for sensor in off_sensor_list[i]:\n switch_relay(sensor)\n 
move_sensor(sensor,\"hot_enough\",\"heating_off\")", "def getSensors(self):\n return self.listener.sensors", "def switch_heating():\n logging.debug(\"Get list of current sensors from need_heating which need switching on\") \n on_sensor_list=select_sql(\"select sensor from need_heat\")\n \n for i in range(0,len(on_sensor_list)):\n for sensor in on_sensor_list[i]:\n switch_relay(sensor)\n move_sensor(sensor,\"need_heat\",\"heating_on\")\n\n logging.debug(\"Get list of current sensors from hot_enough which need switching off\") \n off_sensor_list=select_sql(\"select sensor from hot_enough\")\n \n for i in range(0,len(off_sensor_list)):\n for sensor in off_sensor_list[i]:\n switch_relay(sensor)\n move_sensor(sensor,\"hot_enough\",\"heating_off\")", "def wql_values_query(self, start_node, path):\n self.tr_id = get_tr_id()\n xml_msg = self._create_wql_values_msg(self.tr_id, start_node, path)\n self.conn.connect()\n self.conn.send(xml_msg)\n response = self.conn.receive()\n self._check_error(response)\n if \"results\" in response:\n node_list = parse_URI_list(response[\"results\"])\n return node_list\n else:\n raise SIBError(M3_SIB_ERROR)", "def sensor_value(self, sensor_name, components=None, include_value_ts=False):\n if isinstance(components, list):\n components = \",\".join(components)\n else:\n components = \"all\"\n\n url = (yield self.get_sitemap())['monitor'] + '/list-sensors/' + components\n\n response = yield self._http_client.fetch(\n \"{}?reading_only=1&name_filter=^{}$\".format(url, sensor_name))\n try:\n results = json.loads(response.body)\n except ValueError:\n raise InvalidResponseError(\n \"Request to {} did not respond with valid JSON\".format(url))\n\n if len(results) == 0:\n raise SensorNotFoundError(\"Value for sensor {} not found\".format(sensor_name))\n\n result_to_format = None\n\n if len(results) > 1:\n # check for exact match, before giving up\n for result in results:\n if result['name'] == sensor_name:\n result_to_format = result\n break\n else:\n raise SensorNotFoundError(\n \"Multiple sensors ({}) found - specify a single sensor \"\n \"name not a pattern like: '{}'. 
(Some matches: {}).\"\n .format(len(results),\n sensor_name,\n [result['name'] for result in results[0:5]]))\n else:\n result_to_format = results[0]\n\n if include_value_ts:\n raise tornado.gen.Return(SensorSampleValueTime(\n sample_time=result_to_format['time'],\n value_time=result_to_format['value_ts'],\n value=result_to_format['value'],\n status=result_to_format['status']))\n else:\n raise tornado.gen.Return(SensorSample(\n sample_time=result_to_format['time'],\n value=result_to_format['value'],\n status=result_to_format['status']))", "def lights_energy_use(dt_from_, dt_to_):\n\n dt_from = pd.to_datetime(dt_from_.date()) + timedelta(hours=14)\n dt_to = pd.to_datetime(dt_to_.date()) + timedelta(days=1, hours=15)\n\n d_from = pd.to_datetime(dt_from_.date())\n d_to = pd.to_datetime(dt_to_.date())\n\n col_ec = \"electricity_consumption\"\n sensor_device_id = \"Clapham\"\n lights_on_cols = []\n\n # getting eneregy data for the analysis\n query = db.session.query(\n ReadingsEnergyClass.timestamp,\n ReadingsEnergyClass.electricity_consumption,\n ).filter(\n and_(\n SensorClass.device_id == sensor_device_id,\n ReadingsEnergyClass.sensor_id == SensorClass.id,\n ReadingsEnergyClass.timestamp >= dt_from,\n ReadingsEnergyClass.timestamp <= dt_to,\n )\n )\n\n df = pd.read_sql(query.statement, query.session.bind)\n\n if df.empty:\n return pd.DataFrame({\"date\": [], \"mean_lights_on\": []})\n\n # Reseting index\n df.sort_values(by=[\"timestamp\"], ascending=True).reset_index(inplace=True)\n\n # grouping data by date-hour\n energy_hour = (\n df.groupby(\n by=[\n df[\"timestamp\"].map(\n lambda x: pd.to_datetime(\n \"%04d-%02d-%02d-%02d\" % (x.year, x.month, x.day, x.hour),\n format=\"%Y-%m-%d-%H\",\n )\n ),\n ]\n )[\"electricity_consumption\"]\n .sum()\n .reset_index()\n )\n\n # Sorting and reseting index\n energy_hour.sort_values(by=[\"timestamp\"], ascending=True).reset_index(inplace=True)\n\n # energy dates. Energy date starts from 4pm each day and lasts for 24 hours\n energy_hour.loc[\n energy_hour[\"timestamp\"].dt.hour < 15, \"energy_date\"\n ] = pd.to_datetime((energy_hour[\"timestamp\"] + timedelta(days=-1)).dt.date)\n energy_hour.loc[\n energy_hour[\"timestamp\"].dt.hour >= 15, \"energy_date\"\n ] = pd.to_datetime(energy_hour[\"timestamp\"].dt.date)\n\n # Clasification of lights being on\n\n # Lights ON 1: Lights turn on at 4pm and turn off at 9am, as scheduled.\n energy_hour[\"lights_on_1\"] = energy_hour[\"timestamp\"].apply(\n lambda x: 1 if (x.hour >= 17 or x.hour < 10) else 0\n )\n lights_on_cols.append(\"lights_on_1\")\n\n # Lights ON 2: Lights are calculated by estimating the lighting use as between\n # the minima of two consecutive days. 
The lights are considered on when the\n # energy use is above the day's first quartile of lighting of this difference.\n # energy_hour['lights_on_2'] = 0\n # lights_on_cols.append('lights_on_2')\n\n # Lights ON 3: Lights are assumed to be on if the energy demand is over 30 kW\n # (max load of the extraction fan)\n energy_hour[\"lights_on_3\"] = energy_hour[col_ec].apply(\n lambda x: 1 if (x > 30.0) else 0\n )\n lights_on_cols.append(\"lights_on_3\")\n\n # Lights ON 4: Lights are assumed to turn on at the time of largest energy use\n # increase in the day, and turn off at the time of largest energy decrease of\n # the day.\n\n # estimating energy difference\n energy_hour[\"dE\"] = energy_hour[col_ec] - energy_hour[col_ec].shift(1)\n energy_hour[\"dE\"] = energy_hour[\"dE\"].fillna(0.0)\n\n # finding max increase and min decrease\n energy_hour[\"dE_min\"] = energy_hour.groupby(\"energy_date\")[\"dE\"].transform(\"min\")\n energy_hour[\"dE_max\"] = energy_hour.groupby(\"energy_date\")[\"dE\"].transform(\"max\")\n\n energy_hour.loc[\n np.isclose(energy_hour[\"dE_max\"], energy_hour[\"dE\"]), \"lights_on_4\"\n ] = 1\n energy_hour.loc[\n np.isclose(energy_hour[\"dE_min\"], energy_hour[\"dE\"]), \"lights_on_4\"\n ] = 0\n\n # repeat last?\n prev_row_value = None\n for df_index in energy_hour.index:\n if df_index > 0:\n if np.isnan(energy_hour.loc[df_index, \"lights_on_4\"]) and not np.isnan(\n prev_row_value\n ):\n\n energy_hour.loc[df_index, \"lights_on_4\"] = prev_row_value\n prev_row_value = energy_hour.loc[df_index, \"lights_on_4\"]\n\n lights_on_cols.append(\"lights_on_4\")\n\n # Lights ON 5: Lights are assumed on if the energy use is over 0.9\n # times the days' energy use mean, and the energy demand is over 30 kW.\n\n energy_hour[\"energy_date_mean\"] = energy_hour.groupby(\"energy_date\")[\n col_ec\n ].transform(\"mean\")\n\n energy_hour[\"lights_on_5\"] = np.where(\n (energy_hour[col_ec] > 30.0)\n & (energy_hour[col_ec] > 0.9 * energy_hour[\"energy_date_mean\"]),\n 1,\n 0,\n )\n\n lights_on_cols.append(\"lights_on_5\")\n\n # getting the mean value of lights on per day\n energy_date_df = energy_hour.loc[\n (energy_hour[\"energy_date\"] >= d_from) & (energy_hour[\"energy_date\"] <= d_to)\n ]\n energy_date_df = (\n energy_date_df.groupby(by=[\"energy_date\"])[lights_on_cols].sum().reset_index()\n )\n energy_date_df[\"mean_lights_on\"] = energy_date_df[lights_on_cols].sum(axis=1) / len(\n lights_on_cols\n )\n energy_date_df[\"date\"] = energy_date_df[\"energy_date\"].dt.strftime(\"%Y-%m-%d\")\n\n lights_results_df = energy_date_df[[\"date\", \"mean_lights_on\"]]\n\n return lights_results_df", "def send_data():\n range = request.args.get('range', '30')\n time = arrow.utcnow().replace(minutes=-int(range))\n data = Temperature.query\\\n .filter(Temperature.timestamp > time).order_by(Temperature.timestamp.desc()).all()\n return jsonify(results=[i.serialize for i in data])", "def request_device_readings_quartiles(device_uuid):\n\n # Set the db that we want and open the connection\n start = request.args.get('start')\n end = request.args.get('end')\n if app.config['TESTING']:\n conn = sqlite3.connect('test_database.db')\n else:\n conn = sqlite3.connect('database.db')\n conn.row_factory = sqlite3.Row\n cur = conn.cursor()\n #check for start\n if start != None and end != None:\n # Execute the query\n cur.execute('''\n select * from\n (\n SELECT AVG(value) FROM readings where value < (\n SELECT value FROM readings where device_uuid=\"{}\" ORDER BY value LIMIT 2 - (\n SELECT COUNT(*) FROM readings where 
device_uuid=\"{}\" and date_created>\"{}\" and date_created<\"{}\" \n ) % 2 OFFSET (\n SELECT (COUNT(*) - 1) / 2 FROM readings where device_uuid=\"{}\" and date_created>\"{}\" and date_created<\"{}\" \n )\n )\n ) as T1\n ,\n (\n SELECT AVG(value) FROM readings where value > (\n SELECT value FROM readings where device_uuid=\"{}\" ORDER BY value LIMIT 2 - (\n SELECT COUNT(*) FROM readings where device_uuid=\"{}\" and date_created>\"{}\" and date_created<\"{}\" \n ) % 2 OFFSET (\n SELECT (COUNT(*) - 1) / 2 FROM readings where device_uuid=\"{}\" and date_created>\"{}\" and date_created<\"{}\" \n )\n )\n ) as T2\n ,\n (\n SELECT value FROM readings where device_uuid=\"{}\" ORDER BY value LIMIT 2 - (\n SELECT COUNT(*) FROM readings where device_uuid=\"{}\" and date_created>\"{}\" and date_created<\"{}\" \n ) % 2 OFFSET (\n SELECT (COUNT(*) - 1) / 2 FROM readings where device_uuid=\"{}\" and date_created>\"{}\" and date_created<\"{}\" \n )\n ) as T3\n '''.format(\n device_uuid, \n device_uuid, \n start, end, \n device_uuid, \n start, end, \n device_uuid, \n device_uuid, \n start, end,\n device_uuid, \n start, end,\n device_uuid, \n device_uuid, \n start, end,\n device_uuid, \n start, end\n ))\n rows = cur.fetchall()\n eljson = jsonify([dict(zip(['quartile_1', 'quartile_3', 'median'], row)) for row in rows])\n # Return the JSON\n return eljson, 200\n if start != None and end == None:\n # Execute the query\n cur.execute('''\n select * from\n (\n SELECT AVG(value) FROM readings where value < (\n SELECT value FROM readings where device_uuid=\"{}\" ORDER BY value LIMIT 2 - (\n SELECT COUNT(*) FROM readings where device_uuid=\"{}\" and date_created>\"{}\" \n ) % 2 OFFSET (\n SELECT (COUNT(*) - 1) / 2 FROM readings where device_uuid=\"{}\" and date_created>\"{}\" \n )\n )\n ) as T1\n ,\n (\n SELECT AVG(value) FROM readings where value > (\n SELECT value FROM readings where device_uuid=\"{}\" ORDER BY value LIMIT 2 - (\n SELECT COUNT(*) FROM readings where device_uuid=\"{}\" and date_created>\"{}\" \n ) % 2 OFFSET (\n SELECT (COUNT(*) - 1) / 2 FROM readings where device_uuid=\"{}\" and date_created>\"{}\"\n )\n )\n ) as T2\n ,\n (\n SELECT value FROM readings where device_uuid=\"{}\" ORDER BY value LIMIT 2 - (\n SELECT COUNT(*) FROM readings where device_uuid=\"{}\" and date_created>\"{}\"\n ) % 2 OFFSET (\n SELECT (COUNT(*) - 1) / 2 FROM readings where device_uuid=\"{}\" and date_created>\"{}\"\n )\n ) as T3\n '''.format(\n device_uuid, \n device_uuid, \n start, \n device_uuid, \n start,\n device_uuid, \n device_uuid, \n start,\n device_uuid, \n start,\n device_uuid, \n device_uuid, \n start,\n device_uuid, \n start,\n ))\n rows = cur.fetchall()\n eljson = jsonify([dict(zip(['quartile_1', 'quartile_3', 'median'], row)) for row in rows])\n # Return the JSON\n return eljson, 200\n if start == None and end != None:\n # Execute the query\n cur.execute('''\n select * from\n (\n SELECT AVG(value) FROM readings where value < (\n SELECT value FROM readings where device_uuid=\"{}\" ORDER BY value LIMIT 2 - (\n SELECT COUNT(*) FROM readings where device_uuid=\"{}\" and date_created<\"{}\" \n ) % 2 OFFSET (\n SELECT (COUNT(*) - 1) / 2 FROM readings where device_uuid=\"{}\" and date_created<\"{}\" \n )\n )\n ) as T1\n ,\n (\n SELECT AVG(value) FROM readings where value > (\n SELECT value FROM readings where device_uuid=\"{}\" ORDER BY value LIMIT 2 - (\n SELECT COUNT(*) FROM readings where device_uuid=\"{}\" and date_created<\"{}\" \n ) % 2 OFFSET (\n SELECT (COUNT(*) - 1) / 2 FROM readings where device_uuid=\"{}\" 
and date_created<\"{}\" \n )\n )\n ) as T2\n ,\n (\n SELECT value FROM readings where device_uuid=\"{}\" ORDER BY value LIMIT 2 - (\n SELECT COUNT(*) FROM readings where device_uuid=\"{}\" and date_created<\"{}\" \n ) % 2 OFFSET (\n SELECT (COUNT(*) - 1) / 2 FROM readings where device_uuid=\"{}\" and date_created<\"{}\" \n )\n ) as T3\n '''.format(\n device_uuid, \n device_uuid, \n end, \n device_uuid, \n end, \n device_uuid, \n device_uuid, \n end,\n device_uuid, \n end,\n device_uuid, \n device_uuid, \n end,\n device_uuid, end\n ))\n rows = cur.fetchall()\n eljson = jsonify([dict(zip(['quartile_1', 'quartile_3', 'median'], row)) for row in rows])\n # Return the JSON\n return eljson, 200\n if start == None and end == None:\n cur.execute('''\n select * from\n (\n SELECT AVG(value) FROM readings where value < (\n SELECT value FROM readings where device_uuid=\"{}\" ORDER BY value LIMIT 2 - (\n SELECT COUNT(*) FROM readings where device_uuid=\"{}\"\n ) % 2 OFFSET (\n SELECT (COUNT(*) - 1) / 2 FROM readings where device_uuid=\"{}\"\n )\n )\n ) as T1\n ,\n (\n SELECT AVG(value) FROM readings where value > (\n SELECT value FROM readings where device_uuid=\"{}\" ORDER BY value LIMIT 2 - (\n SELECT COUNT(*) FROM readings where device_uuid=\"{}\"\n ) % 2 OFFSET (\n SELECT (COUNT(*) - 1) / 2 FROM readings where device_uuid=\"{}\"\n )\n )\n ) as T2\n ,\n (\n SELECT value FROM readings where device_uuid=\"{}\" ORDER BY value LIMIT 2 - (\n SELECT COUNT(*) FROM readings where device_uuid=\"{}\"\n ) % 2 OFFSET (\n SELECT (COUNT(*) - 1) / 2 FROM readings where device_uuid=\"{}\"\n )\n ) as T3\n '''.format(device_uuid, device_uuid, device_uuid, device_uuid, device_uuid, device_uuid, device_uuid, device_uuid, device_uuid))\n rows = cur.fetchall()\n eljson = jsonify([dict(zip(['quartile_1', 'quartile_3', 'median'], row)) for row in rows])\n # Return the JSON\n return eljson, 200", "def refresh_observation(self, measurement: EngineObsType) -> None:\n observation = self.observation\n observation[\"t\"][()] = measurement[\"t\"]\n _array_copyto(observation['states']['agent']['q'],\n measurement['states']['agent']['q'])\n _array_copyto(observation['states']['agent']['v'],\n measurement['states']['agent']['v'])\n sensors_data = observation['measurements']\n for key, value in dict.items(measurement['measurements']):\n _array_copyto(sensors_data[key], value)", "def query(measurement, limit):\n return client().query(\n f\"\"\"\n SELECT pair, open, high, low, close\n FROM {measurement} WHERE pair =~ /^USDC_*/\n GROUP BY pair\n ORDER BY time DESC LIMIT {limit}\n \"\"\")", "def update(self):\n print(\"sensorState Update\")", "async def query(self, metric):\n metric_name = metric.spec.provider.metric\n\n url = self.metrics_provider.spec.influx.url\n token = self.metrics_provider.spec.influx.token\n org = self.metrics_provider.spec.influx.org\n bucket_name = self.metrics_provider.spec.influx.bucket\n\n client = InfluxDBClient(url=url, token=token, org=org)\n query_api = client.query_api()\n\n query = f'''\n from(bucket:\"{bucket_name}\")\n |> range(start: -1h)\n |> filter(fn: (r) => r._measurement == \"{metric_name}\")\n |> last()\n '''\n\n try:\n loop = asyncio.get_event_loop()\n result = await loop.run_in_executor(None, query_api.query, query)\n for table in result:\n for record in table.records:\n response = record.values['_value']\n return float(response)\n\n except Exception as err:\n metric_provider_name = self.metrics_provider.metadata.name\n raise MetricsProviderError(\n f\"Failed to query InfluxDB with provider 
{metric_provider_name!r}\"\n ) from err\n\n raise MetricError(f\"Metric {metric_name!r} not in InfluxDB response\")", "def readSensorStringToSensor(sen_line):\n sensor = [None]*15\n\n sensor[Sensor.TIME] = unidecode.unidecode(sen_line[15:33].strip())\n sensor[Sensor.ENDTIME] = unidecode.unidecode(sen_line[35:51].strip())\n sensor[Sensor.JDATE] = unidecode.unidecode(stringToDate(sen_line[71:78].strip()))\n sensor[Sensor.CALRATIO] = unidecode.unidecode(sen_line[78:96].strip())\n sensor[Sensor.CALPER] = unidecode.unidecode(sen_line[95:112] .strip())\n sensor[Sensor.TSHIFT] = unidecode.unidecode(sen_line[113:119].strip())\n sensor[Sensor.INSTANT] = unidecode.unidecode(sen_line[120].strip())\n sensor[Sensor.LDDATE] = unidecode.unidecode(stringToDate(sen_line[122:].strip()))\n sensor[Sensor.CHANNEL_CSS_ID] = unidecode.unidecode(sen_line[62:69].strip())\n sensor[Sensor.INSTRUMENT_CSS_ID]= unidecode.unidecode(sen_line[51:60].strip())\n sensor[Sensor.S_ID] = -1\n sensor[Sensor.STATION_CODE] = unidecode.unidecode(sen_line[:7].strip())\n sensor[Sensor.CHANNEL_CODE] = unidecode.unidecode(sen_line[7:15].strip())\n sensor[Sensor.CHANNEL_ID] = -1\n sensor[Sensor.INSTRUMENT_ID]= -1\n\n return Sensor(sensor)", "def update(self):\n self.sensor.update()", "def request_device_readings_quartiles(device_uuid):\n\n if request.data:\n post_data = json.loads(request.data)\n type = post_data.get('type', None)\n if not type or type not in ('temperature', 'humidity'):\n return 'error on the required type data', 400\n start = post_data.get('start', None)\n if not start:\n return 'error on the required start data', 400\n end = post_data.get('end', None)\n if not end:\n return 'error on the required end data', 400\n else:\n return 'missing data in the request parameters', 400\n\n # Set the db that we want and open the connection\n if app.config['TESTING']:\n conn = sqlite3.connect('test_database.db')\n else:\n conn = sqlite3.connect('database.db')\n conn.row_factory = sqlite3.Row\n cur = conn.cursor()\n\n sql = 'SELECT r.value from readings r WHERE r.type = ? AND r.device_uuid = ? AND r.date_created >= ? 
AND r.date_created <= ?'\n params = [type, device_uuid, start, end]\n\n sql += 'ORDER BY r.value'\n\n # Execute the query\n cur.execute(sql, params)\n rows = [row[0] for row in cur.fetchall()]\n\n mid = len(rows) // 2\n\n if (len(rows) % 2 == 0):\n # even\n lowerQ = median(rows[:mid])\n upperQ = median(rows[mid:])\n else:\n # odd\n lowerQ = median(rows[:mid]) # same as even\n upperQ = median(rows[mid + 1:])\n\n return str(lowerQ) + \",\" + str(upperQ), 200", "def query_seconds(seconds = 60):\n \n # some empty arrays to store the data\n pm25 = []\n pm10 = []\n \n # wake up sensor\n sensor.sleep(sleep = False)\n \n # wait 15 seconds to make sure sensor is stable\n time.sleep(15)\n \n # create a unique identifier to use for the particlar poll\n poll_uuid = [str(uuid.uuid4())]\n \n # query for xx seconds\n t_end = time.time() + seconds\n while time.time() < t_end:\n # get data from sensor\n vals = sensor.query()\n # check that the value is OK, is a tuple, then store data\n if type(vals) is tuple:\n pm25.append(vals[0])\n pm10.append(vals[1])\n \n # only save aggregate data in case there is data in the arrays\n l_pm25 = len(pm25)\n l_pm10 = len(pm10)\n \n logging.debug(f\"length of pm25 list is {l_pm25}\")\n logging.debug(f\"length of pm10 list is {l_pm10}\")\n if l_pm25 > 0 and l_pm10 > 0:\n logging.debug(\"saving aggregated data\")\n save_aggregated(dbfile = DB_FILE,\n pm25list = pm25,\n pm10list = pm10,\n poid = poll_uuid)\n else:\n logging.debug(\"failed to save aggregate data\")\n sensor.sleep()", "def get_water_distance_raw(\n user_id: int,\n database_path: Optional[str] = config.DATABASE_PATH\n) -> pd.Series:\n sql_s = f\"SELECT timestamp_ms, value FROM data WHERE data_capture_id={user_id} AND sensor_id=1\"\n conn = sqlite3.connect(database_path)\n cursor = conn.execute(sql_s)\n time_measurements = []\n distance_measurements = []\n for entry in cursor:\n time_measurements.append(entry[0])\n distance_measurements.append(entry[1])\n data_r = pd.Series(distance_measurements, index=time_measurements)\n\n return data_r", "def web():\r\n wifi.conectar()\r\n #esto para especificar una ip y se deja en blanco para que la tome\r\n #automaticamente y se le asigna un puerto\r\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n s.bind(('', 80))\r\n s.listen(5)\r\n \r\n while True:\r\n \r\n #Recibir datos del sensor dht11\r\n temp, hum =sensor.recibir()\r\n sleep(2)\r\n \r\n #Enviar dato del sensor de temperatura a la api de thingspeak\r\n apiTemp=\"https://api.thingspeak.com/update?api_key=38YCW8L4OCXQGK6H&field1=\"\r\n apiTemp = apiTemp + str(temp)\r\n r1 = urequests.get(apiTemp)\r\n print(r1.json())\r\n \r\n #Enviar dato del sensor de humedad a la api de thingspeak\r\n apiHum=\"https://api.thingspeak.com/update?api_key=ORDLJDO3MN6OX9Q6&field1=\"\r\n apiHum = apiHum + str(hum)\r\n r2 = urequests.get(apiHum)\r\n print(r2.json())\r\n \r\n #guardar la conexión y dirección del dispositivo \r\n conn, addr = s.accept()\r\n print(\"Nueva conexión desde %s\" % str(addr))\r\n \r\n request=conn.recv(1024)\r\n print(\"\")\r\n print(\"Solicitud %s\" % str(request))\r\n\r\n request = str(request)\r\n \r\n #condiciones de temperatura si sobrepasan el rango requerido\r\n if (temp < 15 or temp > 20):\r\n t = \"Está fuera del rango\"\r\n else:\r\n t = \"Está dentro del rango\"\r\n \r\n if (hum < 45 or hum > 60):\r\n h = \"Está fuera del rango\"\r\n else:\r\n h = \"Está dentro del rango\"\r\n \r\n #Pagina web para mostrar los datos de los sensores y thingspeak\r\n pagina =\"\"\"<!DOCTYPE HTML>\r\n <html>\r\n 
<head>\r\n <meta charset=\"utf-8\">\r\n </head>\r\n <body style=\"background-color:#BCDCF1;\">\r\n \r\n <center>\r\n <h1>Medición y monitorización </h1>\r\n <iframe src=\"https://thingspeak.com/channels/1456301/charts/1?bgcolor=%23ffffff&color=%23d62020&dynamic=true&results=60&type=line&update=15\" height=\"260\" width=\"460\" title=\"Temperatura\"></iframe>\r\n <h2 style=\"color: #6F40A8;\">Temperatura es \"\"\" + str(temp) +\"\"\" </h2> \r\n <h2>\"\"\"+t+\"\"\"</h2>\r\n \r\n <br>\r\n <iframe src=\"https://thingspeak.com/channels/1456302/charts/1?bgcolor=%23ffffff&color=%23d62020&dynamic=true&results=60&type=line&update=15\" height=\"260\" width=\"460\" title=\"Humedad\"></iframe>\r\n <h2 style=\"color: #6F40A8;\">Humedad es \"\"\" + str(hum) + \"\"\" </h2>\r\n <h2>\"\"\"+h+\"\"\"</h2>\r\n </center>\r\n </body>\r\n </html>\"\"\"\r\n #mostramos pagina web y cerramos la pagina web\r\n conn.send('HTTP/1.1 200 OK\\n')\r\n conn.send('Content-Type: text/html\\n')\r\n conn.send('Connection: close\\n\\n')\r\n conn.sendall(pagina)\r\n conn.close()", "def query_climate_range(**kwargs):\n # print(\"Start time: \", kwargs['start_time'], \" End time: \", kwargs['end_time'])\n if not kwargs['start_time']:\n kwargs['start_time'] = 0\n if not kwargs['end_time']:\n # Searching for data 1 day into the 'future' as a max limit seems fair.\n kwargs['end_time'] = time.time() + 24*3600\n # TODO: Look at this\n # print(\"Start time: \", kwargs['start_time'], \" End time: \", kwargs['end_time'])\n if kwargs['sensor_id']:\n return query_db('SELECT * FROM climate WHERE ( time > ? AND time < ? AND sensor_id = ?)',\n [kwargs['start_time'], kwargs['end_time'], kwargs['sensor_id']])\n else:\n return query_db('SELECT * FROM climate WHERE (time > ? AND time < ?)',\n [kwargs['start_time'], kwargs['end_time']])", "def sensor_state(self, uuid):\n url = \"%s/state/teams/%s/sensors/%s\" % (self.url, self.identifier, uuid)\n return perform_request(url)", "def get_realtime_stream(self):\n ws = 0\n url = WS_URL % (self.sense_monitor_id, self.sense_access_token)\n try:\n ws = create_connection(url, timeout=self.wss_timeout, sslopt={'ciphers': 'DEFAULT@SECLEVEL=1'})\n while True: # hello, features, [updates,] data\n result = json.loads(ws.recv())\n if result.get('type') == 'realtime_update':\n data = result['payload']\n self.set_realtime(data)\n yield data\n except WebSocketTimeoutException:\n raise SenseAPITimeoutException(\"API websocket timed out\")\n finally:\n if ws: ws.close()", "def query(self):\n self._measurements[self.KEY_USAGE].df = self.fetch_data_usage()", "def list_sensors(request):\n sensors = get_all_sensors()\n table_rows = []\n\n if sensors is not None:\n for sensor in sensors:\n table_rows.append(\n (\n sensor.id, sensor.longitude, sensor.latitude\n )\n )\n\n sensor_table = DataTableView(\n column_names = ('id', 'latitude', 'longitude'),\n rows = table_rows,\n searching = False,\n orderClasses = False,\n lengthMenu=[ [10, 25, 50, -1], [10, 25, 50, \"All\"] ]\n )\n\n context = {\n 'sensor_table' : sensor_table\n }\n\n return render(request, 'open_air/list_sensors.html', context)", "def aranet_trh_query(dt_from, dt_to):\n locations_query = queries.latest_sensor_locations(db.session).subquery(\n \"sensor_locations\"\n )\n query = db.session.query(\n ReadingsAranetTRHClass.timestamp,\n ReadingsAranetTRHClass.sensor_id,\n ReadingsAranetTRHClass.temperature,\n ReadingsAranetTRHClass.humidity,\n LocationClass.zone,\n ).filter(\n and_(\n locations_query.c.location_id == LocationClass.id,\n 
ReadingsAranetTRHClass.sensor_id == locations_query.c.sensor_id,\n ReadingsAranetTRHClass.timestamp >= dt_from,\n ReadingsAranetTRHClass.timestamp <= dt_to,\n )\n )\n\n df = pd.read_sql(query.statement, query.session.bind)\n\n logging.info(\"Total number of records found: %d\" % (len(df.index)))\n\n if df.empty:\n logging.debug(\"WARNING: Query returned empty\")\n\n return df", "def __init__(self, sensor):\n self.sensor = sensor\n self.sensor.update()", "def read_sensor_data():\n global light_scheme_set, current_timeout\n\n # prevents very rapid changes of the color scheme\n if current_timeout is not 0:\n current_timeout -= 1\n return\n else:\n # call the shared library's sensor code\n reading = dll.readSensor()\n scheme = None\n\n # check if the scheme needs to be changed\n if reading >= settings.get('threshold') and light_scheme_set is not True:\n scheme = settings.get('light_color_scheme')\n light_scheme_set = True\n\n elif reading < settings.get('threshold') and light_scheme_set is not False:\n scheme = settings.get('dark_color_scheme')\n light_scheme_set = False\n\n # change user settings\n if scheme is not None:\n global_settings = sublime.load_settings('Preferences.sublime-settings')\n if global_settings.get('color_scheme') != scheme:\n global_settings.set('color_scheme', scheme)\n sublime.save_settings('Preferences.sublime-settings')\n current_timeout = settings.get('cycle_timeout')", "def get_smps(client, start, end):\n # load image or load from bigquery\n smps_query_str = \"SELECT * FROM cfog.sharp_smps \" +\\\n f\"WHERE timestamp BETWEEN '{start}' AND '{end}' \" +\\\n \"ORDER BY timestamp ASC\"\n print(f\"Executing bigquery query string: \")\n print(smps_query_str + '\\n')\n\n smps_query_job = client.query(smps_query_str)\n smps_query_job.result()\n smps_data = smps_query_job.to_dataframe()\n\n values = np.array(smps_data['values'].values.tolist()).T\n lowBouDia = np.array(smps_data['lowBouDia'].values.tolist()).T\n highBouDia = np.array(smps_data['highBouDia'].values.tolist()).T\n midDia = np.array(smps_data['midDia'].values.tolist()).T\n smps_data_df = smps_data.drop(columns=['values','lowBouDia','highBouDia','midDia']).set_index('timestamp')\n smps_data_out = dict(values=values,\n lowBouDia=lowBouDia,\n highBouDia=highBouDia,\n midDia=midDia,\n df=smps_data_df)\n\n print(f\"Query complete. 
Total number of data entries: {smps_data_out['df'].shape[0]}.\\n\\n\")\n return smps_data_out", "def query(self, qid):\r\n connection = self.getConnection()\r\n cursor = connection.cursor()\r\n lst = []\r\n\r\n #========================================================================\r\n # Preparation\r\n #========================================================================\r\n whereStatement, ranges, morPrep, insert, Levels, rangeTab = self.prepareQuery(qid)\r\n lst.append(round(morPrep, 6)) # preparation\r\n lst.append(round(insert, 6)) # insert ranges into table\r\n lst.append(ranges) #number of ranges\r\n lst.append(Levels) #depth of the tree\r\n\r\n #========================================================================\r\n # First approximation of query region\r\n #========================================================================\r\n\r\n if whereStatement is not '':\r\n if rangeTab is not None:\r\n query = \"SELECT \" + ora.getHintStatement(['USE_NL (t r)', ora.getParallelStringQuery(self.numProcesses)]) + \\\r\n\" \" + ', '.join(['t.'+ i for i in self.columnNames]) + \"\"\"\r\nFROM \"\"\" + self.iotTableName + \" t, \" + rangeTab + \"\"\" r \r\n\"\"\" + whereStatement\r\n\r\n else:\r\n query = \"SELECT \"+ ora.getHintStatement([ora.getParallelStringQuery(self.numProcesses)]) + ', '.join(self.columnNames) + \"\"\" \r\nFROM \"\"\" + self.iotTableName + \"\"\" \r\n\"\"\" + whereStatement\r\n\r\n start1 = time.time()\r\n ora.mogrifyExecute(cursor, query)\r\n result = cursor.fetchall()\r\n\r\n lst.append(round(time.time() - start1, 10)) # fetching\r\n \r\n if (self.integration == 'loose' and self.qtype.lower() != 'time') or self.integration == 'deep':\r\n qTable = self.queryTable + '_temp_' + qid\r\n else: \r\n qTable = self.queryTable + '_' + qid\r\n \r\n start1 = time.time()\r\n decoded = self.decodeSpaceTime(result)\r\n lst.append(round(time.time() - start1, 6)) #decoding\r\n\r\n start1 = time.time()\r\n res = self.storeQuery(qTable, self.queryColumns, decoded, True)\r\n lst.append(round(time.time() - start1, 6)) #storing\r\n if res != []:\r\n ptsInTemp = res\r\n lst.append(res) #approximate points\r\n else:\r\n ptsInTemp = 0\r\n \r\n #==================================================================\r\n # Secondary filtering of query region\r\n #==================================================================\r\n\r\n if (self.qtype.lower() == 'time' and self.integration == 'loose') or res == []:\r\n # no data returned or it is a time query in the loose integration\r\n lst.append(ptsInTemp) #approximate points\r\n lst.append(0) # point in polygon time\r\n return lst\r\n else:\r\n \r\n if self.integration.lower() == 'deep' and self.qtype.lower() == 'time':\r\n queryTab = self.iotTableName + '_res_' + str(qid)\r\n timeWhere = whereClause.addTimeCondition(getTime(self.granularity, self.start_date, self.end_date), 'TIME', self.timeType)\r\n zWhere = whereClause.addZCondition([self.ozmin, self.ozmax], 'Z')\r\n whereValue = whereClause.getWhereStatement([timeWhere, zWhere])\r\n \r\n \r\n if self.granularity == 'day':\r\n query = \"CREATE TABLE \" + queryTab + \"\"\"\r\n\"\"\" + ora.getTableSpaceString(self.tableSpace) + \"\"\"\r\nAS SELECT * \r\nFROM (\r\n SELECT \"\"\" + ora.getHintStatement([ora.getParallelStringQuery(self.numProcesses)]) + \\\r\n \"\"\" X, Y, Z, TO_DATE(TIME, 'yyyy/mm/dd') as TIME \r\n FROM \"\"\" + qTable +\"\"\"\r\n ) \r\n\"\"\" + whereValue\r\n else:\r\n query = \"CREATE TABLE \" + queryTab + \"\"\"\r\n\"\"\" + ora.getTableSpaceString(self.tableSpace) 
+ \"\"\" \r\n AS SELECT \"\"\" + ora.getHintStatement([ora.getParallelStringQuery(self.numProcesses)]) + \\\r\n \"\"\" X, Y, Z, TIME \r\n FROM \"\"\"+ qTable + \"\"\"\" \r\n \"\"\" + whereValue\r\n \r\n start1 = time.time()\r\n cursor.execute(query)\r\n end = round(time.time() - start1, 2)\r\n\r\n ora.dropTable(cursor, qTable, False)\r\n final = ora.getNumPoints(connection, cursor, queryTab)\r\n lst.append(final) #final points\r\n lst.append(end) #point in polygon time\r\n return lst\r\n \r\n else:\r\n final, end = self.pointInPolygon(qTable, qid, True)\r\n \r\n lst.append(final) #final points\r\n lst.append(end) #point in polygon time\r\n return lst\r\n else:\r\n print 'No data returned'\r\n return [lst[0], lst[1], '-', '-', '-','-','-','-']", "def update():\n\n # ensure parameters are present\n if not request.args.get(\"sw\"):\n raise RuntimeError(\"missing sw\")\n if not request.args.get(\"ne\"):\n raise RuntimeError(\"missing ne\")\n\n # ensure parameters are in lat,lng format\n if not re.search(\"^-?\\d+(?:\\.\\d+)?,-?\\d+(?:\\.\\d+)?$\", request.args.get(\"sw\")):\n raise RuntimeError(\"invalid sw\")\n if not re.search(\"^-?\\d+(?:\\.\\d+)?,-?\\d+(?:\\.\\d+)?$\", request.args.get(\"ne\")):\n raise RuntimeError(\"invalid ne\")\n\n # explode southwest corner into two variables\n (sw_lat, sw_lng) = [float(s) for s in request.args.get(\"sw\").split(\",\")]\n\n # explode northeast corner into two variables\n (ne_lat, ne_lng) = [float(s) for s in request.args.get(\"ne\").split(\",\")]\n\n # find stations within view\n if (sw_lng <= ne_lng):\n # doesn't cross the antimeridian\n\n stations = Station.query.join(Place).\\\n filter(db.and_(\n sw_lat <= Place.lat, Place.lat <= ne_lat,(db.and_(\n sw_lng <= Place.lng, Place.lng <= ne_lng)))).all()\n\n else:\n # crosses the antimeridian\n\n stations = Station.query.join(Place).\\\n filter(db.and_(\n sw_lat <= Place.lat, Place.lat <= ne_lat,(db.or_(\n sw_lng <= Place.lng, Place.lng <= ne_lng)))).all()\n\n result = geo_stations.dump(stations)\n\n return jsonify(result.data)", "def test_api_sensors(self):\n # load api base\n r = requests.get('{server}/api/0.1/'.format(\n server=self.get_server_url())).json()\n # load sensors from url specified in api base\n r = requests.get(r['sensors']).json()\n self.assertIn('count', r)\n self.assertIn('sensors', r)\n self.assertIn('prev', r)\n self.assertIn('next', r)", "def _get_sensors_data(task):\n\n try:\n report = irmc_common.get_irmc_report(task.node)\n sensor = irmc.scci.get_sensor_data(report)\n\n except (exception.InvalidParameterValue,\n exception.MissingParameterValue,\n irmc.scci.SCCIInvalidInputError,\n irmc.scci.SCCIClientError) as e:\n LOG.error(\"SCCI get sensor data failed for node %(node_id)s \"\n \"with the following error: %(error)s\",\n {'node_id': task.node.uuid, 'error': e})\n raise exception.FailedToGetSensorData(\n node=task.node.uuid, error=e)\n\n sensors_data = {}\n for sdr in sensor:\n sensor_type_name = sdr.find('./Data/Decoded/Sensor/TypeName')\n sensor_type_number = sdr.find('./Data/Decoded/Sensor/Type')\n entity_name = sdr.find('./Data/Decoded/Entity/Name')\n entity_id = sdr.find('./Data/Decoded/Entity/ID')\n\n if None in (sensor_type_name, sensor_type_number,\n entity_name, entity_id):\n continue\n\n sensor_type = ('%s (%s)' %\n (sensor_type_name.text, sensor_type_number.text))\n sensor_id = ('%s (%s)' %\n (entity_name.text, entity_id.text))\n reading_value = sdr.find(\n './Data/Decoded/Sensor/Thresholds/*/Normalized')\n reading_value_text = \"None\" if (\n reading_value is None) 
else str(reading_value.text)\n reading_units = sdr.find('./Data/Decoded/Sensor/BaseUnitName')\n reading_units_text = \"None\" if (\n reading_units is None) else str(reading_units.text)\n sensor_reading = '%s %s' % (reading_value_text, reading_units_text)\n\n sensors_data.setdefault(sensor_type, {})[sensor_id] = {\n 'Sensor Reading': sensor_reading,\n 'Sensor ID': sensor_id,\n 'Units': reading_units_text,\n }\n\n return sensors_data", "def getSensors(self):\n sensors = array([])\n sensors = r_[sensors, self._getTotalDemandSensor()]\n# sensors = r_[sensors, self._getDemandSensor()]\n# sensors = r_[sensors, self._getPriceSensor()]\n\n# sensors = r_[sensors, self._getBusVoltageSensor()]\n\n# sensors = r_[sensors, self._getBusVoltageMagnitudeSensor()]\n# sensors = r_[sensors, self._getBusVoltageLambdaSensor()]\n# sensors = r_[sensors, self._getBranchFlowSensor()]\n\n# logger.info(\"State: %s\" % sensors)\n\n return sensors", "def posture_sensor(axis):\n\n\treturn 0.0", "def get_sensor(name):\n name = _lookup(name)\n all_data = mc.get('sensor_values')\n try:\n return all_data[name]\n except KeyError:\n raise KeyError(\"No Sensor with that name\")", "def query_from_spec(self, current_time):\n interval = int((current_time - self.query_spec.start_time).total_seconds())\n if interval < 20.0: # less than twenty seconds has elapsed since start of the experiment\n logger.debug(\"Less than 20 seconds have elapsed since the start of the experiment\")\n return self.post_process({\n \"status\": \"success\", \n \"data\": {\n \"resultType\": \"vector\",\n \"result\": []\n }\n }, current_time)\n\n kwargs = {\n \"interval\": f\"{interval}s\",\n \"version_labels\": \",\".join(self.query_spec.version_label_keys) # also hard coded\n }\n query = self.get_query(kwargs)\n return self.query(query, current_time)", "def sensors(self) -> List[dict]:\n return self.items_by_domain(\"sensor\")", "def listSensors(self, module=None):\n if module:\n try:\n sensors = self.db.getModuleSensors(self.db.getModule(module))\n except DoesNotExist:\n return None\n else:\n sensors = self.db.getSensors()\n # convert to list of tuples\n return [(sensor.ident,\n sensor.Instant,\n sensor.Active,\n sensor.ModuleRPC.Module.name,\n sensor.descr) for sensor in sensors]", "def get_gdax_historical_data():\n \n start = None\n while not start:\n start,end,tid = getStartAndEndHistoric()\n if not start:\n time.sleep(60)\n #Todo: change this to 1min\n firsttimestamp = start\n engine = sa.create_engine(sql_address)\n products = [\"LTC-USD\",\"LTC-BTC\",\"ETH-USD\",\"ETH-BTC\",\"BTC-USD\"]\n public_client = gdax.PublicClient()\n deltat = datetime.timedelta(seconds = 200)\n timewindows = []\n while end - start > datetime.timedelta(seconds=0):\n if start + deltat > end:\n endx = end\n else:\n endx = start + deltat\n timewindows.append([start,endx])\n start += deltat\n results = []\n total = len(timewindows)\n current_idx = 0\n timeold = time.time()\n numofqueries = 0\n engine = sa.create_engine(sql_address)\n Base.metadata.bind = engine\n DBSession = sa.orm.sessionmaker()\n DBSession.bind = engine\n session = DBSession()\n for startx,endx in timewindows:\n\n current_idx += 1\n for i in products:\n repeat = True\n while repeat:\n\n #delay if ratelimts are close\n if numofqueries < 3:\n while time.time() - timeold < 1:\n time.sleep(0.05)\n \n timeold = time.time()\n numofqueries = 0\n try:\n alist = public_client.get_product_historic_rates(i, start = startx, end = endx, granularity = 1)\n except:\n time.sleep(30)\n public_client = gdax.PublicClient()\n 
alist = public_client.get_product_historic_rates(i, start = startx, end = endx, granularity = 1)\n\n alist = public_client.get_product_historic_rates(i, start = startx, end = endx, granularity = 1)\n\n numofqueries += 1\n\n #rate limit exceeded has 'message' as dict.\n if not 'message' in alist:\n repeat = False\n for a in alist:\n a[0] = datetime.datetime.fromtimestamp(float(a[0]))\n tmp = i.split('-')\n d = dict(coin = tmp[0], currency = tmp[1], timestamp = a[0], low=a[1], high=a[2], open=a[3], close=a[4], volume=a[5])\n results.append(d)\n lasttimestamp = a[0]\n\n #upload with batch size of 10000\n if len(results) > 10000:\n engine.execute(\n GADXHistoricalDataOneSecondOHLC.__table__.insert(),\n results\n )\n results = []\n \n update = session.query(historicalDataProgramState).filter(sa.and_(historicalDataProgramState.transaction_id == tid,historicalDataProgramState.entry_type == 'update')).first()\n if update:\n update.end = lasttimestamp\n session.commit()\n else:\n new_update = historicalDataProgramState(entry_type = 'update',transaction_id = tid,start=firsttimestamp,end=lasttimestamp,platform='GDAX',status='incomplete')\n session.add(new_update)\n session.commit()\n if len(results) > 0:\n engine.execute(\n GADXHistoricalDataOneSecondOHLC.__table__.insert(),\n results\n )\n results = []\n \n update = session.query(historicalDataProgramState).filter(sa.and_(historicalDataProgramState.transaction_id == tid,historicalDataProgramState.entry_type == 'update')).first()\n if update:\n update.end = lasttimestamp\n session.commit()\n else:\n new_update = historicalDataProgramState(entry_type = 'update',transaction_id = tid,start=firsttimestamp,end=lasttimestamp,platform='GDAX',status='incomplete')\n session.add(new_update)\n session.commit()\n\n update = session.query(historicalDataProgramState).filter(sa.and_(historicalDataProgramState.transaction_id == tid,historicalDataProgramState.entry_type == 'update')).first()\n update.status='complete'\n order = session.query(historicalDataProgramState).filter(sa.and_(historicalDataProgramState.transaction_id == tid,historicalDataProgramState.entry_type == 'order')).first()\n order.status='complete'\n session.commit()", "def sendSensors(self,sensors):\n data = _RobotCommunicator.SENSOR_HEADER\n for index in sensors:\n data = data + pack(_RobotCommunicator.SENSOR_FORMAT,\n index,sensors[index])\n self.udpSock.sendto(data,self.addr)", "def read_sensors(self, sensor_sources, inf):\n sensors = {}\n for fru in self.frus:\n sensors[fru] = get_sensor_tuples(fru, self.nums[fru], sensor_sources, inf)\n\n # read specific sensors\n if self.extra_sensors != {}:\n for sensor_name in self.extra_sensors.keys():\n fru = self.extra_sensors[sensor_name].fru\n sensor = get_sensor_tuples(\n fru, None, {sensor_name: self.extra_sensors[sensor_name]}, None\n )\n # Merge sensors of the same fru\n if fru in self.frus:\n sensors[fru] = {\n **sensors[fru],\n **sensor,\n }\n else:\n sensors[fru] = sensor\n\n Logger.debug(\"Last fan speed : %d\" % self.last_fan_speed)\n Logger.debug(\"Sensor reading\")\n\n # Offset the sensor Temp value\n for key, data in list(sensor_sources.items()):\n sensorname = key.lower()\n\n for fru in self.frus:\n if sensorname in sensors[fru]:\n senvalue = sensors[fru][sensorname]\n Logger.debug(\" {} = {}\".format(sensorname, senvalue.value))\n\n offset = 0\n if data.offset != None:\n offset = data.offset\n elif data.offset_table != None:\n # Offset sensor Temp, relate with current fan speed\n for (fan_speed, offset_temp) in sorted(data.offset_table):\n 
if self.last_fan_speed > fan_speed:\n offset = offset_temp\n else:\n break\n if offset != 0:\n for fru in self.frus:\n if sensorname in sensors[fru]:\n senvalue = sensors[fru][sensorname]\n # Skip sensor if the reading fail\n if senvalue.value is None:\n continue\n value = senvalue.value + offset\n sensors[fru][sensorname] = senvalue._replace(value=value)\n value = senvalue.value + offset\n Logger.debug(\n \" %s = %.2f (after offset %.2f)\"\n % (sensorname, value, offset)\n )\n return sensors", "def getGPSrecords(t_start,t_end):\n client=connectionDB()\n query = '''\n select device_id as user,x as lon,y as lat,datetime\n from cadi360-sac.kovid_dev.records\n where datetime>=@t_start and datetime<=@t_end and x!=y and x is not null;\n '''\n job_config = bq.QueryJobConfig(\n query_parameters=[\n bq.ScalarQueryParameter('t_start', \"STRING\", t_start),\n bq.ScalarQueryParameter('t_end', \"STRING\", t_end),\n ]\n )\n query_job = client.query(query, job_config=job_config)\n results = query_job.result()\n GPSrecords = results.to_dataframe()\n GPSrecords = gpd.GeoDataFrame(GPSrecords, geometry=gpd.points_from_xy(GPSrecords.lon, GPSrecords.lat))\n return GPSrecords", "def gyroscope_sensor(axis):\n\n\tsensor_name = \"baseBoard\"\n\treg_addr = 24\n\tdata_len = 56\n\tregist_sensor(sensor_name, reg_addr, data_len)\n\n\t#get sensor data\n\tdata = rospy.wait_for_message('MediumSize/SensorHub/Imu', Imu, 2)\n\tangular_velocity = data.angular_velocity\n\tif axis == \"x\":\n\t\tresult = angular_velocity.x\n\telif axis == \"y\":\n\t\tresult = angular_velocity.y\n\telse:\n\t\tresult = angular_velocity.z\n\n\tdelete_sensor(sensor_name)\n\treturn result", "def query_from(self, temporal):\n raise NotImplementedError()", "def update(self):\n for retry in range(DEFAULT_RETRY):\n try:\n self.HysenData = self._broadlink_device.get_full_status()\n if self.HysenData is not None:\n self._current_temperature = self.HysenData['room_temp']\n self._target_temperature = self.HysenData['thermostat_temp']\n self._min_temp = self.HysenData['svl']\n self._max_temp = self.HysenData['svh']\n self._loop_mode = int(self.HysenData['loop_mode'])-1\n self._power_state = self.HysenData['power']\n self._auto_state = self.HysenData['auto_mode']\n self._is_heating_active = self.HysenData['active']\n\n self.remote_lock = self.HysenData['remote_lock']\n self.auto_override = self.HysenData['temp_manual']\n self.sensor_mode = self.HysenData['sensor']\n self.external_sensor_temprange = self.HysenData['osv']\n self.deadzone_sensor_temprange = self.HysenData['dif']\n self.roomtemp_offset = self.HysenData['room_temp_adj']\n self.anti_freeze_function = self.HysenData['fre']\n self.poweron_mem = self.HysenData['poweron']\n self.external_temp = self.HysenData['external_temp']\n self.clock_hour = self.HysenData['hour']\n self.clock_min = self.HysenData['min']\n self.clock_sec = self.HysenData['sec']\n self.day_of_week = self.HysenData['dayofweek']\n self.week_day = self.HysenData['weekday']\n self.week_end = self.HysenData['weekend']\n\n if self._power_state == HYSEN_POWERON :\n if self._auto_state == HYSEN_AUTOMODE:\n self._current_operation = STATE_AUTO\n else:\n self._current_operation = STATE_HEAT\n else:\n self._target_temperature = self._min_temp\n self._current_operation = STATE_IDLE\n\n except socket.timeout as error:\n if retry < 1:\n _LOGGER.error(\"Failed to get Data from Hysen Device:%s\",error)\n return\n except (vol.Invalid, vol.MultipleInvalid) as error:\n _LOGGER.warning(\"%s %s\",error, error.__str__)\n pass", "def update(self):\n if 
self._api is not None:\n \"\"\"Fetch the latest data\"\"\"\n self._api.get()\n \n \"\"\"set our sensor values\"\"\"\n self._current_temperature = self._api._temperature\n self._current_humidity = self._api._humidity\n self._target_temperature_high = self._api._coolto\n self._target_temperature_low = self._api._heatto\n self._away = self._api._awaymode\n \n \"\"\"Create a textual representations\"\"\"\n self._current_operation_mode = \"Unknown\"\n if self._api._opmode == 0:\n self._current_operation_mode = 'Off';\n elif self._api._opmode == 1:\n self._current_operation_mode = 'Heat only';\n elif self._api._opmode == 2:\n self._current_operation_mode = 'Cool only';\n elif self._api._opmode == 3:\n self._current_operation_mode = 'Heat & Cool'; \n self._current_fan_mode = \"Unknown\"\n \"\"\"Create a textual representations\"\"\"\n if self._api._fanmode == 0:\n self._current_fan_mode = 'Auto';\n elif self._api._fanmode == 1:\n self._current_fan_mode = 'On';\n elif self._api._fanmode == 2:\n self._current_fan_mode = 'Circulate';\n \"\"\"Create a textual representation\"\"\"\n if self._api._state == 0:\n self._current_state = 'Idle';\n elif self._api._state == 1:\n self._current_state = 'Heating';\n elif self._api._state == 2:\n self._current_state = 'Cooling';", "def sendSensors(self,sensors):\n self.broadcaster.sendSensors(sensors)", "def res(ra, dec, ang):\n query = \"\"\"\n SELECT\n s.ra, s.dec,\n s.dered_g as g, s.dered_r as r,\n s.err_g, s.err_r,\n s.flags\n \n FROM\n dbo.fGetNearbyObjEq({}, {}, {}) AS n\n JOIN Star AS s ON n.objID = s.objID\n \n WHERE\n g - r BETWEEN -0.5 AND 2.5\n AND g BETWEEN 14 and 24\n \"\"\".format(ra,dec,ang)\n \n return SDSS.query_sql(query, timeout = 600)", "def wireless_sensing(self, num_Samples):\n cont_Samples = 0\n while cont_Samples <= num_Samples:\n\n gps_pos = self.dc.read_gps()\n n_samples = 256 # DON'T CHANGE TO 128!!!!! IT CAUSES KERNEL PANIC (unless you change tick or find another fix)\n\n if IS_SIMULATION:\n dBm = self.get_simulated_dBm()\n self.dBm = dBm\n self.sdr_number_sampled += 1\n time.sleep(0.01)\n else:\n self.sdr_number_sampled += 1\n samples = self.sdr.read_samples(n_samples)\n dBm = 10 * np.log10(np.mean(np.power(np.abs(samples), 2)))\n self.dBm = dBm\n\n if self.sdr_number_sampled > self.sdr_samples_to_discard:\n wireless_msg0 = HotspotWirelessMessage(\n \ttriangle_count=self.triangle_count,\n \tvertex=self.vertex,\n location=gps_pos,\n heading=self.dc.vehicle.heading,\n dBm=dBm,\n )\n self.flight_logger.log(wireless_msg0)\n self.wireless_data.append(wireless_msg0)\n cont_Samples += 1" ]
[ "0.78036326", "0.56300086", "0.56219053", "0.54439676", "0.5414489", "0.5369397", "0.53085774", "0.5199531", "0.5161808", "0.51520276", "0.5134034", "0.5122384", "0.510574", "0.50754833", "0.50372785", "0.5034987", "0.5008669", "0.50082725", "0.5000846", "0.49865463", "0.49745566", "0.49654335", "0.49633363", "0.49038783", "0.49025807", "0.49005684", "0.48986858", "0.489094", "0.48893544", "0.48811346", "0.48805237", "0.4853475", "0.48181623", "0.4815827", "0.48133814", "0.4801042", "0.47865856", "0.47855258", "0.4776053", "0.47739312", "0.47715175", "0.47583526", "0.475334", "0.47388208", "0.47378096", "0.47377318", "0.47232094", "0.4722641", "0.47190377", "0.47154796", "0.47143838", "0.47037652", "0.47018763", "0.46920127", "0.466111", "0.4659559", "0.46586984", "0.46562654", "0.46491307", "0.46444267", "0.4643805", "0.46434718", "0.46341622", "0.4633296", "0.4630316", "0.4625043", "0.4620346", "0.46159694", "0.4614929", "0.46119174", "0.46114814", "0.45982802", "0.4597356", "0.45948422", "0.4592812", "0.4591938", "0.45817766", "0.45774665", "0.4574879", "0.45727825", "0.45698732", "0.45656174", "0.45611563", "0.4546419", "0.45460135", "0.45451164", "0.45436433", "0.4542582", "0.45357367", "0.4533041", "0.45269787", "0.4526078", "0.45188284", "0.45183784", "0.4514879", "0.45134163", "0.45060742", "0.4505669", "0.45049614", "0.4499987" ]
0.59702265
1
Read traffic data from .csv; the file must be in the format read by SGL
def read_traffic_sensor_from_csv(path: str) -> pd.DataFrame:
    df = pd.read_csv(path)
    df["measuredTime"] = pd.to_datetime(df["measuredTime"])
    df.set_index("measuredTime", inplace=True)
    return df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_csv():", "def get_data(self, csv_file):\n pass", "def format_porteurs(filepath):\n fieldnames, rows = get_header_rows(filepath)\n\n if \"statut\" in fieldnames:\n fieldnames.append(\"situation_societariat_entrance\")\n fieldnames.append(\"situation_situation\")\n for row in rows:\n statut = row['statut']\n row['situation_societariat_entrance'] = \"\"\n if statut == \"Associé\":\n row['situation_societariat_entrance'] = \"01/01/2015\"\n row['situation_situation'] = PORTEUR_STATUS_MATCH.get(statut)\n\n\n if 'coordonnees_address1' in fieldnames and 'coordonnees_address2' in fieldnames:\n fieldnames.append('coordonnees_address')\n for row in rows:\n row['coordonnees_address'] = row['coordonnees_address1'] + \\\n '\\n' + row['coordonnees_address2']\n\n if \"coordonnees_civilite\" in fieldnames:\n fieldnames.append('coordonnees_sex')\n for row in rows:\n if row['coordonnees_civilite'].lower() == u\"mademoiselle\":\n row['coordonnees_civilite'] = u\"Madame\"\n\n if row['coordonnees_civilite'] == u'Madame':\n row['coordonnees_sex'] = 'F'\n else:\n row['coordonnees_sex'] = 'M'\n\n if \"zus\" in fieldnames:\n fieldnames.append(\"coordonnees_zone_qual\")\n for row in rows:\n if row['zus'] == '1':\n row['coordonnees_zone_qual'] = 'zus'\n\n write_csv_file(filepath, fieldnames, rows)", "def read_csv_file(self):\n pass", "def readCsv(variables, path, pathCsv, estacion):\n # os.makedirs('../data/totalData/')\n dataVa = df.DataFrame()\n variables = variables\n mypath = path\n patron = re.compile(variables + '_'+estacion+'_\\d\\d\\d\\d-\\d\\d-\\d\\d' + '.*')\n for base, dirs, filess in os.walk(mypath, topdown=False):\n filess = sorted(filess)\n for value in filess:\n if patron.match(value) != None:\n tempData = df.read_csv(mypath + value)\n #tempData = completeMet(tempData)\n tempData = tempData.iloc[0:24, :]\n dataVa = concat([tempData, dataVa], axis=0)\n dataVa = dataVa.reset_index()\n dataVa = dataVa.drop(labels='index', axis=1)\n dataVa.to_csv(pathCsv + variables + '_'+ estacion +'_total.csv', encoding='utf-8', index=False)\n dataVa = df.DataFrame()", "def ouvrir_fichier():\r\n df = pandas.read_csv(\r\n 'ong.csv',\r\n header=2,\r\n names=[\r\n 'id',\r\n 'country',\r\n 'year',\r\n 'emissions',\r\n 'value',\r\n 'footnotes',\r\n 'source'\r\n ]\r\n )\r\n if df is None:\r\n return abort(404)\r\n else:\r\n return df", "def _set_grille_csv(self):\n with open(self.csvPath, \"r\") as csvFile:\n fileRead = csv.reader(csvFile, delimiter=\",\")\n\n #We read each row of the csv file\n for row in fileRead:\n rowSplitted = row[0].split(\";\")\n self._grilleCSV.append(rowSplitted)", "def prepare_CSV(self):\n self.drop_columns()\n self.rename_columns()\n self.spilt_columns()\n self.add_vehicle_id_column()\n self.add_source_column()\n self.add_timestamp_columns()\n self.get_colour_columns()\n self.clean_column_formats()\n\n # print(self.data.info())\n # print(self.data.sample(10))\n\n return self.data", "def export_data(self):\r\n \r\n \r\n output_file = 'export.csv'\r\n data = self.get_raw_data()\r\n \r\n if data != []:\r\n print('Writing to file', output_file)\r\n with open(output_file, 'w',) as csvfile:\r\n fluorescence_levels = csv.writer(csvfile)\r\n fluorescence_levels.writerow(['sensor_1','Time'])\r\n for i in data:\r\n fluorescence_levels.writerow(i)\r\n print('done')\r\n \r\n else:\r\n print('no recorded data')", "def makeCsv(net, date, opt, path, minlat, maxlat, minlon, maxlon, variables, estaciones):\n\n # data_lon = 
Dataset('/ServerData/KRAKEN/Reanalisis/a1979/wrfout_c15d_d01_1979-08-15_00:00:00.1979')\n # LON = data_lon.variables['XLONG'][:]\n # LAT = data_lon.variables['XLAT'][:]\n #\n # LON = LON[0][0]\n # LAT = LAT[0]\n #\n # LONsize = len(LON)\n # LATsize = len(LAT)\n #\n # celda = []\n var_cut = []\n for i in variables:\n var = net.variables[i][:,int(minlat):int(maxlat),int(minlon):int(maxlon)]\n #print(LON)\n #print(var)\n #return\n # celda.append(var)\n # result = ne(var, LON, LAT, LONsize, LATsize, minlat, maxlat, minlon, maxlon)\n var_cut.append(var)\n\n for ls in range(len(var_cut)):\n saveData(var_cut[ls], variables[ls], date, opt, path, estaciones)", "def upload_csv_data(self, upload_file):\n db = DataBase(self.DATABASE_DATA)\n db.insert_data_from_file(\n 'triagedata.historicdata',\n ('clinic_id', 'severity', 'date_received', 'date_seen'),\n upload_file,\n ','\n )", "def save_csv(self, filename): # DONE\n self.data.to_csv(filename)", "def dataLoad():\n try:\n try: #Python3\n f = open(__file__ + \".csv\",\"rt\")\n except: #Python2\n f = open(__file__ + \".csv\",\"rb\")\n data = f.read().split(',')\n entryCol.entry0.delete(0,END)\n entryCol.entry0.insert(0,data[0])\n entryCol.entry1.delete(0,END)\n entryCol.entry1.insert(0,data[1])\n entryCol.entry2.delete(0,END)\n entryCol.entry2.insert(0,data[2])\n entryCol.entry3.delete(0,END)\n entryCol.entry3.insert(0,data[3])\n botWind.writeN(\"DataLoad: File\")\n except:\n botWind.writeN(\"DataLoad: Default\")", "def loadCSV(input_file):", "def csv_loader(csv_file):\n df = pd.read_csv(csv_file, sep=';', parse_dates=['Data_Alteraçao'])\n pd.set_option('display.float_format', '{:.0f}'.format)\n\n df = df.fillna(0)\n df = df.drop(columns=['Cod. Pareamento', 'Cod. UF', 'Sigla UF', 'Cod. Subarea',\n 'Nome Subarea', 'Cod. Municipio', 'Nome Municipio', 'Codigo Agencia',\n 'Nome Agencia', 'Cod. Setor', 'Cod. Logradouro CNEFE',\n 'Tipo Logradouro CNEFE', 'Titulo Logradouro CNEFE',\n 'Nome Logradouro CNEFE', 'Nome Tratado CNEFE', 'Tipo Logradouro DNE',\n 'Titulo Logradouro DNE', 'Nome Logradouro DNE', 'Nome Tratado DNE',\n 'Logradouro Completo DNE', 'Distancia', 'Cod. 
Match', 'Motivo Match',\n 'CEPs Face', 'Localidade Face',\n 'Alterar Logradouro para DNE?', 'Observaçao', 'SIAPE Alteração',\n 'Nome Alteraçao', 'Data_Alteraçao', 'Status', 'Unnamed: 33'])\n\n # df.astype({'CEP Logradouro CNEFE': 'int32'}).dtypes\n\n df['CEP'] = df['CEP'].str.replace(' ', '', regex=False)\n\n ceps_dne = []\n for index, row in df.iterrows():\n if type(row.CEP) == str:\n for cep in row.CEP.split(','):\n # print(index, cep)\n ceps_dne.append(int(cep))\n\n ceps_cnefe = df['CEP Logradouro CNEFE'].astype(int).tolist()\n ceps = ceps_dne + ceps_cnefe\n ceps = list(set(ceps))\n return pd.Series(ceps)", "def dbtocsv():\n connection = sqlite3.connect(\"sensordata.db\")\n cursor = connection.cursor()\n cursor.execute(\"Select * from sensordata\")\n roadstationdata = cursor.fetchall()\n\n with open('roadstationdata.csv', 'w') as f:\n writer = csv.writer(f)\n writer.writerow(['id','name','value','unit','time'])\n writer.writerows(roadstationdata)", "def import_data():\n data = pd.read_csv('partA/bikes_October18.csv', ',')\n return data", "def init_csv_file(self):\n folder = \"/home/pi/data/\" + datetime.now().strftime(\"%Y_%m_%d\") + \"/\"\n if not os.path.isdir(folder):\n # append 'a' to the folder name until we find a name that does not exist\n while os.path.exists(folder):\n folder = folder[:-1] + \"a\" + \"/\"\n os.mkdir(folder)\n filename = folder + 'particledata_' + datetime.now().strftime (\"%H-%M-%S\") \n while os.path.exists(filename):\n filename = filename + '_a'\n filename += '.csv'\n log.info('Writing data to: ' + filename)\n self.file = open(filename, \"w\")\n self.file.write('Unix Time;Human Readable Time;pm 2.5;pm 10;Has Fix;Longitude;Latitude;Altitude;GPS Unix Time\\n')\n self.file.flush()\n self.synced_time = False", "def make_csv(file_of_data):\n with open(file_of_data, 'w') as f:\n writer = csv.writer(f)\n header = (\"Counter\", \"Date/time\", \"Latitude\", \"Longitude\", \"Temperature\", \"Humidity\")\n writer.writerow(header)", "def readCSV(filename):\r\n data = list( csv.reader(open('HW_08_DBScan_Data_NOISY_v300.csv','r'),delimiter=','))\r\n for dIdx in range(len(data)):\r\n data[dIdx] = [float(data[dIdx][0]),float(data[dIdx][1]),float(data[dIdx][2])]\r\n #print(data[0])\r\n return data", "def lectcsv(NOM):\n #NOM=input(\"nom du fichier:\")#interactif\n #NOM=str(NOM +\".csv\")\n c=[NOM]\n #ouverture du fichier et recuperation du contenu\n with open(NOM) as f:\n contenu = csv.reader(f, delimiter=' ', quotechar='|')\n for row in contenu:\n c.append(row[0].split(';'))#separation du string \n return c", "def exportcsvsumdata(self, log):\r\n csvdata= None\r\n\r\n if (log):\r\n csvdata = ('%s\\t'%(log['CALLSIGN']))\r\n csvdata += ('%s\\t'%(log['OPERATORS']))\r\n csvdata += ('%s\\t'%(log['LOCATION']))\r\n csvdata += ('%d\\t'%(log['COUNT']))\r\n csvdata += ('%s\\t'%(log['NAMES']))\r\n if(log['LASTWORKED']): \r\n csvdata += ('%s/%s UTC'%(log['LASTWORKED'],\r\n log['LWTIME'])) \r\n\r\n return csvdata", "def create_csv_file(self):\r\n # Create a new csv-file\r\n with open(self.fname, 'w') as f:\r\n writer = csv.writer(f, dialect='excel')\r\n writer.writerow(['set_time',\r\n 'read_time_P_ac',\r\n 'read_time_P_bat',\r\n 'soc',\r\n 'set_value',\r\n 'P_ac',\r\n 'P_bat'])", "def file(self):\n result = []\n completePath = CompletePath(self.path, self.filename) \n with open(completePath.path(), 'w', newline='') as csvfile:\n fieldnames = ['Activity', 'Points']\n writer = csv.DictWriter(csvfile, fieldnames = fieldnames)\n writer.writeheader()\n for i in range ( len( 
self.groupPriority.rows() ) ):\n tmp = self.groupPriority.rows()[i]\n self.log.info ( \"FinalCSV\", \"file\",\"data {0},{1}\".format( tmp.activity(), tmp.points() ) )\n writer.writerow({'Activity': tmp.activity(), 'Points': tmp.points()})\n self.log.info(\"FinalCSV\", \"file\", \"Elaborated file: {0}\".format ( completePath.path() ) )", "def get_data():\r\n data = pd.read_csv(FILE_PATH)\r\n # Replace 'Zero KM' by year 2022 assuming it's a new car\r\n data['Ano'] = data['Ano'].str.replace('Zero KM', '2021').replace('2022', '2021')\r\n data['Ano'] = data['Ano'].astype(int)\r\n data['Automático'] = data['Automático'].astype(int)\r\n return data", "def guardar_CSV(self):\n participantes = self.__disparos.copy()\n archivo = input(\"Ingrese nombre del archivo: \")\n with open(f\"{archivo}.txt\", 'a') as csv_file:\n campos = ['idDisparo', 'nroParticipante', 'nombre', 'apellido', 'edad', 'sexo', 'disparos', 'mejor_disparo', 'promedio', 'puntaje_total']\n csv_writer = csv.DictWriter(csv_file, fieldnames=campos)\n csv_writer.writeheader()\n for linea in participantes:\n csv_writer.writerow(linea)\n print(\n f\"\"\"\n ==========================================\n == SE HAN GUARDADO LOS DATOS ==\n ==========================================\n \"\"\"\n )", "def lecture_fichier(path_data_frame):\n logger = logging.getLogger('Lecture du fichier')\n log = \"#### DPNMaker 1.0..............\\n### Mirna Marie-Joseph, Théo Gauvrit, Kévin Merchadou\\n#### Date : 1 avril 2019\\n\\n\"\n log = log + \"Ouverture du fichier.......................................\\n\"\n log = log + \"Chargement des données.......................................\\n\"\n Iterateur = 2\n Donnees_Mere = []\n Donnees_Foetus = []\n Donnees_Pere = []\n Donnees_na = pd.read_csv(path_data_frame, sep='\\t', header=0)\n Donnees = Donnees_na.replace(np.nan, 0.0, regex=True)\n if (Donnees.shape[0] > 32):\n Iterateur = 3\n num_pere = Donnees[\"Sample Name\"].values[2]\n Allele_na = Donnees[[\"Allele 1\", \"Allele 2\", \"Allele 3\"]].values\n Hauteur_na = Donnees[[\"Height 1\", \"Height 2\", \"Height 3\"]].values\n Date_echantillon = re.search(\"(\\d{4}-\\d{2}-\\d{2})\", Donnees[\"Sample File\"].values[0]).group()\n Nom_echantillon = Donnees[\"Sample Name\"].values[0]\n num_foetus = Donnees[\"Sample Name\"].values[1]\n Allele, Hauteur, log = homogeneite_type(Allele_na, Hauteur_na, log)\n for ligne in range(0, Donnees.shape[0] - 1, Iterateur):\n M = Mere(Donnees[\"Marker\"][ligne], Allele[ligne],\n Hauteur[ligne], None, None)\n F = Foetus(Donnees[\"Marker\"][ligne], Allele[ligne + 1],\n Hauteur[ligne + 1], None, None, num_foetus, None, None)\n if (Iterateur == 3):\n P = Pere(Donnees[\"Marker\"][ligne],\n Allele[ligne + 2], Hauteur[ligne + 2], None, num_pere, None)\n Donnees_Pere.append(P)\n Donnees_Mere.append(M)\n Donnees_Foetus.append(F)\n Echantillon_F = Echantillon(Date_echantillon, Nom_echantillon, F)\n log = log + \"Donnees chargees.......................................\\n\"\n return Donnees_Mere, Donnees_Foetus, Donnees_Pere, Echantillon_F, log", "def setup_csv(self) -> None:\n csvData = ['Followers', 'Time']\n\n # Create our CSV file header\n with open(self.graphfile, 'w') as csvFile:\n writer = csv.writer(csvFile)\n writer.writerow(csvData)\n csvFile.close()", "def load_CSV_data(path):\n return np.genfromtxt(os.path.join('data/traffic_data', path))", "def export_csv( self, db_device_adapters, db_start, db_end, min_points = 10, csv_file_name = \"data.csv\" ):\n msg = f\"Export data to csv file {csv_file_name}...\"\n 
AppGlobal.gui.display_info_string( msg )\n sep = \"\\t\"\n for i_device_adapter in db_device_adapters:\n #time_data, inst_pw_data, total_power_data, = self._prep_data( i_device_adapter, db_start, db_end, min_points )\n i_device_adapter.retrived_data_cache = self._prep_data( i_device_adapter, db_start, db_end, min_points )\n time_data, inst_pw_data, total_power_data, = i_device_adapter.retrived_data_cache\n\n device_name = i_device_adapter.name\n\n if time_data is None:\n msg = f\"No data for {device_name}.\"\n AppGlobal.gui.display_info_string( msg )\n else:\n with open( csv_file_name, \"a\" ) as a_file: # we are appending\n a_file.write( f'\"device\"{sep}\"time_data\"{sep}\"inst_pw_data\"{sep}\"total_power_data\"\\n' )\n for ix_list, i_time in enumerate( time_data ):\n a_file.write( f\"{device_name}{sep}{time_data[ ix_list ]}{sep}{inst_pw_data[ ix_list ]}{sep}{total_power_data[ ix_list ]}\\n\" )\n\n msg = f\"...CSV file complete.\"\n AppGlobal.gui.display_info_string( msg )", "def from_csv_to_database():\r\n for year, path in FileNamePath.items():\r\n # load csv files\r\n with open(path, encoding='cp1251') as dataset:\r\n print(f\"Download {year} data\")\r\n get_curr_data(dataset, year)", "def __loaddata(filename, datatype='flightcsv', minprob=0.001, maxprob=0.20):\n if datatype is 'flightcsv':\n return extract_flight_csv(filename, minprob=minprob, maxprob=maxprob)\n else:\n raise Exception('unknown datatype %s' % datatype)", "def CSV_Load_File( self, infilename ):\n print( 'Loading \"{}\"'.format(infilename) )\n IN = open( infilename, 'r' )\n standname = None\n laststand = None\n for L in IN:\n if( L[0:9] == 'Site/Plot' ): continue\n col = L.split( ',' )\n standname = col[0]\n year = int(col[1])\n #if( re.search( '-', standname ) != None ):\n # loc = re.search( '-', standname )\n # year = int(standname[loc.start()+1:])\n # standname = standname[0:loc.start()]\n #print standname, year\n if( (standname != None ) & (standname != laststand) ): self.Data.Stand[standname] = StandData( standname )\n (treeno, species, dbh, ht, live, status, cclass, tpa) = \\\n (int(col[2]), col[3], float(col[4]), float(col[5]), col[6], col[7], int(float(col[8])), float(col[9]))\n if( OPT['d'] ):\n if( dbh > 10.0 ): dbh *= 1.25\n if( dbh > 15.0 ): dbh *= 1.50\n for t in range( 1, int( math.ceil( tpa ))+1, 1 ):\n ntree = len( self.Data.Stand[standname].Tree ) + 1\n self.Data.Stand[standname].Tree[ntree] = TreeData( species, TreeNumber=treeno )\n self.Data.Stand[standname].Tree[ntree].Year[year] = MeasurementData( dbh, ht, '', 1, live, status, cclass )\n laststand = standname\n IN.close()", "def load_obs_csv(self, csv_file, date_fmt=\"%Y/%m/%d %H:%M\", mission_lst=None, only_geom=False):\n\n try:\n obs_data = np.loadtxt(csv_file, delimiter=',', dtype='str')\n msg = \"observation data loaded from file ***{}***\".format(csv_file)\n FileLogger.info(msg)\n except IOError as exc:\n msg = \"could not load observations from csv file ***{}***\".format(csv_file)\n msg += \" ({})\".format(exc)\n FileLogger.fatal(msg)\n raise RuntimeError(msg)\n\n nt,ncol = obs_data.shape\n date_lst = [ dt.datetime.strptime(obs_data[i,0], date_fmt) for i in xrange(nt) ]\n date_a = np.array(date_lst)\n time_start_data = date_lst[0]\n time_end_data = date_lst[-1]\n #-- logging\n msg = \"detected ntimepts={} #columns={} in csv file\".format(nt, ncol)\n FileLogger.info(msg)\n\n #-- potential adjustment to specified temporal domain\n if self.time_start!=None:\n time_start = self.time_start\n else:\n time_start = time_start_data\n if 
self.time_end!=None:\n time_end = self.time_end\n else:\n time_end = time_end_data\n\n #-- first 8 columns are always:date, vza, vaa, sza, saa, sat_flag, lat, lon\n\n if ncol==10:\n msg = \"start reading S1 observations...\"\n FileLogger.info(msg)\n # date, vza, vaa, sza, saa, sat_flag, lat, lon, vh, vv\n vh_lst = []\n vv_lst = []\n self.obs_dct['S1'] = ObsTable()\n self.obs_dct['S1'].geom = satgeo.SensorGeometry()\n self.obs_dct['S1'].sat_id_lst = []\n #-- abreviate\n sat_geom = self.obs_dct['S1'].geom\n sat_geom.date_utc = []\n sat_geom.vza = []\n sat_geom.vaa = []\n sat_geom.sza = []\n sat_geom.saa = []\n for i,act_date in enumerate(date_lst):\n if act_date<time_start:\n continue\n elif act_date>time_end:\n break\n #-- actual satellite/mission\n act_mission = obs_data[i,7].upper()\n if mission_lst!=None and not act_mission in mission_lst:\n msg = \"observation at date {} is from mission={} and ignored here.\".format(\n act_date.strftime('%Y-%m-%dT%H:%M'), act_mission)\n FileLogger.info(msg)\n continue\n #-- read actual geometry\n sat_geom.date_utc.append(act_date)\n sat_geom.vza.append( float(obs_data[i,1]) )\n sat_geom.vaa.append( float(obs_data[i,2]) )\n sat_geom.sza.append( float(obs_data[i,3]) )\n sat_geom.saa.append( float(obs_data[i,4]) )\n #-- lon,lat (columns 5,6) not needed\n #-- satellite flag (column 7)\n self.obs_dct['S1'].sat_id_lst.append(act_mission)\n #-- VH,VV in 0-indexed columns 8,9\n vh_lst.append( float(obs_data[i,8]) )\n vv_lst.append( float(obs_data[i,9]) )\n\n #-- geometries/satellite flags are done here\n if only_geom:\n return\n\n #-- turn into arrays\n vh = np.array(vh_lst)\n vv = np.array(vv_lst)\n #-- logging\n msg = \"observational backscatter values are assumed to be in linear units!\"\n FileLogger.info(msg)\n msg = \"VH backscatter values read: VH[linear] min/max={}/{}\".format(\n vh.min(), vh.max())\n FileLogger.info(msg)\n msg = \"VV backscatter values read: VV[linear] min/max={}/{}\".format(\n vv.min(), vv.max())\n FileLogger.info(msg)\n #-- uncertainty computation\n #-- XX_db = XX_db(XX) = 10*log10(XX)\n #-- XX = XX(XX_db) = 10**(XX_db/10)\n #\n # for the uncertainty in linear/raw unit we apply conservative estimation:\n # 2*sXX = [ XX(XX_db+sXX_db) - XX(XX_db-sXX_db) ] (XX=VH,VV)\n # = [ XX(XX_db)*10**(sXX_db/10.) - XX(XX_db)*10**(-sXX_db/10.)]\n # = XX(XX_db)*[10**(sXX_db/10.) - 10**(-sXX_db/10.)]\n # = XX * [10**(sXX_db/10.) - 10**(-sXX_db/10.)]\n ds = 0.5* (10**(self.s1_unc_db/10.) 
- 10**(-1*self.s1_unc_db/10.))\n #-- S1 uncertainty floor *may* be user-supplied\n if self.s1_vv_uncfloor!=None:\n dsvv_floor = self.s1_vv_uncfloor\n else:\n dsvv_floor = 10**(self.s1_floor_db/10.)*ds\n if self.s1_vh_uncfloor!=None:\n dsvh_floor = self.s1_vh_uncfloor\n else:\n dsvh_floor = 10**(self.s1_floor_db/10.)*ds\n msg = \"assuming S1 observational uncertainty of {} [dB] \".format(self.s1_unc_db)\n msg += \"yields relative uncertainty of {} [linear unit].\".format(ds)\n FileLogger.info(msg)\n msg = \"assuming vv={} vh={} S1 observational uncertainty floor [linear unit].\".format(\n dsvv_floor, dsvh_floor)\n FileLogger.info(msg)\n svh = np.maximum(vh*ds, dsvh_floor)\n svv = np.maximum(vv*ds, dsvv_floor)\n #-- apply floor value\n nlo_svh = np.count_nonzero(vh*ds<dsvh_floor)\n nlo_svv = np.count_nonzero(vv*ds<dsvv_floor)\n svh = np.maximum(svh, dsvh_floor)\n svv = np.maximum(svv, dsvv_floor)\n msg = \"number of applied uncertainty floor values on VH={} VV={}\".format(\n nlo_svh, nlo_svv)\n FileLogger.info(msg)\n msg = \"determined VH uncertainty in linear units, min/max={}/{}\".format(\n svh.min(), svh.max())\n FileLogger.info(msg)\n msg = \"determined VV uncertainty in linear units, min/max={}/{}\".format(\n svv.min(), svv.max())\n FileLogger.info(msg)\n #-- potential filtering of polarisations\n if not self.s1_pol is None:\n if not 'VH' in self.s1_pol:\n vh = self.obs_fill_value\n svh = self.obs_fill_value\n if not 'VV' in self.s1_pol:\n vv = self.obs_fill_value\n svv = self.obs_fill_value\n #-- \n nt_use = len(sat_geom.date_utc)\n self.obs_dct['S1'].data = np.empty((nt_use,2), dtype=np.float64) #-- 'VH','VV'\n self.obs_dct['S1'].data[:,0] = vh\n self.obs_dct['S1'].data[:,1] = vv\n self.obs_dct['S1'].dataunc = np.empty((nt_use,2), dtype=np.float64)\n self.obs_dct['S1'].dataunc[:,0] = svh\n self.obs_dct['S1'].dataunc[:,1] = svv\n #-- logging\n msg = \"...reading S1 observations DONE\"\n FileLogger.info(msg)\n else:\n #-- logging\n msg = \"start reading S2 observations...\"\n FileLogger.info(msg)\n # date, vza, vaa, sza, saa, sat_flag, lat, lon, BRF1,...,BRF13\n self.obs_dct['S2'] = ObsTable()\n self.obs_dct['S2'].geom = satgeo.SensorGeometry()\n self.obs_dct['S2'].sat_id_lst = []\n #-- abreviate\n sat_geom = self.obs_dct['S2'].geom\n sat_geom.date_utc = []\n sat_geom.vza = []\n sat_geom.vaa = []\n sat_geom.sza = []\n sat_geom.saa = []\n brf_lst = [ [] for i in xrange(NB_S2) ] #-- prepare lists for 13 BRF bands\n for i,act_date in enumerate(date_lst):\n if act_date<time_start:\n continue\n elif act_date>time_end:\n break\n #-- actual satellite/mission\n act_mission = obs_data[i,7].upper()\n if mission_lst!=None and not act_mission in mission_lst:\n msg = \"observation at date {} is from mission={} and ignored here.\".format(\n act_date.strftime('%Y-%m-%dT%H:%M'), act_mission)\n FileLogger.info(msg)\n continue\n #-- read actual geometry\n sat_geom.date_utc.append(act_date)\n sat_geom.vza.append( float(obs_data[i,1]) )\n sat_geom.vaa.append( float(obs_data[i,2]) )\n sat_geom.sza.append( float(obs_data[i,3]) )\n sat_geom.saa.append( float(obs_data[i,4]) )\n #-- lon/lat in columns 5, 6 not used here\n #-- satellite flag\n self.obs_dct['S2'].sat_id_lst.append(obs_data[i,7])\n #-- BRFs start at 0-indexed column 8 in data csv file\n for ib in xrange(NB_S2):\n icol = ib+8\n brf_lst[ib].append( float(obs_data[i, icol]) )\n\n #-- geometries/satellite flags are done here\n if only_geom:\n return\n #--\n nt_use = len(sat_geom.date_utc)\n brf_data = np.empty((nt_use,NB_S2), dtype=np.float64) #-- 
BRF1-13\n for ib in xrange(NB_S2):\n brf_data[:,ib] = np.array(brf_lst[ib])\n #-- check observational consistency\n nneg = np.count_nonzero( brf_data<0 )\n if nneg>0:\n msg = \"detected negative BRF values: nneg={}.\".format(nneg)\n msg += \" These will be set to fill-value!\"\n FileLogger.warn(msg)\n brf_data[ brf_data<0 ] = self.obs_fill_value\n nhi = np.count_nonzero( brf_data>1 )\n if nhi>0:\n msg = \"detected high BRF outlier values>1: nout={}.\".format(nhi)\n msg += \" These will be set to fill-value!\"\n FileLogger.warn(msg)\n brf_data[ brf_data>1 ] = self.obs_fill_value\n\n #-- data uncertainty\n msg = \"BRF uncertainty is derived by applying {} relative uncertainty, \".format(\n self.s2_relunc)\n msg += \"and an uncertainty floor value of {}\".format(self.s2_uncfloor)\n FileLogger.info(msg)\n brf_dataunc = np.maximum(brf_data*self.s2_relunc, self.s2_uncfloor)\n brf_dataunc[ brf_dataunc<0 ] = self.obs_fill_value\n brf_dataunc[ brf_data==self.obs_fill_value ] = self.obs_fill_value\n #-- restriction to seleted bands\n if not self.s2_bnds is None:\n bnd_msk = np.ones((NB_S2,), dtype=np.bool)*True\n bnd_msk[self.s2_bnds] = False\n brf_data[:,bnd_msk] = self.obs_fill_value\n brf_dataunc[:,bnd_msk] = self.obs_fill_value\n #-- set into structure\n self.obs_dct['S2'].data = brf_data\n self.obs_dct['S2'].dataunc = brf_dataunc\n #-- logging\n msg = \"...reading S2 observations DONE\"\n FileLogger.info(msg)", "def to_csv(fileFrom, fileTo):\n content = []\n\n with open(fileFrom) as f:\n for line in f.readlines():\n content.append(list(filter(lambda x: x != '0', line.split()))[:8])\n\n frame = pd.DataFrame(content)\n frame.columns = ['Station', 'Year', 'Month', 'Day', 'T_min', 'T_avg', 'T_max', 'Precipitation']\n frame.to_csv(fileTo)\n print(\"Data frame is writen to csv\")", "def load(*args):\r\n\r\n #args[0].to_csv(str(PATH.joinpath('./data/{}.csv'.format(args[1]))),index=False)\r\n\r\n try: # it will fail if duplicates\r\n args[0].to_sql('cmf', con=engine, if_exists='append', index=False)\r\n except:\r\n pass", "def load_csv(fichero):\r\n data = np.loadtxt(fichero, delimiter=',')\r\n X = data[:,:-1]\r\n y = data[:,-1]\r\n return X, y", "def get_data(path: str = \"\") -> List[pd.DataFrame]:\r\n X = pd.read_csv(\"log2.csv\")\r\n y = X[[\"Action\"]]\r\n X = X.drop(\"Action\", axis=1)\r\n return [X, y]", "def gen_csv_line(dt, data_type, ss_code, ss_config, bin_num, beam_num, blank, bin_size, value):\n #dt_str = dt.strftime(Ensemble.CSV_DATETIME_FORMAT)\n dt_str = dt.isoformat()\n\n bin_depth = Ensemble.get_bin_depth(blank, bin_size, bin_num)\n\n return \"{},{},{},{},{},{},{},{}\".format(dt_str, data_type, ss_code, ss_config, bin_num, beam_num, bin_depth, value)", "def csv_out(d):\n\theaders = ('쿠션, 파운데이션, 컨실러, 파우더, 블러쉬/블러셔/브론징, 컨투어링/하이라이터, 프라이머, UV프로텍터, 아이브로우, 아이라이너, 마스카라, 섀도우/글리터, 립/립스틱/틴트, 립케어/립밤/립글로스/립 오일, 스킨/토너/토닉, 로션/에멀젼, 미스트, 기타, zzz').split(',')\n\tglobal line\n\twith open('pony_refinedddd.csv', 'w', encoding='EUC-KR') as csv_file:\n\t\tcsvf = csv.writer(csv_file, delimiter=',')\n\t\tcsvf.writerow(headers)\n\t\tfor y in range(len(d)):\n\t\t\tfor z in range(len(d[y])):\n\t\t\t\t'''\n\t\t\t\tline = [d[y]['스킨/토너/토닉'], d[y]['로션/에멀젼'], \n\t\t\t\t\t\td[y]['미스트'], d[y]['기타'], d[y]['쿠션'],\n\t\t\t\t\t\td[y]['파운데이션'], d[y]['컨실러'],d[y]['파우더'], d[y]['블러쉬/블러셔/브론징'],\n\t\t\t\t\t\td[y]['컨투어링/하이라이터'], d[y]['프라이머'], d[y]['UV프로텍터'],\n\t\t\t\t\t\td[y]['아이브로우'],d[y]['아이라이너'], d[y]['마스카라'], d[y]['섀도우/글리터'],\n\t\t\t\t\t\td[y]['립/립스틱/틴트'], d[y]['립케어/립밤/립글로스/립 오일'], 
d[y]['zzz']]\n\t\t\t\t'''\n\n\t\t\t\tline.append(d[y][z])\n\t\t\tcsvf.writerow(line)\n\t\t\tline=[]", "def data_from_csv(self, filepath):\n self.dataframe = pd.load_csv(filepath, separator='')", "def export_sensor_data_to_csv(self):\n df = pd.read_sql('SELECT * FROM sensor_data', self.conn)\n df.to_csv('output/sensor_data.csv', index=False)", "def dump_csv():\n df = helper.load_dataframe('asintosku').reset_index()\n df['min'] = None\n df['max'] = None\n df.asin = df.asin + np.where(\n df.isprime == 0, '_seller', '_prime')\n del df['isprime']\n dfold = load_csv()\n merged = dfold.append(df, ignore_index=True, sort=True).sort_values(\n 'min', ascending=False).drop_duplicates(['seller_sku'])\n merged[['asin', 'mean', 'min', 'max', 'seller_sku']].to_csv(\n datafolder+filename, index=False)", "def _csv_download(page):\n # gc = gspread.login(page.timetable.google_user, page.timetable.google_passwd)\n gc = googleoauth.authenticate_google_docs()\n csv_file = gc.open('WebValley2019')\n\n # gsession = gss.Client(page.timetable.google_user, page.timetable.google_passwd)\n # ss = gss.Spreadsheet(page.timetable.spreadsheet)\n # csv_file = gsession.download(ss, gid=page.timetable.spreadsheet_gid)\n # read = csv_file.read()\n read = csv_file.worksheet('TIMETABLE').get_all_values()\n # print \"csv\", read\n return read", "def convert_to_csv(device, signal):\n subject_counter = 0\n for subject_ID in range(1600, 1651):\n data = pd.read_csv(f'./raw/{device}/{signal}/data_{subject_ID}_{signal}_{device}.txt', sep=\",\", header=None)\n data.columns = [\"subject_ID\", \"activity_ID\", \"Timestamp\", f\"x_{device}_{signal}\", f\"y_{device}_{signal}\",\n f\"z_{device}_{signal}\"]\n saveing_directory = f'{device}_{signal}/S{subject_counter}_{device}_{signal}.csv'\n data.to_csv(saveing_directory)\n subject_counter += 1\n print(subject_counter)", "def import_csv_dataset():\n import_fields = pd.read_csv('redacted-2020-june-30-wprdc-.csv', header=None).to_numpy()[0, :]\n import_values = pd.read_csv('redacted-2020-june-30-wprdc-.csv').to_numpy()\n import_values = clean_values(import_values)\n return import_fields, import_values", "def pull_data(raw_file, out_file):\n ifile = codecs.open(raw_file, 'r', encoding='utf_16_le')\n ofile = open(out_file, 'w')\n writer = csv.writer(ofile, delimiter=',', quoting=csv.QUOTE_NONE, lineterminator='\\n')\n # print \"Processing: \", raw_file\n\n is_header = True\n for row in ifile:\n data_row = row\n if not is_header:\n data_row = parse_row(row)\n data_rows.append(data_row)\n else:\n is_header = False\n\n for a_row in data_rows:\n writer.writerow(a_row)\n\n ifile.close()\n ofile.close()", "def csv(self, destination_path):\n # todo - test for single and duplicate base cases\n to_csv(self._axl_data, destination_path)", "def to_csv(data_path):\n news_df, price_df = load_data(data_path)\n\n combined_df = combine_stock_news(news_df, price_df)\n\n combined_df.to_csv(data_path + \"news_price_df.csv\")", "def export_to_csv(self, log):\n if os.path.isfile(self.GENERATE_FILE):\n os.remove(self.GENERATE_FILE)\n\n with open(self.GENERATE_FILE, \"w\") as f:\n f.write(\"date, time, username, succes, label\\n\")\n\n for entry in log:\n f.write(str(entry[0].date()) + \", \"\n + str(self.hms_to_seconds(entry[0])) + \", \"\n + str(entry[1]) + \", \"\n + str(entry[2]) + \", \"\n + str(entry[3])\n + \"\\n\")", "def exportCSV(self, log, csvFile):\n return 0", "def _read_csvs(self):\n self.data = pd.read_csv(self.path+self.name, index_col=0)", "def get_data():\n return pd.read_csv(\n 'cetml1659on.dat',\n 
skiprows=6,\n sep='\\s+',\n na_values=['-99.9', '-99.99'],\n )", "def checkFile(net, name, date, opt, path, minlat, maxlat, minlon, maxlon, variables,estaciones):\n try:\n #net.variables['XLONG'][:]\n #net.variables['XLAT'][:]\n #print(net.variables['XLAT'][:])\n makeCsv(net, date, opt, path, minlat, maxlat, minlon, maxlon, variables, estaciones)\n except KeyError:\n print('error in file: ' + name)", "def guardar_datos(self, archivo):\r\n with open(archivo, \"wb\") as csv_file:\r\n writer = csv.writer(csv_file, delimiter=';')\r\n writer.writerow(\r\n [\"Camion\", \"Carga\", \"Tipo\", \"Peso Final\", \"Operacion\", \"Recurso\", \"Dia\",\r\n \"Arribo\", \"Espera M. O/D\", \"Espera R\", \"Espera P.\", \"Espera T.\", \"Inicio\", \"Fin\",\r\n \"Medio de Almacenamiento\", \"Nivel\"])\r\n for linea in self.datos:\r\n writer.writerow(linea)", "def format_data(file):\r\n \r\n \r\n data = pd.read_csv(file)\r\n data.index = list(data.iloc[:,0])\r\n data = data.iloc[:,1:]\r\n \r\n return data", "def new_csv_imp(infile):\r\n with open(infile, \"r\") as fd:\r\n txt = fd.readlines()\r\n if len(txt) > 1:\r\n if 'Serial' in txt[0]:\r\n print('{:} is Solinst'.format(infile))\r\n if 'UNIT: ' in txt[7]:\r\n level_units = str(txt[7])[5:].strip().lower()\r\n if 'UNIT: ' in txt[12]:\r\n temp_units = str(txt[12])[5:].strip().lower()\r\n f = pd.read_csv(infile, skiprows=13, parse_dates=[[0, 1]], usecols=[0, 1, 3, 4])\r\n print(f.columns)\r\n f['DateTime'] = pd.to_datetime(f['Date_Time'], errors='coerce')\r\n f.set_index('DateTime', inplace=True)\r\n f.drop('Date_Time', axis=1, inplace=True)\r\n f.rename(columns={'LEVEL': 'Level', 'TEMP': 'Temp'}, inplace=True)\r\n level = 'Level'\r\n temp = 'Temp'\r\n\r\n if level_units == \"feet\" or level_units == \"ft\":\r\n f[level] = pd.to_numeric(f[level])\r\n elif level_units == \"kpa\":\r\n f[level] = pd.to_numeric(f[level]) * 0.33456\r\n printmes(\"Units in kpa, converting {:} to ft...\".format(os.path.basename(infile)))\r\n elif level_units == \"mbar\":\r\n f[level] = pd.to_numeric(f[level]) * 0.0334552565551\r\n elif level_units == \"psi\":\r\n f[level] = pd.to_numeric(f[level]) * 2.306726\r\n printmes(\"Units in psi, converting {:} to ft...\".format(os.path.basename(infile)))\r\n elif level_units == \"m\" or level_units == \"meters\":\r\n f[level] = pd.to_numeric(f[level]) * 3.28084\r\n printmes(\"Units in psi, converting {:} to ft...\".format(os.path.basename(infile)))\r\n else:\r\n f[level] = pd.to_numeric(f[level])\r\n printmes(\"Unknown units, no conversion\")\r\n\r\n if temp_units == 'Deg C' or temp_units == u'\\N{DEGREE SIGN}' + u'C':\r\n f[temp] = f[temp]\r\n elif temp_units == 'Deg F' or temp_units == u'\\N{DEGREE SIGN}' + u'F':\r\n printmes('Temp in F, converting {:} to C...'.format(os.path.basename(infile)))\r\n f[temp] = (f[temp] - 32.0) * 5.0 / 9.0\r\n return f\r\n\r\n elif 'Date' in txt[1]:\r\n print('{:} is Global'.format(infile))\r\n f = pd.read_csv(infile, skiprows=1, parse_dates=[[0, 1]])\r\n # f = f.reset_index()\r\n f['DateTime'] = pd.to_datetime(f['Date_ Time'], errors='coerce')\r\n f = f[f.DateTime.notnull()]\r\n if ' Feet' in list(f.columns.values):\r\n f['Level'] = f[' Feet']\r\n f.drop([' Feet'], inplace=True, axis=1)\r\n elif 'Feet' in list(f.columns.values):\r\n f['Level'] = f['Feet']\r\n f.drop(['Feet'], inplace=True, axis=1)\r\n else:\r\n f['Level'] = f.iloc[:, 1]\r\n # Remove first and/or last measurements if the transducer was out of the water\r\n # f = dataendclean(f, 'Level')\r\n flist = f.columns.tolist()\r\n if ' Temp C' in flist:\r\n 
f['Temperature'] = f[' Temp C']\r\n f['Temp'] = f['Temperature']\r\n f.drop([' Temp C', 'Temperature'], inplace=True, axis=1)\r\n elif ' Temp F' in flist:\r\n f['Temperature'] = (f[' Temp F'] - 32) * 5 / 9\r\n f['Temp'] = f['Temperature']\r\n f.drop([' Temp F', 'Temperature'], inplace=True, axis=1)\r\n else:\r\n f['Temp'] = np.nan\r\n f.set_index(['DateTime'], inplace=True)\r\n f['date'] = f.index.to_julian_date().values\r\n f['datediff'] = f['date'].diff()\r\n f = f[f['datediff'] > 0]\r\n f = f[f['datediff'] < 1]\r\n # bse = int(pd.to_datetime(f.index).minute[0])\r\n # f = hourly_resample(f, bse)\r\n f.rename(columns={' Volts': 'Volts'}, inplace=True)\r\n f.drop([u'date', u'datediff', u'Date_ Time'], inplace=True, axis=1)\r\n return f\r\n else:\r\n print('{:} is unrecognized'.format(infile))", "def hoomdlog(filename):\r\n\r\n data = pd.read_csv(filename, sep = '\\s+')\r\n return data", "def csvOutput(cycle, fctimes, beachdata, offshoredata, surfdata, fname='isurf_output.csv', outdir='.'):\n\n datestr = cycle.strftime('%Y%m%d00')\n\n with open(outdir+'/%s' %fname,'w') as outp:\n outp.write(datestr+'\\r\\n')\n for isite in range(len(beachdata['name'])):\n outp.write('\\r\\n')\n outp.write('%s' %beachdata['name'][isite] + '\\r\\n')\n outp.write('%d' %beachdata['type'][isite] + '\\r\\n')\n #outp.write('TI Hsmo Tpmo Dmo Hseq Tpeq DmEq Hsbr Dpbr\\r\\n')\n #outp.write('LT,Wspd,Wdir,Hsmo,Tpmo,Dmo,Tide,Hseq,Tpeq,DmEq,Hsbr,Dpbr,Hlbr,Hhbr,BT\\r\\n')\n outp.write('LT,Wspd,Wdir,Hsmo,Tpmo,Dmo,Hseq,Tpeq,DmEq,Hsbr,Dpbr,Hlbr,Hhbr,BT\\r\\n')\n\n\t # write out to file\n for itime in range(len(fctimes)):\n\n # write out the data values to file\n\t #outp.write ('%02d' %fctimes[lp] + ' %4.2f %4.1f %3d' %tuple([hm0[lp,isite], tp[lp,isite], dirn[lp,isite]]) + \\\n # ' %4.2f %4.1f %3d' %tuple([hsshwd[lp,isite], tpshwd[lp,isite], reldir[lp,isite]]) + ' %4.2f %4.2f' %tuple([hsbkinit[lp,isite], dpsat[lp,isite]]) + '\\r\\n')\n\t outp.write('%02d' %fctimes[itime] + \\\n ',%4.1f' %offshoredata['wspd'][itime,isite] + \\\n #',%3d' %offshoredata['wdir'][itime,isite] + \\\n ',%4.2f' %offshoredata['hm0'][itime,isite] + \\\n ',%4.1f' %offshoredata['tp'][itime,isite] + \\\n ',%3d' %offshoredata['dirn'][itime,isite] + \\\n ',%4.2f' %surfdata['shorewardHs'][itime,isite] + \\\n ',%4.1f' %surfdata['shorewardT'][itime,isite] + \\\n ',%3d' %surfdata['relativeDirn'][itime,isite] + \\\n ',%4.2f' %surfdata['breakerHs'][itime,isite] + \\\n ',%4.2f' %surfdata['saturatedDepth'][itime,isite] + \\\n ',%4.2f' %surfdata['Hb1in3'][itime,isite] + \\\n ',%4.2f' %surfdata['Hb1in10'][itime,isite] + \\\n ',%1d' %surfdata['breakerType'][itime,isite] + '\\r\\n')\n outp.close()", "def importar_visualizar_tabela(self):\r\n\r\n self.tabela_clientes = pd.read_csv('telecom_users.csv') # armazenando arquivo csv em uma variavel\r\n self.tabela_clientes = self.tabela_clientes.drop([\"Unnamed: 0\"], axis=1) # apagando a coluna Unnamed: 0, axist=1 -> para excluir a coluna, axist=0 -> excluir a linha (exist = eixo)\r\n print(self.tabela_clientes)\r\n # print(self.tabela_clientes.columns) # para mostrar todas as colunas da tabela \r\n self.tabela_clientes['NovaColuna'] = 1 # criar uma nova coluna se não existir, se caso ja exista, irá substituir todos os valores na coluna para 1\r", "def csv_import():\n activities = current_user.get_supervised_activities()\n if activities == []:\n flash(\"Fonction non autorisée.\", \"error\")\n return redirect(url_for(\"event.index\"))\n\n choices = [(str(a.id), a.name) for a in activities]\n form = CSVForm(choices)\n\n if not 
form.is_submitted():\n form.description.data = current_app.config[\"DESCRIPTION_TEMPLATE\"]\n\n failed = []\n if form.validate_on_submit():\n activity_type = ActivityType.query.get(form.type.data)\n\n file = form.csv_file.data\n processed, failed = process_stream(\n file.stream, activity_type, form.description.data\n )\n\n flash(\n f\"Importation de {processed-len(failed)} éléments sur {processed}\",\n \"message\",\n )\n\n return render_template(\n \"import_csv.html\",\n form=form,\n failed=failed,\n title=\"Création d'event par CSV\",\n )", "def read_data(self):\n print 'Reading Data ...'\n fname = self.wpath + 'Data/' + self.city[2] + '-' + self.application + '.csv.bz2'\n self.dataset = loadtxt(fname, skiprows=1,\n dtype=[('lat', 'f8'), ('lng', 'f8'), ('time', 'i4'), ('user', 'S20')],\n usecols=(0, 1, 2, 3), delimiter=';', comments='#')", "def readFile(self,path):\n _file = pd.read_csv(path, sep='\\s+', engine='python', header=None)\n self._dataTable = pd.DataFrame(_file.iloc[:, 3:15])\n self._dataTable.columns = ['MGEMLEEF Avg age', 'MOSHOOFD Customer main type', 'MGODRK Roman catholic',\n 'MGODPR Protestant', 'MGODOV Other religion', 'MGODGE No religion', 'MRELGE Married',\n 'MRELSA Living together', 'MRELOV Other relation', 'MFALLEEN Singles',\n 'MFGEKIND Household without children', 'MFWEKIND Household with children']", "def to_csv(self, filename):\n self.data.to_csv(filename)", "def to_csv(self, filename):\n self.data.to_csv(filename)", "def tour_data(self):\n\n start_time = time.time()\n\n # load tour list into Pandas DataFrame\n tours = pd.read_csv(self.scenario_path + \"/output/jointTourData_\" + str(self.iteration) + \".csv\",\n usecols=[\"hh_id\", \"tour_id\", \"tour_participants\"])\n\n end_time = time.time()\n time_taken = end_time - start_time\n print(\"read tour data: \", str(datetime.timedelta(seconds=round(time_taken))))\n\n # return fields of interest\n return tours[[\"hh_id\",\n \"tour_id\",\n \"tour_participants\"]]", "def load_csv(filename):\n # Open csvfile\n with open(filename) as csvfile:\n reader = csv.DictReader(csvfile)\n\n # Put data in gloabal list\n for row in reader:\n # Get data of subject with either or both milk and peanut allergy\n if row[\"MILK_ALG_START\"] != \"NA\" or row[\"PEANUT_ALG_START\"] != \"NA\":\n sub_list = list()\n for key in DATA_KEYS:\n sub_list.append(row[key])\n\n # Add data of subject to all data \n data_list.append(sub_list)", "def handle_csv(self):\n try:\n reader = csv.reader(open(self.options.datafile, 'r'))\n except IOError:\n errormsg(_('Cannot read \"{}\"'.format(self.options.datafile)))\n raise Exception(_('Cannot read \"{}\"'.format(self.options.datafile)))\n if self.options.var_type == 'name':\n try:\n self.header = reader.next()\n except StopIteration:\n errormsg(_('Data file \"{}\" contains no data'.format(\n self.options.datafile)))\n raise Exception(_('Data file \"{}\" contains no data'.format(\n self.options.datafile)))\n self.data = []\n for row in reader:\n self.data.append(row)", "def write_data_files(self):\n # build our strings\n header_string = \"\"\n data_string = \"\"\n for value in self.data.values():\n header_string += value[2] + \",\"\n if value[0] != None:\n data_string += value[1].format(value[0])\n else:\n data_string += \",\"\n # remove the extra comma and replace with a newline\n header_string = header_string[:-1]\n header_string += \"\\n\"\n data_string = data_string[:-1]\n data_string += \"\\n\"\n \n # show what we built\n #print(header_string)\n #print(data_string)\n \n # open a temp file\n with 
open(\"{:s}\\\\VWSInput\\\\temp_data.csv\".format(self.path), \"w\") as temp_file:\n #temp_file.write(header_string)\n temp_file.write(data_string)\n \n # move to the input file\n filetools.mv(\"{:s}\\\\VWSInput\\\\temp_data.csv\".format(self.path), \"{:s}\\\\VWSInput\\\\data.csv\".format(self.path))\n \n return", "def _write_csv(self):\n\n # add the label to the header\n if self.input_data.get_value(InputType.TIME_PERIOD) == 'all':\n self.header.append('Date')\n else:\n self.header.append('sample id')\n\n key_list = []\n\n for i, cube in enumerate(self.cube_list):\n if self.input_data.get_value(InputType.TIME_PERIOD) == 'all':\n self._write_sample_with_date(cube, i, key_list)\n else:\n self._write_sample(cube, i, key_list)\n\n output_data_file_path = self._get_full_file_name()\n self._write_data_dict(output_data_file_path, key_list)\n\n return [output_data_file_path]", "def csv_reader(file_obj):\n global gagnant\n global gain\n # le tableau qui contient les gagnants\n reader = csv.reader(file_obj, delimiter=';')\n gagnant = next(reader)\n gain = next(reader)\n gagnant = [list(map(int,gagnant))]\n gagnant=gagnant[0]\n gain = [list(map(int,gain))]\n gain=gain[0]", "def CsvToJson(nomfichierJson):\n with open(\"save/save.csv\",'r') as f:\n liste_cube = list()\n liste_robot = list()\n \"\"\"deux listes vides pour contenir les objets charges\"\"\"\n for line in f:\n ligne=line.split(\";\")\n if ligne[0] == 'Arene':\n \"\"\"On cree une nouvelle arene avec les parametres trouves sur la ligne, separes par des ';' \"\"\"\n arene = Arene(int(ligne[1]),int(ligne[2]),int(ligne[3]),liste_cube,liste_robot)\n arene.liste_robot=liste_robot\n elif ligne[0] == 'Cube':\n \"\"\"On ajoute le cube a la liste de cube de l'arene, avec parametres trouves sur la ligne\"\"\"\n arene.liste_cube.append(Cube(int(ligne[1]),int(ligne[2]),int(ligne[3]),int(ligne[4]),int(ligne[5]),int(ligne[6])))\n elif ligne[0] == 'Mur':\n arene.liste_cube.append(Mur(int(ligne[1]),int(ligne[2]),int(ligne[3]),int(ligne[4]),int(ligne[5]),int(ligne[6])))\n elif ligne[0] == 'Sol':\n arene.liste_cube.append(Sol(int(ligne[1]),int(ligne[2]),int(ligne[3]),int(ligne[4]),int(ligne[5])))\n elif ligne[0] == 'Robot':\n (x,y,z)=literal_eval(ligne[1])\n ((cax,cay),(cbx,cby),(ccx,ccy),(cdx,cdy))=literal_eval(ligne[2])\n (a,b,c)=literal_eval(ligne[3])\n (lo,la,ha)=literal_eval(ligne[4])\n vitesse=literal_eval(ligne[5])\n arene.liste_robot.append(Robot((x,y,z),((cax,cay),(cbx,cby),(ccx,ccy),(cdx,cdy)),(a,b,c),(lo,la,ha),vitesse))\n saveFic(arene,nomfichierJson)", "def save_csv(self, filename: str, type='n', **args):\n if type == 'n':\n df = self.export_nodes()\n else:\n df = self.export_edges()\n df.to_csv(filename, index=False)", "def to_csv(self, path):\n for table in ['datasets', 'dataruns', 'hyperpartitions', 'classifiers']:\n df = pd.read_sql('SELECT * FROM %s' % table, self.session.bind)\n df.to_csv(os.path.join(path, '%s.csv' % table), index=False)", "def spectre_csv(f):\n \n skip = 0\n while True:\n try: \n wav, flux = np.loadtxt(f, delimiter = ',',\n skiprows = skip, unpack = True)\n \n except ValueError:\n # Si les première lignes ont un en-tête\n skip += 1\n \n else:\n break\n \n return wav,flux", "def writeToCsv(clue):\n filename = 'new_clue_import_for_editing.csv'\n f = open(filename, 'w')\n fieldnames = list(set([m['Clue_field'] for m in mapping]))\n fieldnames.append('date')\n writer = csv.DictWriter(f, fieldnames=fieldnames)\n writer.writeheader()\n for c in clue:\n writer.writerow(c)\n f.close()", "def load_data_csv():\r\n \r\n # Load 
lookup table\r\n path = 'data/id_lookup.csv'\r\n lookup_table = pd.read_csv(path, index_col=0)\r\n\r\n # Load song data\r\n path2 = 'data/data_lyrics_features.csv'\r\n data = pd.read_csv(path2, index_col=0)\r\n\r\n return data, lookup_table", "def reformate_park_csv(list_num_park=[1, 2, 3],\n list_date_park=['2015', '2016'],\n sep=';'):\n\n # Reading parkX_20XX.csv ...\n df = create_df_park_data(list_num_park, list_date_park)\n\n # Dropping Useless columns for speed up\n df.drop(park_col_type['drop'], axis=1, inplace=True)\n\n # Converting in datetime types and keeping in GMT+01:\n print(\"Converting 'Date' column in datetime type ...\")\n df['Date'] = pd.to_datetime(df['Date'], format=\"%d/%m/%Y %H:%M\")\n\n # we create an ident for each hour \"Date_hour_int\"\n print('Constructing id for each date & hour ...')\n df[\"Date_hour_int\"] = df[\"Date\"].dt.year*10**6 + df[\"Date\"].dt.month*10**4\\\n + df[\"Date\"].dt.day*10**2 + df[\"Date\"].dt.hour\n\n # we create a dataframe with \"production_mean_hour\" value for each\n # Eolienne*date_hour_int\n print(\"Computing 'Production_mean_hour' ...\")\n df_product_mean = df[df[\"Fonctionnement\"] == 1]\\\n .groupby([\"Eolienne\", \"Date_hour_int\"])[\"Production\"]\\\n .mean().reset_index().rename(columns={\"Production\": \"Production_mean_hour\"})\n\n # we add this value in the initial dataset \"df\"\n df = pd.merge(df, df_product_mean,\n on=[\"Eolienne\", \"Date_hour_int\"], how=\"left\")\n df = df[park_col_type['keep']]\n\n # output csv files per turbine :\n for num_turb in range(1, 12):\n fname_out = data_reformated_dir + 'turb_' + str(num_turb) + '.csv'\n print('Storing ' + fname_out + ' ...')\n df_tmp = df.loc[df['Eolienne'] == 'Turb'+str(num_turb)]\n df_tmp.to_csv(fname_out, sep=sep, index=False)", "def save_csv_file():\n global output_on_display, import_lst, column_names, data\n if data_base == '':\n mistake_load_table()\n else:\n column_names = data[0]\n save_name = asksaveasfilename(title=\"Select file\", filetypes=((\"CSV\", \"*.csv\"), (\"all files\", \"*.*\")),\n confirmoverwrite=True, defaultextension='.csv')\n step = len(column_names)\n data_csv = import_lst\n if len(data_csv[0]) == step:\n pass\n else:\n data_csv = import_lst[step::]\n\n with open(save_name, 'w+') as csv_file:\n csv_writer = csv.writer(csv_file)\n csv_writer.writerow(column_names)\n csv_writer.writerows(data_csv)", "def fao1():\n df = pd.read_csv(\"/Users/Elsa/Desktop/Covid_Agosto/Proyecto_Elsa/Proyecto_individual_Elsa/src/main/FAO.csv\",encoding=\"ISO-8859-1\")\n \n return df", "def user_list_csv():\n us = user.User.query.all()\n filename = 'xxx.csv'\n csv_name = _rename_file(filename)\n url = app.config['CSV_FILES_DEST'] + '/' + csv_name\n with codecs.open(url, 'wb') as csvfile:\n #fieldnames = ['账号', '姓名', '描述', '角色', '邮箱', '电话', '工作电话', '公司', '部门', '职位']\n fieldnames = []\n if len(us) > 0:\n fieldnames = us[0].to_csv_dict().keys()\n writer = unicodecsv.writer(csvfile, encoding='utf-8-sig')\n writer.writerow(fieldnames)\n for u in us:\n dct = u.to_csv_dict()\n n_items = {}\n for name in fieldnames:\n if dct[name] is not None:\n n_items[name] = dct[name]\n else:\n n_items[name] = ''\n writer.writerow(n_items.values())\n return send_file(url)", "def load_csv_model(filename) -> tuple:\n dat_sci = pd.read_csv(resources_folder(filename), index_col=0)\n commenter('data from ' + filename, lambda: print(dat_sci))\n\n ind = dat_sci.index\n # commenter('index', lambda: print(ind))\n col = dat_sci.columns\n # commenter('columns', lambda: print(col))\n # self.data = 
np.asmatrix(dat_sci.values)\n # commenter('data', lambda: print(self.data))\n # print(type(dat_sci))\n\n return dat_sci, ind, col", "def write_csv(elongation, file_name):\n e = elongation\n\n with open(file_name, 'w') as f:\n f.write(f\"\"\"\\\nBreak Load, {e.break_load()}\nBreak Strength, {e.break_strength()}\nBreak Elongation, {e.break_elongation()}\nYield Load, {e.yield_load()}\nYield Strength, {e.yield_strength()}\nYield Elongation, {e.yield_elongation()}\nGauge Length, {e.gauge_length}\nSample Width, {e.sample_width}\nSample Thickness, {e.sample_thickness}\n\nPoints\n %, N\"\"\")\n for x, y in zip(e.xs, e.ys):\n f.write(f'\\n{x:>8.4f}, {y:>8.4f}')", "def open_file(path):\r\n f = open(path, encoding='utf-8', errors='ignore')\r\n data = f.readlines()\r\n lst_with_data = []\r\n for i in data:\r\n i = i.replace('\"', ' ').replace(\"\\t\", ' ').replace(\"\\n\", \" \").replace(\"'\", ' ').split(' ')\r\n lst_with_data.append(i)\r\n res_lst = [] \r\n for i in lst_with_data:\r\n append_lst = []\r\n for j in i:\r\n if j.isdigit() or j == \"-\":\r\n append_lst.append(j) \r\n if len(append_lst) != 0: \r\n res_lst.append(append_lst) \r\n res_lst = res_lst[1:]\r\n res = [] \r\n for i in res_lst:\r\n if len(i) != len(res_lst[0]):\r\n i = i[1:]\r\n res.append(i) \r\n else:\r\n res.append(i) \r\n ln = len(res[0])\r\n data_by_years = []\r\n for i in range(ln):\r\n data_y = []\r\n for j in res:\r\n data_y.append(j[i])\r\n data_by_years.append(data_y) \r\n dict_by_years = {}\r\n dict_with_total = file_with_total_inform(\"Total_Lviv.csv\")\r\n for i in data_by_years:\r\n dict_by_years[int(i[0])] = causes(i)\r\n dict_by_years[int(i[0])].update({\"Total\": dict_with_total[i[0]]})\r\n res_dict = {}\r\n res_dict[\"Lviv\"] = dict_by_years \r\n return res_dict", "def CSV_Write_File( self, cvsfilename ):\n self.SVF = open( cvsfilename, 'w' )\n self.SVF.write( 'Site/Plot, Age, Tree#, OrigTree#, Species, Dia, Ht, Live/Dead, Status, Condition, TPA, CR, Crad, ' )\n self.SVF.write( 'BrokenHt, BrokenOff, Bearing, DMR, LeanAngle, RootWad, X, Y\\n' )\n stands = self.Data.Stand.keys()\n stands.sort()\n for s in stands:\n print( s )\n ymin = 9999\n ymax = 0\n trees = self.Data.Stand[s].Tree.keys()\n for t in trees:\n years = self.Data.Stand[s].Tree[t].Year.keys()\n for y in years:\n if( y < ymin ): ymin = y\n if( y > ymax ): ymax = y\n years = range( ymin, ymax+1, 5 )\n for y in years:\n trees = self.Data.Stand[s].Tree.keys()\n trees.sort()\n for t in trees:\n if( not self.Data.Stand[s].Tree.has_key(t) ): continue\n if( not self.Data.Stand[s].Tree[t].Year.has_key(y) ): continue\n (species, dbh, ht, tpa, treeno, live, cclass, status) = ( self.Data.Stand[s].Tree[t].Species,\n self.Data.Stand[s].Tree[t].Year[y].DBH, self.Data.Stand[s].Tree[t].Year[y].Height,\n self.Data.Stand[s].Tree[t].Year[y].TPA, self.Data.Stand[s].Tree[t].TreeNumber,\n self.Data.Stand[s].Tree[t].Year[y].Live, self.Data.Stand[s].Tree[t].Year[y].Condition,\n self.Data.Stand[s].Tree[t].Year[y].Status )\n self.SVF.write( '%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s\\n' % \\\n (s, y, t, treeno, species, dbh, ht, live, status, cclass, tpa ) )\n self.SVF.close()", "def procesarFilaCuerpo(fila):\r\n csv = \"\"\r\n columnas = fila.split(\"</td>\")\r\n for col in columnas:\r\n csv += procesarColumnaCuerpo(col)+\";\"\r\n \r\n csv = csv[:-1] #quitar el śltimo ;\r\n print csv\r\n return csv", "def fertility():\n fertility_csv = pd.read_csv(csv_path(\"attachment.csv\"), usecols=[1, 7], index_col=0)\n fertility_csv.columns = [\"Fertility\"]\n return fertility_csv", "def 
write_csv(file_name, data):\n\n with open(file_name, \"w\") as fp:\n\n writer = RiscvInstructionTraceCsv(fp)\n writer.start_new_trace()\n\n for entry in data:\n writer.write_trace_entry(entry)", "def save_data_csv(self, filename):\n #add masked entry as last column\n fields = numpy.r_[self.colLabels, ['masked']]\n\n #add dynamic expression to column headers\n for k, col in enumerate(self.dynamic_cols):\n fields[col] += \" [%s]\"%self.dynamic_expressions[k] if self.dynamic_expressions[k] else ''\n\n #add custom labels to field names \n for col, fieldname in enumerate(fields):\n custom_label = self.column_labels_custom.get(col)\n fields[col] += \" (%s)\"%custom_label if custom_label else ''\n\n fields[col] += \" {*}\" if (col in self.colsel and (fieldname.find('user')==0 or col in self.dynamic_cols)) else ''\n \n #add options\n \n \n #don't save last two lines\n data = numpy.c_[self.data[:-2], self.rowmask[:-2]]\n\n with open(filename, 'wb') as f:\n import csv\n writer = csv.writer(f)\n writer.writerow(fields)\n #writer.writerows(data)\n for row in data:\n r = [entry.encode('latin_1') if type(entry) is types.UnicodeType else entry for entry in row]\n writer.writerow(r)\n self.modified = False", "def dotAstro_to_csv(id):\n id = str(id)\n isError = False\n if(\"http\" in id):\n url = id\n elif id.isdigit():\n url = \"http://dotastro.org/lightcurves/vosource.php?Source_ID=\" + id\n else:\n print(\"dotAstro ID not a digit.\")\n try:\n lc = urllib.request.urlopen(url).read()\n if lc.find(\"<TD>\") == -1:\n raise urllib.error.URLError('No data for specified source ID.')\n\n except (IOError, urllib.error.URLError) as error:\n print(\"Could not read specified file.\", id, error)\n isError = True\n return False\n except Exception as error:\n print(\"Error encountered.\", id, error)\n isError = True\n return False\n\n lcs = []\n numlcs = 0\n lcdata = lc\n soup = BeautifulSoup(lcdata)\n try:\n ra = float(soup('position2d')[0]('value2')[0]('c1')[0]\n .renderContents())\n dec = float(soup('position2d')[0]('value2')[0]('c2')[0]\n .renderContents())\n except IndexError:\n print('position2d/value2/c1 or c2 tag not present in light curve file')\n ra, dec = [None, None]\n time_unit = []\n for timeunitfield in soup(ucd=\"time.epoch\"):\n time_unit.append(timeunitfield['unit'])\n\n for data_table in soup('tabledata'):\n csv_str = \"\"\n for row in data_table('tr'):\n tds = row(\"td\")\n if len(tds) == 3:\n csv_str += ','.join([\n str(tds[0].renderContents()),\n str(tds[1].renderContents()),\n str(tds[2].renderContents())]) + '\\n'\n\n if len(csv_str) > 0:\n lcs.append(csv_str)\n numlcs += 1\n\n return lcs", "def sbj_to_csv(fname, timestamp):\n f = open(fname, 'r')\n lines = [ line.split('\\t') for line in f.read().split(IFS) ]\n maxlen = max( [ len(line) for line in lines ] )\n times = ['timestamp'] + [timestamp]*(maxlen - 1)\n lines = [times] + [ line for line in lines if len(line) == maxlen ]\n lines = zip(*lines) # this transposes the data\n lines = [ '\\t'.join(line) for line in lines ]\n tdl_to_csv('%s_questionnaire.csv' % timestamp, lines)", "def read_full_data(self):\n x=[]\n y=[]\n z=[]\n with open(self.file, 'r') as rf:\n reader = csv.reader(rf, delimiter=',')\n for row in reader:\n x.append(row[0])\n # Remove header from csv file, if it exists\n if x[0].split()[0] == '%':\n x.remove(row[0])\n else:\n y.append(row[1])\n z.append(row[2])\n return x,y,z", "def load_data(self, filename):\r\n #sqlcontext = SQLContext(self.sc)\r\n #df = 
sqlcontext.read.format('com.databricks.spark.csv').options(header='false', inferschema='true').load(filename)\r\n #df = sc.textFile(r\"C:\\Users\\mohan\\Downloads\\patches.csv\").map(lambda line: line.split(\",\"))\r\n #print (df.count())\r\n df = self.sc.textFile(filename).map(lambda line: line.split(\",\"))\r\n l = df.map(lambda w: [int(float(c)) for c in w]).zipWithIndex()\r\n return l\r\n raise NotImplementedError", "def _save_data(self):\n self.data.to_csv('data/c&le/{}'.format(self.name))", "def save_as_csv(time_series, data, path_and_file_name):\n\n parent_name = \"test\"\n parent_uqid = uuid.uuid4()\n\n file_obj = open(path_and_file_name, 'w')\n file_obj.write('version,'+str(2)+'\\n')\n file_obj.write('numOfCH,'+str(1)+'\\n')\n file_obj.write('type, scan\\n')\n file_obj.write('ch_type,'+str(0)+'\\n')\n\n file_obj.write('carpet pos,'+str(0)+'\\n')\n file_obj.write('parent_name,'+str(parent_name)+'\\n')\n file_obj.write('parent_uqid,'+str(parent_uqid)+'\\n')\n file_obj.write('parent_filename,'+str(path_and_file_name)+'\\n')\n\n file_obj.write('pc, 0\\n')\n file_obj.write('Time (ns), CH0 Auto-Correlation\\n')\n for time_step in range(0, time_series.shape[0]):\n file_obj.write(str(float(time_series[time_step]))+','+str(data[time_step])+ '\\n')\n file_obj.write('end\\n')\n\n file_obj.close()", "def to_csv(self, \n last_match_id, \n first_match_id = 0, \n file_count = 20, \n start_file = 0, \n matches_per_file = 20000):\n for i in range(start_file, start_file + file_count):\n print(i)\n last_match_id_current = last_match_id - i * matches_per_file\n file_name = 'rawdata_' + str(i) + '.csv'\n currunt_dataframe = self.mine_data(file_name = file_name,\n first_match_id = first_match_id,\n last_match_id = last_match_id_current,\n stop_at = matches_per_file)\n currunt_dataframe.to_csv('rawdata_' + str(i) + '.csv')", "def read_data():\n data = pd.read_csv('input_data/Preply_tutor_views_datasaet.csv')\n return data", "def csvdata():\n return render_template(\"data.html\")", "def run_create_hyper_file_from_csv():\n if args.preprocessed:\n print('running on 4 columns')\n else:\n print('running on 16 columns')\n\n load_time = -1\n query_time = -1\n tstart = time.time()\n path_to_database = Path(\"lineitem.hyper\")\n\n # Optional process parameters.\n # They are documented in the Tableau Hyper documentation, chapter \"Process Settings\"\n # (https://help.tableau.com/current/api/hyper_api/en-us/reference/sql/processsettings.html).\n process_parameters = {\n # Limits the number of Hyper event log files to two.\n #\"log_file_max_count\": \"2\",\n # Limits the size of Hyper event log files to 100 megabytes.\n #\"log_file_size_limit\": \"100M\"\n \"soft_concurrent_query_thread_limit\" : \"16\",\n \"hard_concurrent_query_thread_limit\" : \"16\",\n \"memory_limit\" : \"100g\"\n }\n\n # single threaded?\n if args.single_threaded:\n process_parameters[\"soft_concurrent_query_thread_limit\"] = \"1\"\n process_parameters[\"hard_concurrent_query_thread_limit\"] = \"1\"\n\n result = None\n\n # Starts the Hyper Process with telemetry enabled to send data to Tableau.\n # To opt out, simply set telemetry=Telemetry.DO_NOT_SEND_USAGE_DATA_TO_TABLEAU.\n with HyperProcess(telemetry=Telemetry.DO_NOT_SEND_USAGE_DATA_TO_TABLEAU,\n parameters=process_parameters) as hyper:\n\n # Optional connection parameters.\n # They are documented in the Tableau Hyper documentation, chapter \"Connection Settings\"\n # (https://help.tableau.com/current/api/hyper_api/en-us/reference/sql/connectionsettings.html).\n connection_parameters = 
{\"lc_time\": \"en_US\"}\n\n # Creates new Hyper file \"customer.hyper\".\n # Replaces file with CreateMode.CREATE_AND_REPLACE if it already exists.\n with Connection(endpoint=hyper.endpoint,\n database=path_to_database,\n create_mode=CreateMode.CREATE_AND_REPLACE,\n parameters=connection_parameters) as connection:\n\n table_name = ''\n if args.preprocessed:\n connection.catalog.create_table(table_definition=lineitem_table_preprocessed)\n table_name = lineitem_table_preprocessed.table_name\n else:\n connection.catalog.create_table(table_definition=lineitem_table)\n table_name = lineitem_table.table_name\n\n # Using path to current file, create a path that locates CSV file packaged with these examples.\n path_to_csv = args.data_path\n\n # Load all rows into \"Lineitem\" table from the CSV file.\n # `execute_command` executes a SQL statement and returns the impacted row count.\n count_in_lineitem_table = connection.execute_command(\n command=f\"COPY {table_name} from {escape_string_literal(path_to_csv)} with \"\n f\"(format csv, NULL 'NULL', delimiter '|')\")\n\n print(f\"The number of rows in table {lineitem_table.table_name} is {count_in_lineitem_table}.\")\n load_time = time.time() - tstart\n print('Loading CSV to Hyper took {}s'.format(load_time))\n tstart = time.time()\n # issue query\n # here, TPC-H Q6\n # SELECT\n # sum(l_extendedprice * l_discount) as revenue\n # FROM\n # lineitem\n # WHERE\n # l_shipdate >= date '1994-01-01'\n # AND l_shipdate < date '1994-01-01' + interval '1' year\n # AND l_discount between 0.06 - 0.01 AND 0.06 + 0.01\n # AND l_quantity < 24;\n\n q = ''\n if args.preprocessed:\n q = f\"\"\"SELECT\n sum(l_extendedprice * l_discount) as revenue\nFROM\n {table_name}\nWHERE\n l_shipdate >= 19940101\n AND l_shipdate < 19950101\n AND l_discount between 0.06 - 0.01 AND 0.06 + 0.01\n AND l_quantity < 24\"\"\"\n else:\n q = f\"\"\"SELECT\n sum(l_extendedprice * l_discount) as revenue\nFROM\n {table_name}\nWHERE\n l_shipdate >= date '1994-01-01'\n AND l_shipdate < date '1994-01-01' + interval '1' year\n AND l_discount between 0.06 - 0.01 AND 0.06 + 0.01\n AND l_quantity < 24\"\"\"\n\n result = connection.execute_list_query(query=q)\n query_time = time.time() - tstart\n print('Query took {}s'.format(query_time))\n print('Result::')\n print(result)\n \n print(\"The connection to the Hyper file has been closed.\")\n print(\"The Hyper process has been shut down.\")\n print('framework,version,load,query,result\\n{},{},{},{},{}'.format('hyper',hyperversion,load_time, query_time, str(result)))", "def dwn_rel_sup_csv(request):\n i = int(request.GET.get('i'))\n \n return FileResponse(open('temp/relation_support_datasets/relation_support_dataset_{}_{}.csv'.format(i, request.user.username),'rb'))", "def dlCsvReport(self):\r\n requestElems = {'xf': 'csv'}\r\n requestElems.update(self.getReportConfig())\r\n \r\n csvdata = self.sendRequest(self.reportFormURL, self.fileOpener,\r\n requestElems, 'POST').read()\r\n\r\n self.writeExportFile('csv', csvdata)" ]
[ "0.7196465", "0.6494136", "0.64422953", "0.64121443", "0.63839144", "0.634418", "0.63111466", "0.62877053", "0.6264619", "0.624226", "0.6232147", "0.6205612", "0.61853033", "0.6169304", "0.6101325", "0.6096724", "0.6090506", "0.6071633", "0.6044117", "0.5981105", "0.597794", "0.59596777", "0.593063", "0.5930304", "0.59272367", "0.59003043", "0.5888533", "0.58680445", "0.5863402", "0.585969", "0.5851674", "0.58493316", "0.5848923", "0.5844477", "0.5842624", "0.58425945", "0.5838598", "0.58366406", "0.5831524", "0.58186835", "0.58100235", "0.5806256", "0.5801715", "0.5785666", "0.5785593", "0.57712597", "0.57699233", "0.57656366", "0.5759774", "0.57590723", "0.57516795", "0.5749918", "0.5745144", "0.57442147", "0.5741302", "0.5728066", "0.57122904", "0.57079273", "0.5701129", "0.56961244", "0.569547", "0.56856054", "0.5680901", "0.5680702", "0.5680702", "0.56662637", "0.56658393", "0.5665457", "0.56615436", "0.565376", "0.5652166", "0.5646607", "0.5638974", "0.56386274", "0.5638404", "0.5633991", "0.5631685", "0.56315714", "0.56293285", "0.56278175", "0.5626634", "0.56247634", "0.5617451", "0.56171924", "0.56155175", "0.5613028", "0.561187", "0.5610767", "0.5610429", "0.560871", "0.5606258", "0.5596317", "0.55886096", "0.5586897", "0.55814844", "0.558093", "0.55793864", "0.55752164", "0.5572047", "0.55720305", "0.5570506" ]
0.0
-1
Run ShRec3D on all data in the data directory
import json
import os
import time
import traceback

import numpy as np


def generate_data(out_fname, data_directory):
    """Run ShRec3D on all data in the data directory and record per-file timings."""
    def store_result(duration, loci_number):
        """Store result of current timing run."""
        print(' %ds for %d loci' % (duration, loci_number))

        # load any previously stored timings before appending the new one
        if os.path.isfile(out_fname):
            with open(out_fname, 'r') as fd:
                cur = json.load(fd)
        else:
            cur = []

        with open(out_fname, 'w') as fd:
            cur.append((loci_number, duration))
            json.dump(cur, fd)

    for fn in os.listdir(data_directory):
        fname = os.path.join(data_directory, fn)

        print('Loading "%s"...' % fname, end=' ', flush=True)
        contacts = np.loadtxt(fname)
        print('Done')

        start = time.time()
        try:
            # apply_shrec3d is assumed to be defined elsewhere in this module
            apply_shrec3d(contacts)
        except Exception:
            print('>>> Some error occurred')
            traceback.print_exc()
        end = time.time()

        store_result(end - start, contacts.shape[0])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run_dataset(data: DataSetBase) -> None:\n\n tracks_manager = data.load_tracks_manager()\n reconstructions = data.load_reconstruction()\n\n all_shot_ids = set(tracks_manager.get_shot_ids())\n for r in reconstructions:\n for shot in r.shots.values():\n if shot.id in all_shot_ids:\n vertices, faces = mesh.triangle_mesh(shot.id, r, tracks_manager)\n shot.mesh.vertices = vertices\n shot.mesh.faces = faces\n\n data.save_reconstruction(\n reconstructions, filename=\"reconstruction.meshed.json\", minify=True\n )", "def run_dataset(data: DataSetBase):\n\n tracks_manager = data.load_tracks_manager()\n report, reconstructions = reconstruction.incremental_reconstruction(\n data, tracks_manager\n )\n data.save_reconstruction(reconstructions)\n data.save_report(io.json_dumps(report), \"reconstruction.json\")", "def test_3d():\n dic, data = ng.bruker.read(os.path.join(DATA_DIR, \"bruker_3d\"))\n assert dic['FILE_SIZE'] == 91226112\n assert data.shape == (116, 128, 768)\n assert round(data[0, 0, 40].real, 2) == 18.0\n assert round(data[0, 0, 40].imag, 2) == -66.0\n assert round(data[5, 13, 91].real, 2) == 1138.0\n assert round(data[5, 13, 91].imag, 2) == 3482.0\n write_readback(dic, data)", "def process_scene_data(self, scene, data, tmp_dir):\n pass", "def main():\n onlyfiles = [f for f in listdir(RAWDATA_PATH) if isfile(join(RAWDATA_PATH, f))]\n for file in onlyfiles:\n create_RCSB_fastas(file)", "def read_data(args):\n\n print(\"Start read_data\")\n t_tot = 0 # sum of times for the all dataset\n date_dirs = os.listdir(args.path_data_base)\n for n_iter, date_dir in enumerate(date_dirs):\n # get access to each sequence\n path1 = os.path.join(args.path_data_base, date_dir)\n if not os.path.isdir(path1):\n continue\n date_dirs2 = os.listdir(path1)\n\n for date_dir2 in date_dirs2:\n path2 = os.path.join(path1, date_dir2)\n if not os.path.isdir(path2):\n continue\n # read data\n oxts_files = sorted(glob.glob(os.path.join(path2, 'oxts', 'data', '*.txt')))\n oxts = KITTIDataset.load_oxts_packets_and_poses(oxts_files)\n\n \"\"\" Note on difference between ground truth and oxts solution:\n - orientation is the same\n - north and east axis are inverted\n - position are closed to but different\n => oxts solution is not loaded\n \"\"\"\n\n print(\"\\n Sequence name : \" + date_dir2)\n if len(oxts) < KITTIDataset.min_seq_dim: #  sequence shorter than 30 s are rejected\n cprint(\"Dataset is too short ({:.2f} s)\".format(len(oxts) / 100), 'yellow')\n continue\n lat_oxts = np.zeros(len(oxts))\n lon_oxts = np.zeros(len(oxts))\n alt_oxts = np.zeros(len(oxts))\n roll_oxts = np.zeros(len(oxts))\n pitch_oxts = np.zeros(len(oxts))\n yaw_oxts = np.zeros(len(oxts))\n roll_gt = np.zeros(len(oxts))\n pitch_gt = np.zeros(len(oxts))\n yaw_gt = np.zeros(len(oxts))\n t = KITTIDataset.load_timestamps(path2)\n acc = np.zeros((len(oxts), 3))\n acc_bis = np.zeros((len(oxts), 3))\n gyro = np.zeros((len(oxts), 3))\n gyro_bis = np.zeros((len(oxts), 3))\n p_gt = np.zeros((len(oxts), 3))\n v_gt = np.zeros((len(oxts), 3))\n v_rob_gt = np.zeros((len(oxts), 3))\n\n k_max = len(oxts)\n for k in range(k_max):\n oxts_k = oxts[k]\n t[k] = 3600 * t[k].hour + 60 * t[k].minute + t[k].second + t[\n k].microsecond / 1e6\n lat_oxts[k] = oxts_k[0].lat\n lon_oxts[k] = oxts_k[0].lon\n alt_oxts[k] = oxts_k[0].alt\n acc[k, 0] = oxts_k[0].af\n acc[k, 1] = oxts_k[0].al\n acc[k, 2] = oxts_k[0].au\n acc_bis[k, 0] = oxts_k[0].ax\n acc_bis[k, 1] = oxts_k[0].ay\n acc_bis[k, 2] = oxts_k[0].az\n gyro[k, 0] = oxts_k[0].wf\n gyro[k, 1] = oxts_k[0].wl\n gyro[k, 2] 
= oxts_k[0].wu\n gyro_bis[k, 0] = oxts_k[0].wx\n gyro_bis[k, 1] = oxts_k[0].wy\n gyro_bis[k, 2] = oxts_k[0].wz\n roll_oxts[k] = oxts_k[0].roll\n pitch_oxts[k] = oxts_k[0].pitch\n yaw_oxts[k] = oxts_k[0].yaw\n v_gt[k, 0] = oxts_k[0].ve\n v_gt[k, 1] = oxts_k[0].vn\n v_gt[k, 2] = oxts_k[0].vu\n v_rob_gt[k, 0] = oxts_k[0].vf\n v_rob_gt[k, 1] = oxts_k[0].vl\n v_rob_gt[k, 2] = oxts_k[0].vu\n p_gt[k] = oxts_k[1][:3, 3]\n Rot_gt_k = oxts_k[1][:3, :3]\n roll_gt[k], pitch_gt[k], yaw_gt[k] = IEKF.to_rpy(Rot_gt_k)\n\n t0 = t[0]\n t = np.array(t) - t[0]\n # some data can have gps out\n if np.max(t[:-1] - t[1:]) > 0.1:\n cprint(date_dir2 + \" has time problem\", 'yellow')\n ang_gt = np.zeros((roll_gt.shape[0], 3))\n ang_gt[:, 0] = roll_gt\n ang_gt[:, 1] = pitch_gt\n ang_gt[:, 2] = yaw_gt\n\n p_oxts = lla2ned(lat_oxts, lon_oxts, alt_oxts, lat_oxts[0], lon_oxts[0],\n alt_oxts[0], latlon_unit='deg', alt_unit='m', model='wgs84')\n p_oxts[:, [0, 1]] = p_oxts[:, [1, 0]] # see note\n\n # take correct imu measurements\n u = np.concatenate((gyro_bis, acc_bis), -1)\n # convert from numpy\n t = torch.from_numpy(t)\n p_gt = torch.from_numpy(p_gt)\n v_gt = torch.from_numpy(v_gt)\n ang_gt = torch.from_numpy(ang_gt)\n u = torch.from_numpy(u)\n\n # convert to float\n t = t.float()\n u = u.float()\n p_gt = p_gt.float()\n ang_gt = ang_gt.float()\n v_gt = v_gt.float()\n\n mondict = {\n 't': t, 'p_gt': p_gt, 'ang_gt': ang_gt, 'v_gt': v_gt,\n 'u': u, 'name': date_dir2, 't0': t0\n }\n\n t_tot += t[-1] - t[0]\n KITTIDataset.dump(mondict, args.path_data_save, date_dir2)\n print(\"\\n Total dataset duration : {:.2f} s\".format(t_tot))", "def main() -> None:\n ROOT_DIR = dirname(abspath(__file__))\n spark = create_spark_session()\n input_data = 's3a://udacity-dend/'\n output_data = ROOT_DIR + '/data/'\n\n process_song_data(spark, input_data, output_data)\n process_log_data(spark, input_data, output_data)", "def main():\n spark = create_spark_session()\n input_data = \"s3a://udacity-dend/\"\n output_data = \"s3a://udacity-data-lake/output/\"\n\n process_song_data(spark, input_data, output_data)\n process_log_data(spark, input_data, output_data)", "def load_hrrr_data(bucket, run_date, run_hour, variables):\n run_date_str = pd.to_datetime(run_date).strftime(\"%Y%m%d\")\n forecast_hour_str = pd.to_datetime(join(run_date, run_hour)).strftime(\"%H\")\n datasets = []\n coords = None\n for i, variable in enumerate(variables):\n files = []\n level = variable.split('-')[1]\n variable = variable.split('-')[0]\n fs = s3fs.S3FileSystem(anon=True)\n if i == 0:\n coord_path = join(bucket, run_date_str, f'{run_date_str}_{forecast_hour_str}z_fcst.zarr', level, variable)\n coords = s3fs.S3Map(root=coord_path, s3=fs, check=False)\n path = join(bucket, run_date_str, f'{run_date_str}_{forecast_hour_str}z_fcst.zarr', level, variable, level)\n f = s3fs.S3Map(root=path, s3=fs, check=False)\n files.append(coords)\n files.append(f)\n ds = xr.open_mfdataset(files, engine='zarr').load()\n ds[variable] = ds[variable].astype('float32')\n datasets.append(ds)\n\n all_ds = xr.merge(datasets).drop(['projection_x_coordinate', 'projection_y_coordinate', 'forecast_period'])\n all_ds = all_ds.rename_dims(dict(projection_x_coordinate='x', projection_y_coordinate='y', time='Time'))\n\n all_ds = all_ds.reset_coords('time').rename(dict(time='valid_time'))\n\n return all_ds", "def test_3d_freq():\n dic,data = ng.pipe.read(\"common_data/3d_pipe/ft/test%03d.ft3\")\n sdic,sdata = ng.pipe.read(\"common_data/3d_pipe/ft/test001.ft3\")\n\n assert data.shape == (128, 128, 
4096)\n assert data.dtype == 'float32'\n assert round(data[0,1,2],2) == 25980.13\n assert round(data[10,22,5],2) == 1561.09\n check_ppm_limits(dic,data,0,[78.10, 34.24])\n check_ppm_limits(dic,data,1,[147.42, 93.01])\n check_ppm_limits(dic,data,2,[254.92, -142.83])\n\n # and the first slice\n assert sdata.shape == (128, 4096)\n assert sdata.dtype == 'float32'\n assert round(sdata[1,2],2) == 25980.13\n assert round(sdata[22,5],2) == -8336.05\n check_ppm_limits(sdic,sdata,0,[147.42, 93.01])\n check_ppm_limits(sdic,sdata,1,[254.92, -142.83])\n\n # slice/data matching\n assert_array_equal(data[0],sdata)\n\n write_readback_3D(dic,data)", "def _regrid_dataset(in_dir, var, cfg):\n filelist = glob.glob(os.path.join(in_dir, var['file']))\n for infile in filelist:\n _, infile_tail = os.path.split(infile)\n outfile_tail = infile_tail.replace('c3s', 'c3s_regridded')\n outfile = os.path.join(cfg['work_dir'], outfile_tail)\n with catch_warnings():\n filterwarnings(\n action='ignore',\n # Full message:\n # UserWarning: Skipping global attribute 'long_name':\n # 'long_name' is not a permitted attribute\n message=\"Skipping global attribute 'long_name'\",\n category=UserWarning,\n module='iris',\n )\n lai_cube = iris.load_cube(infile,\n constraint=NameConstraint(\n var_name=var['raw']))\n lai_cube = regrid(lai_cube, cfg['custom']['regrid_resolution'],\n 'nearest')\n logger.info(\"Saving: %s\", outfile)\n\n iris.save(lai_cube, outfile)", "def run(self):\n\n #retrieve tags\n self.bqSession.update_mex('Extracting properties')\n\n #type check\n hdf_resource = self.bqSession.fetchxml(self.options.InputFile, view='deep,clean')\n if (hdf_resource.tag != 'resource' or hdf_resource.get('resource_type', '') != 'table') and hdf_resource.tag != 'table':\n raise Dream3DError(\"trying to run Dream3D on non-table resource\")\n\n hdf_url = self.bqSession.service_url('blob_service', path=hdf_resource.get('resource_uniq'))\n self.bqSession.fetchblob(hdf_url, path=os.path.join(self.options.stagingPath, 'input.h5'))\n hdf_input_file = os.path.join(self.options.stagingPath, 'input.h5')\n hdf_output_file = os.path.join(self.options.stagingPath, 'output.h5')\n\n # create pipeline with correct parameters\n pipeline_params = self.bqSession.mex.xmltree.xpath('tag[@name=\"inputs\"]/tag[@name=\"pipeline_params\"]/tag')\n params = {}\n for tag in pipeline_params:\n params[tag.get('name','')] = getattr(self.options, tag.get('name',''))\n pipeline_file, err_file = self._instantiate_pipeline(pipeline_url=self.options.pipeline_url, input_file=hdf_input_file, output_file=hdf_output_file, params=params)\n\n # run Dream3D on the pipeline\n self.bqSession.update_mex('Running Dream3D')\n log.debug('run Dream3D on %s', pipeline_file)\n res = 1\n with open(err_file, 'w') as fo:\n# res = 0 #!!! TESTING\n# open(hdf_output_file, 'a').close()\n res = subprocess.call(['/dream3d/bin/PipelineRunner',\n '-p',\n pipeline_file],\n stderr=fo, stdout=fo)\n log.debug(\"Dream3D returned: %s\", str(res))\n\n if res > 0:\n err_msg = 'pipeline execution failed\\n'\n with open(err_file, 'r') as fo:\n err_msg += ''.join(fo.readlines())\n if len(err_msg) > 1024:\n err_msg = err_msg[:512] + '...' 
+ err_msg[-512:]\n raise Dream3DError(err_msg)\n\n self.output_file = hdf_output_file", "def runMT3D(self):\n \n # write mt3dms input\n self.__mt.write_input()\n # run mt3dms\n self.__mt.run_model()", "def read_data_files(self):\n\n for name, snap in zip(self.names, self.snaps):\n # build the very important dictionary:\n key = f'{name}_{snap:03}' # e.g 'MW_000'\n self.galaxies[key] = Galaxy(name, snap, self.path, \n self.usesql, self.ptype, self.stride)\n self.time = self.galaxies[key].time\n\n # bits of minor housekeeping:\n # self.path = self.galaxies[key].filepath # may speed up next search\n self.filenames.append(key)", "def REDS(mode):\n #### configurations\n read_all_imgs = False # whether real all images to memory with multiprocessing\n # Set False for use limited memory\n BATCH = 5000 # After BATCH images, lmdb commits, if read_all_imgs = False\n if mode == 'train_sharp':\n img_folder = '../../datasets/REDS/train_sharp'\n lmdb_save_path = '../../datasets/REDS/train_sharp_wval.lmdb'\n H_dst, W_dst = 720, 1280\n elif mode == 'train_sharp_bicubic':\n img_folder = '../../datasets/REDS/train_sharp_bicubic'\n lmdb_save_path = '../../datasets/REDS/train_sharp_bicubic_wval.lmdb'\n H_dst, W_dst = 180, 320\n elif mode == 'train_blur_bicubic':\n img_folder = '../../datasets/REDS/train_blur_bicubic'\n lmdb_save_path = '../../datasets/REDS/train_blur_bicubic_wval.lmdb'\n H_dst, W_dst = 180, 320\n elif mode == 'train_blur':\n img_folder = '../../datasets/REDS/train_blur'\n lmdb_save_path = '../../datasets/REDS/train_blur_wval.lmdb'\n H_dst, W_dst = 720, 1280\n elif mode == 'train_blur_comp':\n img_folder = '../../datasets/REDS/train_blur_comp'\n lmdb_save_path = '../../datasets/REDS/train_blur_comp_wval.lmdb'\n H_dst, W_dst = 720, 1280\n elif mode == 'train_sharp_flowx4':\n img_folder = '../../datasets/REDS/train_sharp_flowx4'\n lmdb_save_path = '../../datasets/REDS/train_sharp_flowx4.lmdb'\n H_dst, W_dst = 360, 320\n n_thread = 40\n ########################################################\n if not lmdb_save_path.endswith('.lmdb'):\n raise ValueError(\"lmdb_save_path must end with \\'lmdb\\'.\")\n if osp.exists(lmdb_save_path):\n print('Folder [{:s}] already exists. Exit...'.format(lmdb_save_path))\n sys.exit(1)\n\n #### read all the image paths to a list\n print('Reading image path list ...')\n all_img_list = data_util._get_paths_from_images(img_folder)\n keys = []\n for img_path in all_img_list:\n split_rlt = img_path.split('/')\n folder = split_rlt[-2]\n img_name = split_rlt[-1].split('.png')[0]\n keys.append(folder + '_' + img_name)\n\n if read_all_imgs:\n #### read all images to memory (multiprocessing)\n dataset = {} # store all image data. 
list cannot keep the order, use dict\n print('Read images with multiprocessing, #thread: {} ...'.format(n_thread))\n pbar = util.ProgressBar(len(all_img_list))\n\n def mycallback(arg):\n '''get the image data and update pbar'''\n key = arg[0]\n dataset[key] = arg[1]\n pbar.update('Reading {}'.format(key))\n\n pool = Pool(n_thread)\n for path, key in zip(all_img_list, keys):\n pool.apply_async(read_image_worker, args=(path, key), callback=mycallback)\n pool.close()\n pool.join()\n print('Finish reading {} images.\\nWrite lmdb...'.format(len(all_img_list)))\n\n #### create lmdb environment\n data_size_per_img = cv2.imread(all_img_list[0], cv2.IMREAD_UNCHANGED).nbytes\n print('data size per image is: ', data_size_per_img)\n data_size = data_size_per_img * len(all_img_list)\n env = lmdb.open(lmdb_save_path, map_size=data_size * 10)\n\n #### write data to lmdb\n pbar = util.ProgressBar(len(all_img_list))\n txn = env.begin(write=True)\n for idx, (path, key) in enumerate(zip(all_img_list, keys)):\n pbar.update('Write {}'.format(key))\n key_byte = key.encode('ascii')\n data = dataset[key] if read_all_imgs else cv2.imread(path, cv2.IMREAD_UNCHANGED)\n if 'flow' in mode:\n H, W = data.shape\n assert H == H_dst and W == W_dst, 'different shape.'\n else:\n H, W, C = data.shape\n assert H == H_dst and W == W_dst and C == 3, 'different shape.'\n txn.put(key_byte, data)\n if not read_all_imgs and idx % BATCH == 0:\n txn.commit()\n txn = env.begin(write=True)\n txn.commit()\n env.close()\n print('Finish writing lmdb.')\n\n #### create meta information\n meta_info = {}\n meta_info['name'] = 'REDS_{}_wval'.format(mode)\n channel = 1 if 'flow' in mode else 3\n meta_info['resolution'] = '{}_{}_{}'.format(channel, H_dst, W_dst)\n meta_info['keys'] = keys\n pickle.dump(meta_info, open(osp.join(lmdb_save_path, 'meta_info.pkl'), \"wb\"))\n print('Finish creating lmdb meta info.')", "def main(root_dir=(join(Path(__file__).resolve().parents[1], 'data')), apply_masks=True):\n\n # Iterate over each volume in the root data directory\n for folder_name in os.listdir(root_dir):\n print(join(root_dir, folder_name))\n if 'results' not in folder_name and 'subj' in folder_name:\n\n # Create all of the directories and subdirectories\n create_train_test_val_dirs(join(root_dir, folder_name))\n\n # Populate the train, val, and test directories and their subdirectories\n populate_train_test_val_dirs_nonrandomly(join(root_dir, folder_name),\n val_ratio=0.00,\n test_ratio=0.00,\n preliminary_clahe=True,\n apply_masks=apply_masks)\n\n if apply_masks:\n # Apply masks to all of the images in this volume\n apply_masks_to_volume(join(root_dir, folder_name))\n\n # Get and save the residuals between ClearImages and CoregisteredBlurryImages\n create_and_populate_residual_dirs(join(root_dir, folder_name))", "def load_egohands_dataset(root: str):\n\n # iterate over all sub-directory in root\n for dir_name in os.listdir(root):\n path = os.path.join(root, dir_name)\n if os.path.isdir(path):\n # path is the sub-directory of root\n # check the presence of polygons.mat in the directory\n full_path = os.path.join(path, 'polygons.mat')\n if os.path.isfile(full_path):\n # get the list of frames, which is all file in the directory with \"frame_\" and \".jpg\" in the file name\n # we don't have to make this a list, since sorting on the iterable is acceptable\n frames = filter(lambda fn: 'frame_' in fn and '.jpg' in fn, os.listdir(path))\n # os.listdir list file with correct order only on some platforms, so we have to sort it to make sure the rank is 
correct\n frames = sorted(frames)\n\n # we treat sub-directory name in root as the scene name\n scene = dir_name\n\n # load all polygons, and change its format into what we want (3-d array)\n polygons = loadmat(full_path)['polygons'][0]\n polygons = np.stack([ polygons[label] for label in orig_labels ], axis=1)\n\n # co-iterate frame and polygon\n # if len(frames) and len(polygons) are not the same, exception will be thrown\n for framedata in zip(frames, polygons):\n\n # retrive frame-polygon pair\n f, p = framedata\n f = os.path.join(path, f) # build full path of frame\n\n # calculate bounding rect of each polygon (we do not use MaskRCNN so the rectangle region should work)\n boxes = []\n labels = []\n for label_id in range(len(orig_labels)):\n label_name = orig_labels[label_id]\n if p[label_id].shape[1] != 0:\n boxes.append(torch.tensor(get_bounding_rect(p[label_id].squeeze()), dtype=torch.float))\n labels.append(label_id)\n \n # if we store image in memory, load image now\n if MEMORY_CACHE:\n f = cv2.imread(f)\n f = torch.from_numpy(f).permute((2, 0, 1)).float() # change shape into (band, width, height)\n \n # if we have a box in this frame, show it\n if len(boxes) > 0:\n yield { 'file': f, 'scene': scene, 'boxes': torch.stack(boxes), 'labels': torch.tensor(labels, dtype=torch.int64) }\n else:\n print('Warning: {} does not exist.'.format(full_path))\n return", "def test_3d_time():\n dic,data = ng.pipe.read(\"common_data/3d_pipe/data/test%03d.fid\")\n sdic,sdata = ng.pipe.read(\"common_data/3d_pipe/data/test001.fid\")\n assert data.shape == (128, 88, 1250)\n assert data.dtype == 'complex64'\n assert round(data[0,1,2].real,2) == -7.98\n assert round(data[0,1,2].imag,2) == 33.82\n assert round(data[10,22,5].real,2) == 15.71\n assert round(data[10,22,5].imag,2) == 15.1\n\n # and the first slice\n assert sdata.shape == (88, 1250)\n assert sdata.dtype == 'complex64'\n assert round(sdata[1,2].real,2) == -7.98\n assert round(sdata[1,2].imag,2) == 33.82\n assert round(sdata[22,5].real,2) == 22.65\n assert round(sdata[22,5].imag,2) == 13.65\n\n # slice/data matching\n assert_array_equal(data[0],sdata)\n\n write_readback_3D(dic,data)", "def load_ismrmrd_ifft3d_reconstruction(filename):\n\n if not os.path.isfile(filename):\n print(\"%s is not a valid file\" % filename)\n raise SystemExit\n dset = ismrmrd.Dataset(filename, 'dataset', create_if_needed=False)\n\n #Read some fields from the XML header\n hdr = ismrmrd.xsd.CreateFromDocument(dset.read_xml_header())\n #get encoding and reconstruction information\n enc = hdr.encoding[0]\n # Matrix size\n eNx = enc.encodedSpace.matrixSize.x\n eNy = enc.encodedSpace.matrixSize.y\n eNz = enc.encodedSpace.matrixSize.z\n rNx = enc.reconSpace.matrixSize.x\n rNy = enc.reconSpace.matrixSize.y\n\n # Number of Slices, Reps, Contrasts, etc.\n #We have to wrap the following in a if/else because a valid xml header may\n #not have an entry for some of the parameters\n ncoils = hdr.acquisitionSystemInformation.receiverChannels\n if enc.encodingLimits.slice != None:\n nslices = enc.encodingLimits.slice.maximum + 1\n else:\n nslices = 1\n\n if enc.encodingLimits.repetition != None:\n nreps = enc.encodingLimits.repetition.maximum + 1\n else:\n nreps = 1\n\n if enc.encodingLimits.contrast != None:\n ncontrasts = enc.encodingLimits.contrast.maximum + 1\n else:\n ncontrasts = 1\n\n\n # Loop through the acquisitions looking for noise scans\n firstacq = 0\n for acqnum in range(dset.number_of_acquisitions()):\n acq = dset.read_acquisition(acqnum)\n\n # TODO: Currently ignoring 
noise scans\n if acq.isFlagSet(ismrmrd.ACQ_IS_NOISE_MEASUREMENT):\n print(\"Found noise scan at acq \", acqnum)\n continue\n else:\n firstacq = acqnum\n print(\"Imaging acquisition starts acq \", acqnum)\n break\n\n # Initialiaze a storage array\n all_data = np.zeros((nreps, ncontrasts, nslices, ncoils, eNz, eNy, rNx), dtype=np.complex64)\n\n # Loop through the rest of the acquisitions and stuff\n for acqnum in range(firstacq, dset.number_of_acquisitions()):\n acq = dset.read_acquisition(acqnum)\n head = acq.getHead()\n\n # TODO: this is where we would apply noise pre-whitening\n\n #padd if acquisition data is not complete (padding)\n if acq.data.shape[1]<eNx :\n x0=int((eNx - acq.data.shape[1]) / 2)\n zeros = np.zeros((acq.data.shape[0], x0))\n padded_acq_data = np.append(np.append(zeros, acq.data, axis=1), zeros, axis=1)\n acq.resize(eNx, acq.active_channels, acq.trajectory_dimensions)\n acq.data[:]=padded_acq_data\n\n # Remove oversampling if needed\n if eNx != rNx:\n #xline = transform.transform_kspace_to_image(acq.data, [1])\n xline = transform.transform_kspace_to_image(acq.data, dim=(1,), img_shape=(eNx,))\n x0 = int((eNx - rNx) / 2)\n x1 = int((eNx - rNx) / 2 + rNx)\n xline = xline[:, x0:x1]\n acq.resize(rNx, acq.active_channels, acq.trajectory_dimensions)\n acq.center_sample = int(rNx / 2)\n # need to use the [:] notation here to fill the data\n acq.data[:] = transform.transform_image_to_kspace(xline, dim=(1,), k_shape=(rNx,))\n\n # Stuff into the buffer\n rep = acq.idx.repetition\n contrast = acq.idx.contrast\n slice = acq.idx.slice\n y = acq.idx.kspace_encode_step_1\n z = acq.idx.kspace_encode_step_2\n all_data[rep, contrast, slice, :, z, y, :] = acq.data\n\n # Reconstruct images\n images = np.zeros((nreps, ncontrasts, nslices, eNz, rNy, rNx), dtype=np.float32)\n img_scaled = []\n for rep in range(nreps):\n for contrast in range(ncontrasts):\n for slice in range(nslices):\n # FFT\n if eNz > 1:\n # 3D\n im = transform.transform_kspace_to_image(all_data[rep, contrast, slice, :, :, :, :], [1, 2, 3])\n else:\n # 2D\n im = transform.transform_kspace_to_image(all_data[rep, contrast, slice, :, 0, :, :], [2, 3])\n\n if eNy != rNy:\n x0 = int((eNy - rNy) / 2)\n x1 = int((eNy - rNy) / 2 + rNy)\n im = im[:,:,x0:x1, :]\n\n # Sum of squares\n im = np.sqrt(np.sum(np.abs(im) ** 2, 0))\n\n # Stuff into the output\n if eNz > 1:\n # 3D\n images[rep, contrast, slice, :, :, :] = im\n else:\n # 2D\n images[rep, contrast, slice, 0, :, :] = im\n\n img_scaled.append(im)\n\n dset.close()\n\n return [head, hdr, img_scaled]", "def main():\n spark = create_spark_session()\n input_data = \"s3a://udacity-dend/\"\n output_data = \"data/analytics\"\n \n process_song_data(spark, input_data, output_data) \n process_log_data(spark, input_data, output_data)", "def generateDataset(self):\n if self.outdir[-1] != \"/\": \n self.outdir += \"/\"\n self.outdir += \"dataset_trackml\"\n i = 1\n while os.path.exists(self.outdir):\n self.outdir.replace(\"_\"+str(i-1), \"\")\n self.outdir += (\"_\"+str(i))\n i += 1\n cmd = \"mkdir -p \"+ self.outdir\n os.system(cmd)\n\n cont = pc.particleController()\n cont.generateEvents(self.numevents, self.hpe, self.detectors)\n\n self.generateHits(cont)\n self.generateTruths(cont)\n self.generateSolution(cont)", "def test_3d_steam_time():\n dic,data = ng.pipe.read(\"common_data/3d_pipe/full3D.fid\")\n assert data.shape == (128, 88, 1250)\n assert data.dtype == 'complex64'\n assert round(data[0,1,2].real,2) == -7.98\n assert round(data[0,1,2].imag,2) == 33.82\n assert 
round(data[10,22,5].real,2) == 15.71\n assert round(data[10,22,5].imag,2) == 15.1\n write_readback(dic,data)", "def clustering_dbscan_o3d():\n pass", "def main():\n spark = create_spark_session()\n input_data = \"s3a://udacity-dend/\"\n output_data = \"s3a://udacity-nanodegree-data-engineer/\"\n \n process_song_data(spark, input_data, output_data) \n process_log_data(spark, input_data, output_data)", "def reInitAndRun(self):\n self.playlists = self.readPlaylistData()\n self.audioDF = self.readAudioData(shouldProcess=True)\n self.clusterLabels = []\n self.models = Clusterers(k=len(self.playlists))\n self.processAndCluster()\n self.analyzeResults()", "def readpil3d(self):\r\n\r\n # Read the data in as an array.\r\n res = np.loadtxt(self.name, delimiter=' ')\r\n\r\n # Split into useful chunks\r\n self.pos = res[:, 0:3] # Grid point locations\r\n self.Pn = res[:, 3:4] # Normal pressure [Pa]\r\n self.flux = res[:, -1] # Flux\r", "def main():\n spark = create_spark_session()\n\n # Used for local testing - commented out\n # input_data = \"./data/\"\n # output_data = \"./data/\"\n input_data = \"s3a://udacity-dend/\"\n output_data = \"s3a://allen-lesson4-datalake-bucket/\"\n\n process_song_data(spark, input_data, output_data)\n process_log_data(spark, input_data, output_data)\n spark.stop()", "def initialize(args):\n # save the data directory in ribfrac/environ.py\n data_dir = args.data_dir\n with open(\"ribfrac/environ.py\", \"w\") as f:\n f.write(f'DATA_DIR = \"{args.data_dir}\"')\n\n # create data_dir if it doesn't exist\n if not os.path.exists(data_dir):\n print(f\"Data directory {data_dir} doesn't exist and is automatically\"\n \" created.\")\n os.mkdir(data_dir)", "def setup(self):\n\n folder_name, file_name, url, md5 = self.resource\n dataset_folder = os.path.join(self.data_root, folder_name)\n if not os.path.exists(dataset_folder):\n sh_utils.download_and_extract_archive(url, dataset_folder, md5, file_name)\n\n test_transform = tv_transforms.Compose(\n [\n tv_transforms.ToTensor(),\n tv_transforms.Lambda(lambda x: x.permute(1, 2, 0)),\n ]\n )\n\n dataset_out = tv_datasets.ImageFolder(\n root=dataset_folder, transform=test_transform\n )\n self.images_only_dataset_out = sh_data_torch.IndexedTorchDataset(\n sh_data_torch.ImagesOnlyTorchDataset(dataset_out)\n )", "def test_3d_time_lowmem():\n dic,data = ng.pipe.read_lowmem(\"common_data/3d_pipe/data/test%03d.fid\")\n assert data.shape == (128, 88, 1250)\n assert data.dtype == 'complex64'\n assert round(data[0,1,2].real,2) == -7.98\n assert round(data[0,1,2].imag,2) == 33.82\n assert round(data[10,22,5].real,2) == 15.71\n assert round(data[10,22,5].imag,2) == 15.1\n lowmem_write_readback_3D(dic,data)", "def __init__(self, filepath='multidrcl', suffix='DRCL', extension='.IMG', lblext='.LBL', force_read=True, unit='s', feature='sh', eye='L', do_print=True, initdatadir=None, initdata=None, readintuple=None):\n\n Dataset.__init__(self, None, \"mastcam\")\n\n if readintuple != None:\n (self.data, self.fullimages, self.segmentation, self.labels, self.xlabel, self.ylabel, self.xvals, self.rgbdict, self.lblext) = readintuple[0:9]\n if initdata != None:\n self.initdata = initdata\n if self.initfilename != None:\n self.initfilename = initarchive\n else:\n self.initfilename = 'param'\n return\n \n if do_print: print(filepath)\n \n if filepath == '388':\n filepath = '/proj/imbue/data/msl-mastcam/sol388/'\n \n if filepath == 'multidrcl':\n filepath = '/proj/imbue/data/msl-mastcam/multispectral_drcl/'\n \n self.filepath = filepath\n self.xlabel = 'TBD'\n 
self.ylabel = 'TBD'\n \n #dirname = filepath[:-1]\n #subsetname = dirname.split('/')[-1]\n subsetname = os.path.basename(filepath)\n self.name += \"-\" + subsetname\n if len(suffix) > 0:\n self.name += \"-\" + eye + '-' + suffix + '-' + unit + '-' + feature\n if do_print: print(\"Dataset name: \" + self.name)\n \n self.data = []\n self.cadence = []\n \n self.unit = unit\n self.feature = feature\n self.eye = eye\n\n self.rgbdict = {}\n self.extension = extension\n self.lblext = lblext\n self.suffix = suffix\n \n self.archive = os.path.join(filepath,\n subsetname + eye + \"_\" + suffix + '_' + unit + '_' + feature + \".pkl\")\n\n if initdata != None:\n self.initdata = initdata\n if self.initfilename != None:\n self.initfilename = initarchive\n else:\n self.initfilename = 'param'\n elif initdatadir != None:\n print(\"Reading in initialization data...\")\n #initsubsetname = initdatadir[:-1].split('/')[-1]\n initsubsetname = os.path.basename(initdatadir)\n initarchive = os.path.join(initdatadir,\n initsubsetname + eye + \"_\" + suffix + '_' + unit + '_' + feature + \".pkl\")\n if os.path.exists(initarchive):\n with open(initarchive, 'r') as f:\n self.initdata = pickle.load(f)[0]\n self.initfilename = initarchive\n print(\"...done!\")\n print(\"initdata.shape:\", self.initdata.shape)\n else:\n print(\"...initialization data does not exist!\")\n print(\"Desired pickle was: %s\" % initarchive)\n \n # Determine if we need to preprocess the data\n if (not os.path.exists(self.archive)) or force_read:\n self.read_mastcam_dir(filepath, suffix, unit, feature, extension, lblext, eye)\n else:\n if do_print: print(\"Found pickle at \" + self.archive)\n \n self.readin()", "def RunData(files, wavelength=None, out='testdata'):\n for i, file in enumerate(files):\n forwardModel(file=file, out='results/%s%i' % (out, i), wavelength=wavelength)", "def run():\n scene = lm.scene_object()\n copy_latest_low()\n copy_latest_high()", "def xrds_from_dir(path=None, fjord=None, metastr='_mdf', bitmask=False):\n warnings.warn(\"This function currently assumes a constant grid and EPSG for all input files\")\n assert fjord != None, \"You must specify the fjord code for these DEMs\"\n\n files = [f for f in os.listdir(path) if f.endswith('dem.tif')]\n\n # for DEMs nested in directories\n if len(files) == 0:\n try: \n os.remove(path+'.DS_Store')\n except FileNotFoundError:\n pass\n dirs = [dir for dir in os.listdir(path)]\n nestfiles=[]\n for dir in dirs:\n nestfiles.append([dir+'/'+f for f in os.listdir(path+dir) if f.endswith('dem.tif')])\n files = [items for nest in nestfiles for items in nest]\n\n i=0\n darrays = list(np.zeros(len(files)))\n dtimes = list(np.zeros(len(files)))\n for f in files:\n print(f)\n\n metaf = f.rpartition(\"_dem.tif\")[0] + metastr + \".txt\"\n try:\n meta = read_meta(path+metaf)\n # print(meta)\n dtimes[i] = get_DEM_img_times(meta)\n except FileNotFoundError:\n print(\"You must manually enter dtimes for these files within the code\")\n # dtimes[0] = dt.datetime(2012, 6, 29, hour=15, minute=26, second=30)\n # dtimes[1] = dt.datetime(2010, 8, 14, hour=15, minute=34)\n # except KeyError:\n # raise\n except AssertionError:\n print(\"These stereo image times are >30 min apart... 
skipped\")\n continue\n\n try:\n darrays[i] = read_DEM(path+f, fjord)\n # darrays[i] = read_DEM(path+f.rpartition(\"_dem.tif\")[0] + \"_dem_geoidcomp.tif\")\n except RasterioIOError:\n print(\"RasterioIOError on your input file\")\n break\n \n # read in and apply the bitmask\n if bitmask==True:\n bitmaskfn = path + f.rpartition(\"dem.tif\")[0] + \"bitmask.tif\"\n maskarr = read_mask(bitmaskfn, fjord)\n darrays[i] = darrays[i].where(maskarr==0, )\n \n i = i + 1\n\n if len(darrays)==1 and np.all(darrays[0]) == 0:\n warnings.warn(\"Your DEM will not be put into XArray\")\n return \"nodems\"\n\n elif len(darrays)>1 and np.all(darrays[darrays!=0]) == 0:\n warnings.simplefilter(\"always\")\n warnings.warn(\"None of your DEMs will be put into XArray\")\n return \"nodems\"\n\n else:\n # I just discovered xarray's mfdataset with the preprocess option to modify each dataset prior to opening. I'm guessing that'd be the way to go here\n # darr = xr.combine_nested(darrays, concat_dim=['dtime'])\n darr = xr.concat(darrays, \n dim=pd.Index(dtimes, name='dtime'), \n # coords=['x','y'], \n join='outer').chunk({'dtime': 1, 'x':3072, 'y':3072}) # figure out a better value for chunking this (it slows the JI one with 3 dems way down)\n # combine_attrs='no_conflicts' # only in newest version of xarray\n\n try:\n for arr in darrays:\n arr.close()\n except:\n pass\n del darrays\n \n # convert to dataset with elevation as a variable and add attributes\n attr = darr.attrs\n ds = darr.to_dataset()\n\n # coarsen the data to 4 m resolution to reduce memory crashes during processing\n # note that this may be important in later steps when resolution is used as an input\n coarse = 2\n if coarse > 1:\n print(\"Your input DEMs will be downsampled to enable processing\")\n ds = ds.coarsen(x=coarse, y=coarse, boundary='pad').mean()\n ds = ds.chunk({'dtime': 1, 'x':3072, 'y':3072})\n\n ds.attrs = attr\n ds.attrs['fjord'] = fjord\n ds.attrs['res'] = tuple(x * coarse for x in attr['res'])\n attr=None\n\n # newest version of xarray (0.16) has promote_attrs=True kwarg. 
Earlier versions don't...\n # ds = ds.to_dataset(name='elevation', promote_attrs=True).squeeze().drop('band')\n \n # using rioxarray means the transform is read in/created as part of the geospatial info, so it's unnecessary to manually create a transform\n # create affine transform for concatted dataset\n print('Please note the transform is computed assuming a coordinate reference system\\\n where x(min) is west and y(min) is south')\n # inputs: west, south, east, north, width, height\n # don't use len(x,y) for width and height in case they're not continuous\n width = abs((ds.x.max().item() - ds.x.min().item())/ds.attrs['res'][0])\n ht = abs((ds.y.max().item() - ds.y.min().item())/ds.attrs['res'][1])\n transform = rasterio.transform.from_bounds(ds.x.min().item()-0.5*ds.attrs['res'][0], ds.y.min().item()-0.5*ds.attrs['res'][1], \n ds.x.max().item()+0.5*ds.attrs['res'][0], ds.y.max().item()+0.5*ds.attrs['res'][1], \n width, ht)\n ds.attrs['transform'] = transform\n # set the transform and crs as attributes since that's how they're accessed later in the pipeline\n # ds.attrs['transform'] = (ds.spatial_ref.GeoTransform)\n # ds.attrs['crs'] = ds.spatial_ref.crs_wkt\n\n\n return ds", "def main(args, **kwargs):\n data_file = os.path.join(kwargs['data_dir'],\n '{}_{}m_{}_{}.npz'.format(args.area, args.resolution, args.year, args.tar_date))\n data = np.load(data_file)\n tar_label_mat = data['label_mat']\n tar_dynamic_mat = data['dynamic_mat']\n static_mat, tar_static_mat = data['static_mat'], data['static_mat']\n mapping_mat = data['mapping_mat']\n dynamic_features, static_features = list(data['dynamic_features']), list(data['static_features'])\n\n \"\"\" extract information from all time period \"\"\"\n dynamic_mat, label_mat = [], []\n for date in args.dates:\n data_file = os.path.join(kwargs['data_dir'],\n '{}_{}m_{}_{}.npz'.format(args.area, args.resolution, args.year, date))\n data = np.load(data_file)\n dynamic_mat.append(data['dynamic_mat'])\n label_mat.append(data['label_mat'])\n dynamic_mat = np.concatenate(dynamic_mat)\n label_mat = np.concatenate(label_mat)\n\n data_obj = DataObj(label_mat, dynamic_mat, static_mat,\n tar_label_mat, tar_dynamic_mat, tar_static_mat,\n dynamic_features, static_features, mapping_mat)\n\n \"\"\" load train, val, test locations \"\"\"\n data_obj.train_loc, data_obj.val_loc, data_obj.test_loc = load_train_val_test(kwargs['train_val_test_file'], args)\n\n data_obj.train_y = data_obj.gen_train_val_test_label(data_obj.label_mat, data_obj.train_loc)\n data_obj.val_y = data_obj.gen_train_val_test_label(data_obj.label_mat, data_obj.val_loc)\n data_obj.test_y = data_obj.gen_train_val_test_label(data_obj.tar_label_mat, data_obj.test_loc)\n\n logging.info('Number of features = {}.'.format(data_obj.n_features))\n logging.info('Number of dynamic features = {}.'.format(data_obj.n_dynamic_features))\n logging.info('Number of static features = {}.'.format(data_obj.n_static_features))\n logging.info('Number of time points = {}.'.format(data_obj.n_times))\n logging.info('Shape of the matrix = ({}, {}).'.format(data_obj.n_rows, data_obj.n_cols))\n\n \"\"\" normalize data \"\"\"\n data_obj.dynamic_x = normalize_mat(data_obj.dynamic_mat, if_retain_last_dim=True)\n data_obj.static_x = normalize_mat(data_obj.static_mat, if_retain_last_dim=True)\n data_obj.tar_dynamic_x = normalize_mat(data_obj.tar_dynamic_mat, if_retain_last_dim=True)\n data_obj.tar_static_x = normalize_mat(data_obj.tar_static_mat, if_retain_last_dim=True)\n\n \"\"\" load auto-encoder model \"\"\"\n ae = 
torch.load(os.path.join(kwargs['model_dir'], kwargs['ae_model_name'] + '.pkl'))\n\n \"\"\" define DeepAP model \"\"\"\n dap = DeepAP(in_dim=data_obj.n_features,\n ae_en_h_dims=[64, 32, 16],\n ae_de_h_dims=[16, 32, 64],\n\n conv_lstm_in_size=(data_obj.n_rows, data_obj.n_cols),\n conv_lstm_in_dim=args.ae_h_dim, # ae_h_dim\n conv_lstm_h_dim=[args.dap_h_dim], # dap_h_dim\n conv_lstm_kernel_sizes=args.kernel_sizes, # kernel_sizes\n conv_lstm_n_layers=1,\n\n fc_in_dim=args.dap_h_dim * len(args.kernel_sizes),\n fc_h_dims=args.fc_h_dims, # fc_h_dims\n fc_out_dim=1,\n\n ae_pretrain_weight=ae.state_dict(),\n if_trainable=True,\n fc_p_dropout=0.1,\n\n mask_thre=args.mask_thr,\n device=kwargs['device'])\n\n dap = dap.to(kwargs['device'])\n train(dap, data_obj, args, **kwargs)", "def construct_data_path (self, scene_rpath, category):\n dirname, basename = os.path.split (scene_rpath)\n baseprefix, basesuffix = os.path.splitext (basename)\n data_rpath = os.path.join ('data', category, baseprefix + '.dsf')\n return data_rpath", "def main():\n train_src = read_file(SRC_TRAIN)\n train_tgt = read_file(TRGT_TRAIN)\n val_src = read_file(SRC_VAL)\n val_tgt = read_file(TRGT_VAL)\n # val = read_files(VAL_FILES)\n np.savez(\n DATA_NPZ_NAME, train_src=train_src, train_tgt=train_tgt, val_src=val_src, val_tgt=val_tgt)", "def test_3d_steam_freq():\n dic,data = ng.pipe.read(\"common_data/3d_pipe/full3D.ft3\")\n assert data.shape == (128, 128, 4096)\n assert data.dtype == 'float32'\n assert round(data[0,1,2],2) == 25980.13\n assert round(data[10,22,5],2) == 1561.09\n check_ppm_limits(dic,data,0,[78.10, 34.24])\n check_ppm_limits(dic,data,1,[147.42, 93.01])\n check_ppm_limits(dic,data,2,[254.92, -142.83])\n write_readback(dic,data)", "def load_data_in_folder(self):\n if self.data_filenames:\n print('removing existing data files')\n for f in tqdm(self.data_filenames):\n os.remove(f)\n print('loading files in data folder')\n n = len(self.filenames)\n idx_max = n // self.batch_size\n for idx in tqdm(range(0, idx_max-1)):\n data = []\n for f in self.filenames[idx:idx+self.batch_size]:\n img = cv2.imread(f, int(self.color))\n if not self.color:\n img = np.expand_dims(img, axis=-1)\n data.append(img)\n data = np.array(data)\n data = data.astype('float32')\n data = (data - 127.5)/127.5\n np.save(op.join(self.data_path, str(idx)), data)\n # TODO last batch ?\n self.data_filenames = sorted(glob(op.join(self.data_path, '*.npy')))", "def create_dataset():\n with open(\"/root/config.json\", \"r\") as f:\n config = json.load(f)\n\n # create environmental variables\n for (key, value) in config.items():\n os.environ[key] = str(value)\n\n # run blender\n command = '/usr/lib/blender/blender {} --python {} --background'.\\\n format(\"/root/models/default.blend\", \"/root/rendering.py\")\n os.system(command)\n\n # post processing\n post_processing()", "def run(self):\r\n __data__ = abspath(join(dirname( __file__ ), '..', 'data'))\r\n files = [ f for f in listdir(__data__) \r\n if isfile(join(__data__,f)) ]\r\n\r\n # Spawn processes\r\n pids = []\r\n for index, ts_name in enumerate(files):\r\n if ts_name == \".DS_Store\":\r\n \tcontinue\r\n\r\n __data__ = abspath(join(dirname( __file__ ), '..', 'data'))\r\n with open(join(__data__ + \"/\" + ts_name), 'r') as f:\r\n timeseries = json.loads(f.read())\r\n p = Process(target=run_algorithms, args=(timeseries, ts_name))\r\n pids.append(p)\r\n p.start()\r\n\r\n # Send wait signal to zombie processes\r\n for p in pids:\r\n p.join()", "def main(inputfolder):\n inputfolder = realpath(inputfolder)\n 
for data in DATASET:\n for fol in FOLDERS:\n actfile = join(inputfolder, data, data+'.txt')\n logger.info('Changing data in: %s' % actfile)\n filedata = []\n with open(actfile) as fin:\n for line in fin:\n id, y = map(int, line.strip().split('\\t'))\n if y == -1000:\n y = 0\n path = join(inputfolder, 'data'+str(data), action, 'original', str(id)+'.jpg')\n filedata.append((path, y))\n path = join(inputfolder, 'data'+str(data), action, 'original', str(id+1)+'.jpg')\n filedata.append((path, y))\n with open(actfile, 'w') as fout:\n for path, y in filedata:\n fout.write('%s %d\\n' % (path, y))", "def test_3d_steam_time_lowmem():\n dic,data = ng.pipe.read_lowmem(\"common_data/3d_pipe/full3D.fid\")\n assert data.shape == (128, 88, 1250)\n assert data.dtype == 'complex64'\n assert round(data[0,1,2].real,2) == -7.98\n assert round(data[0,1,2].imag,2) == 33.82\n assert round(data[10,22,5].real,2) == 15.71\n assert round(data[10,22,5].imag,2) == 15.1\n lowmem_write_readback(dic,data)", "def process_scene_data(self, scene, data, tmp_dir):\n scene_dir = join(tmp_dir, str(scene.id))\n img_dir = join(scene_dir, 'img')\n labels_dir = join(scene_dir, 'labels')\n\n make_dir(img_dir)\n make_dir(labels_dir)\n\n for ind, (chip, window, labels) in enumerate(data):\n chip_path = join(img_dir, '{}-{}.png'.format(scene.id, ind))\n label_path = join(labels_dir, '{}-{}.png'.format(scene.id, ind))\n\n label_im = labels.get_label_arr(window).astype(np.uint8)\n save_img(label_im, label_path)\n save_img(chip, chip_path)\n\n return scene_dir", "def setup():\n print('...')\n # Make sure dirs exist\n for directory in [DATA_DIR, DATA_INPUT_DIR, DATA_OUTPUT_DIR]:\n os.makedirs(directory, exist_ok=True)", "def read_data(dirs, idx_90d, idx_0d, idx_45d, idx_m45d, img_size):\n raw_data_90d = np.zeros(shape=(img_size, img_size, 3, 9, len(dirs)), dtype=np.float32)\n raw_data_0d = np.zeros(shape=(img_size, img_size, 3, 9, len(dirs)), dtype=np.float32)\n raw_data_45d = np.zeros(shape=(img_size, img_size, 3, 9, len(dirs)), dtype=np.float32)\n raw_data_m45d = np.zeros(shape=(img_size, img_size, 3, 9, len(dirs)), dtype=np.float32)\n raw_label = np.zeros(shape=(img_size, img_size, len(dirs)), dtype=np.float32)\n\n i_scence = 0\n for dir in dirs:\n print(\"loading...\", dir)\n for idx in range(len(idx_0d)):\n raw_data_90d[:, :, :, idx, i_scence] = np.float32(\n imageio.imread(dir + '/input_Cam0%02d.png' % (idx_90d[idx])))\n raw_data_0d[:, :, :, idx, i_scence] = np.float32(\n imageio.imread(dir + '/input_Cam0%02d.png' % (idx_0d[idx])))\n raw_data_45d[:, :, :, idx, i_scence] = np.float32(\n imageio.imread(dir + '/input_Cam0%02d.png' % (idx_45d[idx])))\n raw_data_m45d[:, :, :, idx, i_scence] = np.float32(\n imageio.imread(dir + '/input_Cam0%02d.png' % (idx_m45d[idx])))\n raw_label[:, :, i_scence] = np.array(read_pfm(dir + '/gt_disp_lowres.pfm'), dtype=np.float32)\n i_scence += 1\n return raw_data_90d, raw_data_0d, raw_data_45d, raw_data_m45d, raw_label", "def load_data_in_folder(self):\n print('loading files in data folder')\n n = len(self.filenames)\n idx_max = n // self.batch_size\n for idx in range(0, idx_max-1):\n data = []\n for f in self.filenames[idx:idx+64]:\n img = cv2.imread(f, int(self.color))\n if not self.color:\n img = np.expand_dims(img, axis=-1)\n data.append(img)\n data = np.array(data)\n data = data.astype('float32')\n data = (data - 127.5)/127.5\n np.save(op.join(self.data_path, str(idx)), data)\n # TODO last batch ?\n self.data_filenames = sorted(glob(op.join(self.data_path, '*.npy')))", "def fetch_sherbrooke_3shell():\n 
dipy_home = pjoin(os.path.expanduser('~'), '.dipy')\n url = 'https://dl.dropboxusercontent.com/u/2481924/sherbrooke_data/'\n uraw = url + '3shells-1000-2000-3500-N193.nii.gz'\n ubval = url + '3shells-1000-2000-3500-N193.bval'\n ubvec = url + '3shells-1000-2000-3500-N193.bvec'\n folder = pjoin(dipy_home, 'sherbrooke_3shell')\n\n md5_list = ['0b735e8f16695a37bfbd66aab136eb66', # data\n 'e9b9bb56252503ea49d31fb30a0ac637', # bval\n '0c83f7e8b917cd677ad58a078658ebb7'] # bvec\n\n url_list = [uraw, ubval, ubvec]\n fname_list = ['HARDI193.nii.gz', 'HARDI193.bval', 'HARDI193.bvec']\n\n if not os.path.exists(folder):\n print('Creating new directory %s' % folder)\n os.makedirs(folder)\n print('Downloading raw 3-shell data (184MB)...')\n\n for i in range(len(md5_list)):\n _get_file_data(pjoin(folder, fname_list[i]), url_list[i])\n check_md5(pjoin(folder, fname_list[i]), md5_list[i])\n\n print('Done.')\n print('Files copied in folder %s' % folder)\n else:\n print('Dataset is already in place. If you want to fetch it again, please first remove the folder %s ' % folder)", "def main():\n spark = create_spark_session()\n input_data = \"s3a://udacity-dend/\"\n output_data = \"s3a://dend-bucket-cpm/\"\n\n process_song_data(spark, input_data, output_data)\n process_log_data(spark, input_data, output_data)", "def run_pipeline(directory):\n\n # io = IO(path)\n # df = io.load_cleaned_file(download_always=False)\n # df = add_choke_events(df)\n\n # Add calls to features.Xxx here\n\n #directory = main_directory\n site=os.listdir(directory)\n site_dicom={}\n site_dicom_sub={}\n site_sub_files={}\n i,k,j=0,0,0\n for filename in site:\n site_dicom[i]=directory+'/'+filename+'/DICOM-raw'\n temporary_path=os.listdir(site_dicom[i])\n\n for another_file in temporary_path:\n site_dicom_sub[j]=site_dicom[i]+'/'+another_file+'/scans'\n temporary_path_1 = os.listdir(site_dicom_sub[j])\n for another_file_1 in temporary_path_1:\n site_sub_files[k]=site_dicom_sub[j]+'/'+another_file_1+'/'\n k=k+1\n j = j + 1\n i=i+1\n splitted={}\n output_mif={}\n for i in range (len(site_sub_files)):\n splitted[i]=site_sub_files[i].split('/')\n output_mif[i]=directory+'/'+splitted[i][5]+'/MIF-raw/'+splitted[i][5]+'_'+splitted[i][7]+'_'+splitted[i][9]+'.mif'\n\n\n # save (or return) dataframe here?\n return site_sub_files,output_mif", "def process_data(bc_data_dir):\r\n # Load the file store. 
\r\n # In the future (TODO) move this to a seperate thread.\r\n states, actions = [], []\r\n shards = [x for x in os.listdir(bc_data_dir) if x.endswith('.npy')]\r\n print(\"Processing shards: {}\".format(shards))\r\n for shard in shards:\r\n shard_path = os.path.join(bc_data_dir, shard)\r\n with open(shard_path, 'rb') as f:\r\n data = np.load(f)\r\n shard_states, unprocessed_actions = zip(*data)\r\n shard_states = [x.flatten() for x in shard_states]\r\n \r\n # Add the shard to the dataset\r\n states.extend(shard_states)\r\n actions.extend(unprocessed_actions)\r\n\r\n states = np.asarray(states, dtype=np.float32)\r\n actions = np.asarray(actions, dtype=np.float32)/2\r\n print(\"Processed with {} pairs\".format(len(states)))\r\n return states, actions", "def read_data(self):\n self.data = reduce_spectrum(self.filename)", "def main():\n spark = create_spark_session()\n\n input_data = \"s3a://udacitydenanodegree2020/\"\n output_data = \"s3a://udacitydenanodegree2020/output/\"\n\n process_song_data(spark, input_data, output_data) \n process_log_data(spark, input_data, output_data)", "def run_by_list( dataset_file ):\n three_paths = []\n # From one input path, build the three paths to send on to move_and_record:\n with open( dataset_file, 'r' ) as f:\n for line in f:\n vnh = line.strip()\n three_paths.append( ( os.path.join('/p/css03/scratch/',vnh),\n vnh,\n os.path.join('/p/css03/esgf_publish/',vnh) ) )\n\n # Build the filename suffix:\n if dataset_file[:16]=='unmoved_datasets':\n suffix = dataset_file[17:] # the identifying date, e.g. 2019-06-10 12:52:21\n else:\n suffix = ''\n time = str(datetime.datetime.now())\n suffix = '_'.join([suffix,time])\n suffix = suffix.replace(' ','_')\n\n # Do the real work:\n move_and_record( three_paths, suffix )", "def test_3d_steam_freq():\n dic,data = ng.pipe.read_lowmem(\"common_data/3d_pipe/full3D.ft3\")\n assert data.shape == (128, 128, 4096)\n assert data.dtype == 'float32'\n assert round(data[0,1,2],2) == 25980.13\n assert round(data[10,22,5],2) == 1561.09\n check_ppm_limits(dic,data,0,[78.10, 34.24])\n check_ppm_limits(dic,data,1,[147.42, 93.01])\n check_ppm_limits(dic,data,2,[254.92, -142.83])\n lowmem_write_readback(dic,data)", "def download_dataset(base_dir, scene):\n\n # setup depends on dataset\n if len(scene.split('_')) == 1: # default\n modality, part = None, None # declaration necessary for instatiation check\n base_dir = Path(base_dir).expanduser().joinpath(scene)\n filepath_data = base_dir.joinpath(DATASETS_CONFIG[scene]['img']['name'])\n filepath_labels = base_dir.joinpath(DATASETS_CONFIG[scene]['gt']['name'])\n \n elif len(scene.split('_')) == 3: # AeroRIT\n scene, modality, part = scene.split('_')\n base_dir = Path(base_dir).expanduser().joinpath(scene)\n filepath_data = base_dir.joinpath(DATASETS_CONFIG[scene][modality]['img']['name'])\n filepath_labels = base_dir.joinpath(DATASETS_CONFIG[scene][modality]['gt']['name'])\n else :\n raise RuntimeError('Given scene unknown!')\n\n base_dir.mkdir(parents=True, exist_ok=True)\n\n # download data and load from file\n if filepath_data.suffix == '.mat': # datasets from ehu.es\n if not filepath_data.is_file():\n with TqdmUpTo(unit='B', unit_scale=True, miniters=1,\n desc=\"Downloading {}\".format(filepath_data)) as t:\n url = DATASETS_CONFIG[scene]['img']['url']\n urlretrieve(url, filename=filepath_data, reporthook=t.update_to)\n\n if not filepath_labels.is_file():\n with TqdmUpTo(unit='B', unit_scale=True, miniters=1,\n desc=\"Downloading {}\".format(filepath_labels)) as t:\n url = 
DATASETS_CONFIG[scene]['gt']['url']\n urlretrieve(url, filename=filepath_labels, reporthook=t.update_to)\n \n data = loadmat(filepath_data)[DATASETS_CONFIG[scene]['img']['key']]\n labels = loadmat(filepath_labels)[DATASETS_CONFIG[scene]['gt']['key']]\n\n elif filepath_data.suffix == '.tif': # aerorit\n if not filepath_data.is_file(): # download image if necessary\n print(\"Downloading {}\".format(filepath_data))\n url = DATASETS_CONFIG[scene][modality]['img']['url']\n gdown.download(url=url, output=str(filepath_data), quiet=False)\n\n if not filepath_labels.is_file(): # download labels if necessary\n print(\"Downloading {}\".format(filepath_labels))\n url = DATASETS_CONFIG[scene][modality]['gt']['url']\n gdown.download(url=url, output=str(filepath_labels), quiet=False)\n \n # extract part of image as defined in Rangnekar et al.\n base_dir = base_dir.joinpath(modality).joinpath(part)\n base_dir.mkdir(parents=True, exist_ok=True)\n \n # check early if data exists already to avoid unecessarily loading and encoding data\n filepath_hdf = base_dir.joinpath(f'aerorit_{modality}_{part}.h5')\n if filepath_hdf.is_file():\n return filepath_hdf\n\n # extract defined part of dataset\n start_col = DATASETS_CONFIG[scene][part]['start_col']\n end_col = DATASETS_CONFIG[scene][part]['end_col']\n \n data = np.transpose(io.imread(filepath_data), (1,2,0))[53:,7:,:]\n data = data[:, start_col:end_col, :]\n\n labels = encode_labelmap(io.imread(filepath_labels), AERORIT_COLOURLABELMAP)[53:,7:]\n labels = labels[:, start_col:end_col]\n filepath_data = filepath_hdf\n\n filepath_hdf = filepath_data.with_suffix('.h5')\n \n # export data and labels to hdf\n if not filepath_hdf.is_file():\n with h5py.File(filepath_hdf, \"w\") as f:\n f.create_dataset(\"data\", data=data)\n f.create_dataset(\"labels\", data=labels)\n f.attrs['scene'] = scene\n if not modality is None:\n f.attrs['modality'] = modality\n if not part is None:\n f.attrs['part'] = part\n return filepath_hdf\n\n return filepath_hdf", "def process_raw_data(data_dir='/home/data/nbc/athena/athena-data/'):\n\n # Calls the process_corpus function, defined below\n # process_corpus reads in the text, performs abbreviation, spelling,\n # translation, and overall text Processing\n # process_corpus outputs the processed text for each file and the stemmed file\n for feature_source in ['abstract', 'full']:\n process_corpus(data_dir, feature_source)\n\n # Calls the label_data function, defined below\n # label_data reads in the metadata csv files, concatenates them, then\n # reads in the processed text files\n # label_data outputs a binary pmid by label metadata matrix\n label_data(data_dir)\n generate_gazetteer(data_dir)", "def main():\n datasets = {}\n for dataset_name in tqdm(SOURCE_DATASET_NAMES, desc=\"Processing datasets and fitting base models\"):\n logger.info(f\"processing dataset {dataset_name}\")\n clusters_path: Optional[str] = None\n if dataset_name not in PAIRWISE_ONLY_DATASETS:\n clusters_path = os.path.join(DATA_DIR, dataset_name, dataset_name + \"_clusters.json\")\n train_pairs_path = None\n val_pairs_path = None\n test_pairs_path = None\n else:\n train_pairs_path = os.path.join(DATA_DIR, dataset_name, \"train_pairs.csv\")\n val_pairs_path = os.path.join(DATA_DIR, dataset_name, \"val_pairs.csv\")\n if not os.path.exists(val_pairs_path):\n val_pairs_path = None\n test_pairs_path = os.path.join(DATA_DIR, dataset_name, \"test_pairs.csv\")\n\n logger.info(f\"loading dataset {dataset_name}\")\n anddata = ANDData(\n signatures=os.path.join(DATA_DIR, 
dataset_name, dataset_name + \"_signatures.json\"),\n papers=os.path.join(DATA_DIR, dataset_name, dataset_name + \"_papers.json\"),\n name=dataset_name,\n mode=\"train\",\n specter_embeddings=os.path.join(DATA_DIR, dataset_name, dataset_name + \"_specter.pickle\"),\n clusters=clusters_path,\n block_type=BLOCK_TYPE,\n train_pairs=train_pairs_path,\n val_pairs=val_pairs_path,\n test_pairs=test_pairs_path,\n train_pairs_size=N_TRAIN_PAIRS_SIZE,\n val_pairs_size=N_VAL_TEST_SIZE,\n test_pairs_size=N_VAL_TEST_SIZE,\n preprocess=True,\n )\n\n logger.info(f\"featurizing {dataset_name}\")\n train, val, test = featurize(\n anddata,\n FEATURIZER_INFO,\n n_jobs=N_JOBS,\n use_cache=True,\n chunk_size=100,\n nameless_featurizer_info=NAMELESS_FEATURIZER_INFO,\n nan_value=NAN_VALUE,\n )\n X_train, y_train, nameless_X_train = train\n X_val, y_val, nameless_X_val = val\n X_test, y_test, nameless_X_test = test\n\n dataset = {}\n dataset[\"anddata\"] = anddata\n dataset[\"X_train\"] = X_train\n dataset[\"y_train\"] = y_train\n dataset[\"X_val\"] = X_val\n dataset[\"y_val\"] = y_val\n dataset[\"X_test\"] = X_test\n dataset[\"y_test\"] = y_test\n dataset[\"nameless_X_train\"] = nameless_X_train\n dataset[\"nameless_X_val\"] = nameless_X_val\n dataset[\"nameless_X_test\"] = nameless_X_test\n dataset[\"name\"] = anddata.name\n datasets[dataset_name] = dataset\n\n anddatas = [\n datasets[dataset_name][\"anddata\"]\n for dataset_name in SOURCE_DATASET_NAMES\n if dataset_name not in PAIRWISE_ONLY_DATASETS\n ]\n\n X_train = np.vstack([datasets[dataset_name][\"X_train\"] for dataset_name in SOURCE_DATASET_NAMES])\n y_train = np.hstack([datasets[dataset_name][\"y_train\"] for dataset_name in SOURCE_DATASET_NAMES])\n X_val = np.vstack(\n [datasets[dataset_name][\"X_val\"] for dataset_name in SOURCE_DATASET_NAMES if dataset_name not in {\"augmented\"}]\n )\n y_val = np.hstack(\n [datasets[dataset_name][\"y_val\"] for dataset_name in SOURCE_DATASET_NAMES if dataset_name not in {\"augmented\"}]\n )\n\n nameless_X_train = np.vstack([datasets[dataset_name][\"nameless_X_train\"] for dataset_name in SOURCE_DATASET_NAMES])\n nameless_X_val = np.vstack(\n [\n datasets[dataset_name][\"nameless_X_val\"]\n for dataset_name in SOURCE_DATASET_NAMES\n if dataset_name not in {\"augmented\"}\n ]\n )\n\n logger.info(\"fitting pairwise\")\n union_classifier = PairwiseModeler(n_iter=N_ITER, monotone_constraints=MONOTONE_CONSTRAINTS)\n union_classifier.fit(X_train, y_train, X_val, y_val)\n\n nameless_union_classifier = None\n if USE_NAMELESS_MODEL:\n logger.info(\"nameless fitting pairwise for \" + str(SOURCE_DATASET_NAMES))\n nameless_union_classifier = PairwiseModeler(\n n_iter=N_ITER,\n monotone_constraints=NAMELESS_MONOTONE_CONSTRAINTS,\n )\n nameless_union_classifier.fit(nameless_X_train, y_train, nameless_X_val, y_val)\n logger.info(\"nameless pairwise fit for \" + str(SOURCE_DATASET_NAMES))\n\n logger.info(\"fitting clusterer for\")\n union_clusterer = Clusterer(\n FEATURIZER_INFO,\n union_classifier.classifier,\n cluster_model=FastCluster(),\n search_space=search_space,\n n_jobs=N_JOBS,\n nameless_classifier=nameless_union_classifier.classifier if nameless_union_classifier is not None else None,\n nameless_featurizer_info=NAMELESS_FEATURIZER_INFO if nameless_union_classifier is not None else None,\n )\n union_clusterer.fit(anddatas)\n print(\n \"best clustering parameters:\",\n union_clusterer.best_params,\n )\n\n models = {}\n models[\"clusterer\"] = union_clusterer\n\n with open(\n 
f\"full_union_model_script_dump_average_{FEATURIZER_VERSION}.pickle\",\n \"wb\",\n ) as _pickle_file:\n pickle.dump(models, _pickle_file)\n logger.info(\"Done.\")", "def load_data(root_dir: Optional[str] = None) -> DirDataset:\n if root_dir is None:\n root_dir = os.path.join(str(Path.home()), 'fastestimator_data', 'NIH_Chestxray')\n else:\n root_dir = os.path.join(os.path.abspath(root_dir), 'NIH_Chestxray')\n os.makedirs(root_dir, exist_ok=True)\n\n image_extracted_path = os.path.join(root_dir, 'images')\n\n if not os.path.exists(image_extracted_path):\n # download data\n links = [\n 'https://nihcc.box.com/shared/static/vfk49d74nhbxq3nqjg0900w5nvkorp5c.gz',\n 'https://nihcc.box.com/shared/static/i28rlmbvmfjbl8p2n3ril0pptcmcu9d1.gz',\n 'https://nihcc.box.com/shared/static/f1t00wrtdk94satdfb9olcolqx20z2jp.gz',\n 'https://nihcc.box.com/shared/static/0aowwzs5lhjrceb3qp67ahp0rd1l1etg.gz',\n 'https://nihcc.box.com/shared/static/v5e3goj22zr6h8tzualxfsqlqaygfbsn.gz',\n 'https://nihcc.box.com/shared/static/asi7ikud9jwnkrnkj99jnpfkjdes7l6l.gz',\n 'https://nihcc.box.com/shared/static/jn1b4mw4n6lnh74ovmcjb8y48h8xj07n.gz',\n 'https://nihcc.box.com/shared/static/tvpxmn7qyrgl0w8wfh9kqfjskv6nmm1j.gz',\n 'https://nihcc.box.com/shared/static/upyy3ml7qdumlgk2rfcvlb9k6gvqq2pj.gz',\n 'https://nihcc.box.com/shared/static/l6nilvfa9cg3s28tqv1qc1olm3gnz54p.gz',\n 'https://nihcc.box.com/shared/static/hhq8fkdgvcari67vfhs7ppg2w6ni4jze.gz',\n 'https://nihcc.box.com/shared/static/ioqwiy20ihqwyr8pf4c24eazhh281pbu.gz'\n ]\n data_paths = [os.path.join(root_dir, \"images_{}.tar.gz\".format(x)) for x in range(len(links))]\n for idx, (link, data_path) in enumerate(zip(links, data_paths)):\n _download_data(link, data_path, idx, len(links))\n\n # extract data\n for idx, data_path in enumerate(data_paths):\n print(\"Extracting {}, file {} / {}\".format(data_path, idx + 1, len(links)))\n with tarfile.open(data_path) as img_tar:\n img_tar.extractall(root_dir)\n\n return DirDataset(image_extracted_path, file_extension='.png', recursive_search=False)", "def r3d(**kwargs):\n\n return _video_resnet('r3d',\n block=BasicBlock,\n conv_makers=[Conv3DSimple] * 4,\n layers=[NUM_LAYER, NUM_LAYER, NUM_LAYER, NUM_LAYER],\n stem=BasicStem, **kwargs)", "def download_fermi_crab_3fhl():\n download_data_files(FILENAMES_FERMI_3FHL_CRAB)", "def main(dataset):\n # Save everything 'MNE_DATA' dir ... 
defaults to ~/mne_data\n mne_data_dir = mne.get_config(key='MNE_DATA', default=False)\n if not mne_data_dir:\n mne.set_config('MNE_DATA', str(DEFAULT_DATA_DIR))\n DEFAULT_DATA_DIR.mkdir(exist_ok=True)\n mne_data_dir = DEFAULT_DATA_DIR\n else:\n mne_data_dir = Path(mne_data_dir)\n\n ds_names = DATASET_OPTIONS.keys() if not dataset else (dataset,)\n\n for ds_name in ds_names:\n print('\\n----------------------')\n ds_path = mne_data_dir / ds_name\n _download(ds_name=ds_name, ds_path=ds_path)", "def run(_store):\n\n\t# ###################################################### FRAMEWORK SETTINGS ######################################################\n\n\t# TODO: Play with these settings\n\t#data_column_names =['RLAaccx', 'RLAaccy', 'RLAaccz']\n\tdata_column_names =['RLAaccx', 'RLAaccy', 'RLAaccz','RLAgyrx', 'RLAgyry', 'RLAgyrz','RLAmagx', 'RLAmagy', 'RLAmagz','RLAroll', 'RLApitch', 'RLAyaw'] # Right hand accelerometer; check dataset.py for feature names\n\tp_TM = 0.6 # p(TM) - the entry probability of the garbage HMM; p(G) = (1 - p(TM)); See Lee & Kim paper\n\textension_len = 0 # Additional samples before and after the training label to compensate for cut-offs\n\n\t# No need to change these\n\toutput_dir = 'img/' # Directory is needed for intermediate output when plotting\n\tdata_dimension = len(data_column_names)\n\t# Available labels:\n\t# - 17: nested label, separates training data from free-living data, this should NOT be used directly\n\t# - 18: drinking gesture\n\t# - 19: drinking gesture\n\t# - 20: drinking gesture\n\t# - 21: drinking gesture\n\tspotter_label = 90 # Virtual label for marking spotted gestures\n\n\t# Label color lookup table; 'spotter_label' is for the spotted events\n\tlabel_to_color = {17: 'c', 18: 'b', 19: 'g', 20: 'r', 21: 'm', spotter_label: 'y'}\n\n\t# ###################################################### TRAINING DATA 18 ######################################################\n\tprint ('\\nExtracting training data ...')\n\n\t# Fetch the TRAINING dataset, this is a list of data frames\n\t# TODO: Increase the training pool\n\ttraining_participants = [11,15] # Can pick from [11, 13, 15]\n\ttraining_labels = [18] # Can pick from [18, 19, 20, 21]; see above\n\ttraining_type = 'both' # Can be initial 'training' data (scripted), 'free-living' (office) data or 'both'\n\ttraining_dfs18 = list(dataset.gen_segments(\n\t\t_store, _participants=training_participants,\n\t\t_labels=training_labels, columns=data_column_names,\n\t\t_extend_front=extension_len, _extend_back=extension_len,\n\t\t_type=training_type\n\t\t)\n\t)\n\t\n\t# Plot some (3) training gestures\n\t# TODO: try this\n\tif False:\n\t\tfor i, train_df in zip(range(3), training_dfs18):\n\t\t\ttrain_df.plot()\n\t\t\tplt.title('TRAINING set %d' % i)\n\t\t\tplt.xlabel('Samples')\n\t\t\tplt.ylabel('Raw data')\n\t\t\tplt.tight_layout()\n\t\t\tplt.show()\n\n\t# Print some statistics of the training set\n\tprint ('gesture sample statistics:')\n\tsample_lengths = [df.shape[0] for df in training_dfs18]\n\tprint ('training instances:'), len(sample_lengths)\n\tprint ('mean length: '), np.average(sample_lengths)\n\tprint ('median len: '), np.median(sample_lengths)\n\tprint ('min length: '), min(sample_lengths)\n\tprint ('max length: '), max(sample_lengths)\n# ###################################################### TRAINING DATA 19 ######################################################\n\tprint ('\\nExtracting training data ...')\n\n\t# Fetch the TRAINING dataset, this is a list of data frames\n\t# TODO: Increase the training 
pool\n\ttraining_participants = [11,15] # Can pick from [11, 13, 15]\n\ttraining_labels = [19] # Can pick from [18, 19, 20, 21]; see above\n\ttraining_type = 'both' # Can be initial 'training' data (scripted), 'free-living' (office) data or 'both'\n\ttraining_dfs19 = list(dataset.gen_segments(\n\t\t_store, _participants=training_participants,\n\t\t_labels=training_labels, columns=data_column_names,\n\t\t_extend_front=extension_len, _extend_back=extension_len,\n\t\t_type=training_type\n\t\t)\n\t)\n\t\n\t# Plot some (3) training gestures\n\t# TODO: try this\n\tif False:\n\t\tfor i, train_df in zip(range(3), training_dfs19):\n\t\t\ttrain_df.plot()\n\t\t\tplt.title('TRAINING set %d' % i)\n\t\t\tplt.xlabel('Samples')\n\t\t\tplt.ylabel('Raw data')\n\t\t\tplt.tight_layout()\n\t\t\tplt.show()\n\n\t# Print some statistics of the training set\n\tprint ('gesture sample statistics:')\n\tsample_lengths = [df.shape[0] for df in training_dfs19]\n\tprint ('training instances:'), len(sample_lengths)\n\tprint ('mean length: '), np.average(sample_lengths)\n\tprint ('median len: '), np.median(sample_lengths)\n\tprint ('min length: '), min(sample_lengths)\n\tprint ('max length: '), max(sample_lengths)\n\n\n# ###################################################### TRAINING DATA 20 ######################################################\n\tprint ('\\nExtracting training data ...')\n\n\t# Fetch the TRAINING dataset, this is a list of data frames\n\t# TODO: Increase the training pool\n\ttraining_participants = [11,15] # Can pick from [11, 13, 15]\n\ttraining_labels = [20] # Can pick from [18, 19, 20, 21]; see above\n\ttraining_type = 'both' # Can be initial 'training' data (scripted), 'free-living' (office) data or 'both'\n\ttraining_dfs20 = list(dataset.gen_segments(\n\t\t_store, _participants=training_participants,\n\t\t_labels=training_labels, columns=data_column_names,\n\t\t_extend_front=extension_len, _extend_back=extension_len,\n\t\t_type=training_type\n\t\t)\n\t)\n\t\n\t# Plot some (3) training gestures\n\t# TODO: try this\n\tif False:\n\t\tfor i, train_df in zip(range(3), training_dfs20):\n\t\t\ttrain_df.plot()\n\t\t\tplt.title('TRAINING set %d' % i)\n\t\t\tplt.xlabel('Samples')\n\t\t\tplt.ylabel('Raw data')\n\t\t\tplt.tight_layout()\n\t\t\tplt.show()\n\n\t# Print some statistics of the training set\n\tprint ('gesture sample statistics:')\n\tsample_lengths = [df.shape[0] for df in training_dfs20]\n\tprint ('training instances:'), len(sample_lengths)\n\tprint ('mean length: '), np.average(sample_lengths)\n\tprint ('median len: '), np.median(sample_lengths)\n\tprint ('min length: '), min(sample_lengths)\n\tprint ('max length: '), max(sample_lengths)\n\n# ###################################################### TRAINING DATA 21 ######################################################\n\tprint ('\\nExtracting training data ...')\n\n\t# Fetch the TRAINING dataset, this is a list of data frames\n\t# TODO: Increase the training pool\n\ttraining_participants = [11,15] # Can pick from [11, 13, 15]\n\ttraining_labels = [21] # Can pick from [18, 19, 20, 21]; see above\n\ttraining_type = 'both' # Can be initial 'training' data (scripted), 'free-living' (office) data or 'both'\n\ttraining_dfs21 = list(dataset.gen_segments(\n\t\t_store, _participants=training_participants,\n\t\t_labels=training_labels, columns=data_column_names,\n\t\t_extend_front=extension_len, _extend_back=extension_len,\n\t\t_type=training_type\n\t\t)\n\t)\n\t\n\t# Plot some (3) training gestures\n\t# TODO: try this\n\tif False:\n\t\tfor i, train_df in 
zip(range(3), training_dfs21):\n\t\t\ttrain_df.plot()\n\t\t\tplt.title('TRAINING set %d' % i)\n\t\t\tplt.xlabel('Samples')\n\t\t\tplt.ylabel('Raw data')\n\t\t\tplt.tight_layout()\n\t\t\tplt.show()\n\n\t# Print some statistics of the training set\n\tprint ('gesture sample statistics:')\n\tsample_lengths = [df.shape[0] for df in training_dfs21]\n\tprint ('training instances:'), len(sample_lengths)\n\tprint ('mean length: '), np.average(sample_lengths)\n\tprint ('median len: '), np.median(sample_lengths)\n\tprint ('min length: '), min(sample_lengths)\n\tprint ('max length: '), max(sample_lengths)\n\t# ###################################################### TESTING DATA ######################################################\n\n\n\tprint ('\\nExtracting testing data ...')\n\n\t# The test data only serves for testing the model and to get a quick look at the data. It is NOT intended for evaluation\n\n\t# Fetch TEST data; segments of labels and raw sample data\n\ttest_participant = 15\n\ttest_labels = [18] # The labels associated with the gesture\n\ttest_type = 'free-living' # Can be initial 'training' data, 'free-living' data or 'both'\n\ttest_seg = list(dataset.gen_seg_table(_store, _participants=test_participant, _labels=test_labels, _type=test_type)) # For for each subject\n\tassert test_seg # Ensure we got data, otherwise the query was invalid\n\ttest_seg = test_seg[0][1] # Fetch single frame; there is only one\n\ttest_seg = test_seg.iloc[1:2] # Pick the first 10 segments\n\ttest_min_start, test_max_stop = test_seg.iloc[0][0], test_seg.iloc[-1][1] # Limit TEST data frame to region of interest\n\ttest_min_start, test_max_stop = test_min_start - 300, test_max_stop + 300 # Add some margin for visibility\n\t\n\ttest_df = dataset.get_frame(_store, _participant=test_participant, _start=test_min_start, _stop=test_max_stop, columns=data_column_names)\n\n\t# Plot TEST data stream\n\t# TODO: try this\n\tif False:\n\t\tax = test_df.plot()\n\t\tplot_labels(test_seg, label_to_color)\n\t\tlut = {key: col for key, col in label_to_color.iteritems() if key in test_labels} # Reduce legend to used labels\n\t\tadd_labels_to_legend(_ax=ax, _label_to_color=lut)\n\t\tplt.title('TEST set with ground truth; participant=%d' % test_participant)\n\t\tplt.xlabel('Samples')\n\t\tplt.ylabel('Raw data')\n\t\tplt.tight_layout()\n\t\tplt.show()\n\n\t# ###################################################### BUILD HMM 18 ######################################################\n\tprint ('\\nBuilding the gesture model ...')\n\n\t# Example HMM; linear with 3 states; all gaussian\n\t#\n\t# model_entry --> state_1 --> state_2 --> state_3 --> model_exit\n\t# ^ | ^ | ^ |\n\t# \\__/ \\__/ \\__/\n\n\n\t# TODO: Try different models. 
Models can be nested (i.e., 'model' can have sub-models) as can be seen in the yahmm tutorial.\n\tmodel = Model(name='Drink18')\n\n\t# Gaussian probability distribution function factory\n\tdef make_dist(_mean=0, _std=2):\n\t\treturn make_nd_distribution(data_dimension, NormalDistribution, [_mean, _std])\n\n\t# The name of the State() can be suffixed with color codes:\n\t# * '-start' (green) and '-end' (red) to mark silent entry and exit points\n\t# * '-tm' (blue) to mark derived threshold model states\n\t# * '-meta' (yellow) to mark silent / meta states\n\n\t# TODO: Play with the distribution types and the initial parameters, they do not need to be all the same\n\tmodel_states = []\n\tfor i in range(3): # Generate some states\n\t\ts = State(make_dist(), name=model.name + \"_%d\" % (i + 1))\n\t\tmodel_states.append(s)\n\tmodel.add_states(model_states)\n\tmodel.add_transition(model.start, model_states[0], 1) # Entry transition\n\tmodel.add_transitions(model_states, model_states, [0.8] * len(model_states)) # 80% self loop for each state\n\tmodel.add_transitions(model_states, model_states[1:] + [model.end], [0.2] * len(model_states)) # 20% to-next for each state\n\n\t# Always bake the model after changes to update internal structures and normalize transition probabilities\n\tmodel.bake()\n\n\t# TODO: try this\n\tif False: # Plot and save visual model\n\t\tplot_model(model, _output_file=output_dir + 'drink_initial.png', _output_dpi=200) # Check plot_graph() for more parameters\n\t\tplt.title('Initial gesture HMM')\n\t\tplt.show()\n\n# ###################################################### BUILD HMM 19 ######################################################\n\tprint ('\\nBuilding the gesture model ...')\n\n\t# Example HMM; linear with 3 states; all gaussian\n\t#\n\t# model_entry --> state_1 --> state_2 --> state_3 --> model_exit\n\t# ^ | ^ | ^ |\n\t# \\__/ \\__/ \\__/\n\n\n\t# TODO: Try different models. 
Models can be nested (i.e., 'model' can have sub-models) as can be seen in the yahmm tutorial.\n\tmodel19 = Model(name='Drink19')\n\n\t# Gaussian probability distribution function factory\n\tdef make_dist(_mean=0, _std=2):\n\t\treturn make_nd_distribution(data_dimension, NormalDistribution, [_mean, _std])\n\n\t# The name of the State() can be suffixed with color codes:\n\t# * '-start' (green) and '-end' (red) to mark silent entry and exit points\n\t# * '-tm' (blue) to mark derived threshold model states\n\t# * '-meta' (yellow) to mark silent / meta states\n\n\t# TODO: Play with the distribution types and the initial parameters, they do not need to be all the same\n\tmodel_states = []\n\tfor i in range(3): # Generate some states\n\t\ts = State(make_dist(), name=model19.name + \"_%d\" % (i + 1))\n\t\tmodel_states.append(s)\n\tmodel19.add_states(model_states)\n\tmodel19.add_transition(model19.start, model_states[0], 1) # Entry transition\n\tmodel19.add_transitions(model_states, model_states, [0.8] * len(model_states)) # 80% self loop for each state\n\tmodel19.add_transitions(model_states, model_states[1:] + [model19.end], [0.2] * len(model_states)) # 20% to-next for each state\n\n\t# Always bake the model after changes to update internal structures and normalize transition probabilities\n\tmodel19.bake()\n\n\t# TODO: try this\n\tif False: # Plot and save visual model\n\t\tplot_model(model19, _output_file=output_dir + 'drink_initial.png', _output_dpi=200) # Check plot_graph() for more parameters\n\t\tplt.title('Initial gesture HMM')\n\t\tplt.show()\n\n# ###################################################### BUILD HMM 20 ######################################################\n\tprint ('\\nBuilding the gesture model ...')\n\n\t# Example HMM; linear with 3 states; all gaussian\n\t#\n\t# model_entry --> state_1 --> state_2 --> state_3 --> model_exit\n\t# ^ | ^ | ^ |\n\t# \\__/ \\__/ \\__/\n\n\n\t# TODO: Try different models. 
Models can be nested (i.e., 'model' can have sub-models) as can be seen in the yahmm tutorial.\n\tmodel20 = Model(name='Drink20')\n\n\t# Gaussian probability distribution function factory\n\tdef make_dist(_mean=0, _std=2):\n\t\treturn make_nd_distribution(data_dimension, NormalDistribution, [_mean, _std])\n\n\t# The name of the State() can be suffixed with color codes:\n\t# * '-start' (green) and '-end' (red) to mark silent entry and exit points\n\t# * '-tm' (blue) to mark derived threshold model states\n\t# * '-meta' (yellow) to mark silent / meta states\n\n\t# TODO: Play with the distribution types and the initial parameters, they do not need to be all the same\n\tmodel_states = []\n\tfor i in range(3): # Generate some states\n\t\ts = State(make_dist(), name=model20.name + \"_%d\" % (i + 1))\n\t\tmodel_states.append(s)\n\tmodel20.add_states(model_states)\n\tmodel20.add_transition(model20.start, model_states[0], 1) # Entry transition\n\tmodel20.add_transitions(model_states, model_states, [0.8] * len(model_states)) # 80% self loop for each state\n\tmodel20.add_transitions(model_states, model_states[1:] + [model20.end], [0.2] * len(model_states)) # 20% to-next for each state\n\n\t# Always bake the model after changes to update internal structures and normalize transition probabilities\n\tmodel20.bake()\n\n\t# TODO: try this\n\tif False: # Plot and save visual model\n\t\tplot_model(model20, _output_file=output_dir + 'drink_initial.png', _output_dpi=200) # Check plot_graph() for more parameters\n\t\tplt.title('Initial gesture HMM')\n\t\tplt.show()\n\n# ###################################################### BUILD HMM 21 ######################################################\n\tprint ('\\nBuilding the gesture model ...')\n\n\t# Example HMM; linear with 3 states; all gaussian\n\t#\n\t# model_entry --> state_1 --> state_2 --> state_3 --> model_exit\n\t# ^ | ^ | ^ |\n\t# \\__/ \\__/ \\__/\n\n\n\t# TODO: Try different models. 
Models can be nested (i.e., 'model' can have sub-models) as can be seen in the yahmm tutorial.\n\tmodel21 = Model(name='Drink21')\n\n\t# Gaussian probability distribution function factory\n\tdef make_dist(_mean=0, _std=2):\n\t\treturn make_nd_distribution(data_dimension, NormalDistribution, [_mean, _std])\n\n\t# The name of the State() can be suffixed with color codes:\n\t# * '-start' (green) and '-end' (red) to mark silent entry and exit points\n\t# * '-tm' (blue) to mark derived threshold model states\n\t# * '-meta' (yellow) to mark silent / meta states\n\n\t# TODO: Play with the distribution types and the initial parameters, they do not need to be all the same\n\tmodel_states = []\n\tfor i in range(3): # Generate some states\n\t\ts = State(make_dist(), name=model21.name + \"_%d\" % (i + 1))\n\t\tmodel_states.append(s)\n\tmodel21.add_states(model_states)\n\tmodel21.add_transition(model21.start, model_states[0], 1) # Entry transition\n\tmodel21.add_transitions(model_states, model_states, [0.8] * len(model_states)) # 80% self loop for each state\n\tmodel21.add_transitions(model_states, model_states[1:] + [model21.end], [0.2] * len(model_states)) # 20% to-next for each state\n\n\t# Always bake the model after changes to update internal structures and normalize transition probabilities\n\tmodel21.bake()\n\n\t# TODO: try this\n\tif False: # Plot and save visual model\n\t\tplot_model(model21, _output_file=output_dir + 'drink_initial.png', _output_dpi=200) # Check plot_graph() for more parameters\n\t\tplt.title('Initial gesture HMM')\n\t\tplt.show()\n\n\t# ###################################################### TRAIN HMM 18 ######################################################\n\tprint ('\\nTraining the gesture model 18 ...')\n\tmodel18=model\n\n\t# Get a list of training instances\n\ttraining_stream = [df.to_records(index=False).tolist() for df in training_dfs18]\n\n\t# Train gesture HMM; If you have sub-models you can train them individually even after combining them\n\tmodel18.train(training_stream, algorithm='baum-welch', transition_pseudocount=1)\n\n\t# Training changes the internal representation, back-annotate changes to internal graph for plotting\n\tbackannotate_internal_graph(model18)\n\n\t# Plot and save visual model\n\t# TODO: try this\n\tif False:\n\t\tplot_model(model18, _output_file=output_dir + 'drink_trained.png', _output_dpi=200) # Check plot_graph() for more parameters\n\t\tplt.title('Trained gesture HMM')\n\t\tplt.show()\n\n# ###################################################### TRAIN HMM 19 ######################################################\n\tprint ('\\nTraining the gesture model 19 ...')\n\t\n\t# Get a list of training instances\n\ttraining_stream = [df.to_records(index=False).tolist() for df in training_dfs19]\n\n\t# Train gesture HMM; If you have sub-models you can train them individually even after combining them\n\tmodel19.train(training_stream, algorithm='baum-welch', transition_pseudocount=1)\n\n\t# Training changes the internal representation, back-annotate changes to internal graph for plotting\n\tbackannotate_internal_graph(model19)\n\n\t# Plot and save visual model\n\t# TODO: try this\n\tif False:\n\t\tplot_model(model19, _output_file=output_dir + 'drink_trained.png', _output_dpi=200) # Check plot_graph() for more parameters\n\t\tplt.title('Trained gesture HMM')\n\t\tplt.show()\n\n# ###################################################### TRAIN HMM 20 ######################################################\n\tprint ('\\nTraining the gesture model 20 
...')\n\t\n\t\n\n\t# Get a list of training instances\n\ttraining_stream = [df.to_records(index=False).tolist() for df in training_dfs20]\n\n\t# Train gesture HMM; If you have sub-models you can train them individually even after combining them\n\tmodel20.train(training_stream, algorithm='baum-welch', transition_pseudocount=1)\n\n\t# Training changes the internal representation, back-annotate changes to internal graph for plotting\n\tbackannotate_internal_graph(model20)\n\n\t# Plot and save visual model\n\t# TODO: try this\n\tif False:\n\t\tplot_model(model20, _output_file=output_dir + 'drink_trained.png', _output_dpi=200) # Check plot_graph() for more parameters\n\t\tplt.title('Trained gesture HMM')\n\t\tplt.show()\n\n# ###################################################### TRAIN HMM 21 ######################################################\n\tprint ('\\nTraining the gesture model 21 ...')\n\t\n\t\n\n\t# Get a list of training instances\n\ttraining_stream = [df.to_records(index=False).tolist() for df in training_dfs21]\n\n\t# Train gesture HMM; If you have sub-models you can train them individually even after combining them\n\tmodel21.train(training_stream, algorithm='baum-welch', transition_pseudocount=1)\n\n\t# Training changes the internal representation, back-annotate changes to internal graph for plotting\n\tbackannotate_internal_graph(model21)\n\n\t# Plot and save visual model\n\t# TODO: try this\n\tif False:\n\t\tplot_model(model21, _output_file=output_dir + 'drink_trained.png', _output_dpi=200) # Check plot_graph() for more parameters\n\t\tplt.title('Trained gesture HMM')\n\t\tplt.show()\n\n# ###################################################### Merger Models ######################################################\n\tprint ('\\nMerging Models ...')\n\tmodelG = Model(name='MergedModel')\n\tmodelG.add_model(model18)\n\tmodelG.add_model(model19)\n\tmodelG.add_model(model20)\n\tmodelG.add_model(model21)\n\n\t# Introduce silent states for loops\n\tmodelG_meta_entry = State(None, name=modelG.name + ' start-meta')\n\tmodelG_meta_exit = State(None, name=modelG.name + ' end-meta')\n\tmodelG.add_transition(modelG.start, modelG_meta_entry, 1)\n\tmodelG.add_transition(modelG_meta_exit, modelG.end, 1)\n\n\t# p_enter_tm specifies the probability for choosing the threshold model over the gesture model\n\tmodelG.add_transition(modelG_meta_entry, model18.start, 0.1) # Enter model 18\n\tmodelG.add_transition(modelG_meta_entry, model19.start, 0.4) # Enter model 19\n\tmodelG.add_transition(modelG_meta_entry, model20.start, 0.2) # Enter model 20\n\tmodelG.add_transition(modelG_meta_entry, model21.start, 0.3) # Enter model 21\n\tmodelG.add_transition(model18.end, modelG_meta_exit, 1)\n\tmodelG.add_transition(model19.end, modelG_meta_exit, 1)\n\tmodelG.add_transition(model20.end, modelG_meta_exit, 1)\n\tmodelG.add_transition(model21.end, modelG_meta_exit, 1)\n\n\tmodelG.bake(merge='None')\n\t# TODO: try this\n\tif False: # Plot and save visual model\n\t\tplot_model(modelG, _output_file=output_dir + 'mergedmodel.png', _output_dpi=200) # Check plot_graph() for more parameters\n\t\tplt.title('Merged Model')\n\t\tplt.show()\t\n\n\n\n# ###################################################### BUILD THRESHOLD HMM ######################################################\n\tprint ('\\nBuilding the threshold model ...')\n\n\t# The threshold model contains all emitting states from the gesture model; the threshold model is ergodic\n\tmodel_tm = Model(name=\"threshold\")\n\n\t# Collect all states and their self-loop 
transition probabilities\n\ttm_states = {}\n\ttrans_mat = modelG.dense_transition_matrix() # The transition matrix is created during the baking process\n\tfor i, s in enumerate(modelG.states):\n\t\tp = trans_mat[i, i]\n\t\tif not math.isinf(p):\n\t\t\ts_ = s.tied_copy() # New state but same distribution\n\t\t\ts_.name += '-tm'\n\t\t\ttm_states[s_] = math.exp(p)\n\t#exit(-1)\n\t# TODO: try to improve the threshold model by adding states\n#\t# Add a noise state\n#\tdummy = State(make_nd_distribution(data_dimension, NormalDistribution, [0, 47]), name='1337')\n#\ttm_states[dummy] = 0.6 # 60% loop probability\n\n\t# Create the ergodic graph\n\tmodel_tm_meta = State(None, name=model_tm.name + '-meta') # Virtual node to model ergodic graph\n\tfor s, p in tm_states.items():\n\t\tmodel_tm.add_state(s)\n\t\tmodel_tm.add_transition(s, s, p) # Loop\n\t\tmodel_tm.add_transition(s, model_tm_meta, 1 - p) # Return to virtual node\n\t\tmodel_tm.add_transition(model_tm_meta, s, 1) # Enter state\n\n\t# We cannot create a 'start -> meta' as that could lead to a non-emitting 'start -> meta -> end' sequence\n\t# and consequently, a non-emitting loop in the top level HMM. Thus, force a 'start -> emitting-state' transition\n\tmodel_tm.add_transitions(model_tm.start, tm_states.keys(), [1] * len(tm_states))\n\tmodel_tm.add_transition(model_tm_meta, model_tm.end, 1)\n\n\t# Normalize transition probabilities, do not merge silent states\n\tmodel_tm.bake(merge='None')\n\n\t# Plot and save visual model\n\t# TODO: try this\n\tif False:\n\t\tplot_model(model_tm, _output_file=output_dir + 'threshold_model.png', _output_dpi=200) # Check plot_graph() for more parameters\n\t\tplt.title('Threshold HMM')\n\t\tplt.show()\n\n\t# ###################################################### BUILD TOP LEVEL HMM ######################################################\n\tprint ('\\nBuilding the top level model ...')\n\n\t# (1 - p_TM) (model)\n\t# /-----------> gesture_hmm -----\\\n\t# TLD_entry --> meta_entry --O O---> meta_exit --> TLD_exit\n\t# ^ \\-----------> threshold_hmm --/ |\n\t# | (p_TM) (model_tm) |\n\t# \\__________________________________________________/\n\n\t# Build the TLD model which just combines the gesture and threshold models\n\tmodel_tld = Model(name='TLD')\n\tmodel_tld.add_model(modelG) # Import gesture model as instance\n\tmodel_tld.add_model(model_tm) # Import threshold model as instance\n\n\t# Introduce silent states for loops\n\tmodel_tld_meta_entry = State(None, name=model_tld.name + ' start-meta')\n\tmodel_tld_meta_exit = State(None, name=model_tld.name + ' end-meta')\n\tmodel_tld.add_transition(model_tld.start, model_tld_meta_entry, 1)\n\tmodel_tld.add_transition(model_tld_meta_exit, model_tld.end, 1)\n\n\t# Loopback to start of model; this allows for capturing multiple gestures in one stream\n\tmodel_tld.add_transition(model_tld_meta_exit, model_tld_meta_entry, 1)\n\n\t# p_enter_tm specifies the probability for choosing the threshold model over the gesture model\n\tmodel_tld.add_transition(model_tld_meta_entry, model_tm.start, p_TM) # Enter threshold model\n\tmodel_tld.add_transition(model_tld_meta_entry, modelG.start, 1 - p_TM) # Enter gesture model\n\tmodel_tld.add_transition(model_tm.end, model_tld_meta_exit, 1)\n\tmodel_tld.add_transition(modelG.end, model_tld_meta_exit, 1)\n\n\t# Normalize transition probabilities, do not merge silent states\n\tmodel_tld.bake(merge='None')\n\n\t# Plot and save visual model\n\t# TODO: try this\n\tif False:\n\t\tplot_model(model_tld, _output_file=output_dir + 'tld_model.png', 
_output_dpi=200) # Check plot_graph() for more parameters\n\t\tplt.title('Top level HMM')\n\t\tplt.show()\n\n\t# ###################################################### TEST HMMs ######################################################\n\tprint ('\\nTesting the top level model ...')\n\n\t# Hide some of the silent states to increase readability\n\tdrop_states = [model_tld_meta_entry, model_tld_meta_exit, model_tm_meta]\n\n\t# Test with limited TRAINING data; only use sets\n\t# TODO: try this\n\tif False:\n\t\tfor i, train_df in zip(range(3), training_dfs):\n\n\t\t\t# Decode evaluation set using the top level model; omit meta states for readability when plotting\n\t\t\tpath = output_dir + 'TRAINING set %d' % i\n\t\t\ttest_decoded = from_data_make_sequence(train_df, _models=model_tld, _drop_states=drop_states, _output_prefix=path, _show=True)\n\t\t\tplt.title('Testing with TRAINING set %d' % i)\n\n\t\t\t# Convert the sequence to segments (intervals); result is compatible with plot_labels()\n\t\t\t# If we capture a sequence part of the gesture HMM 'model' assign the label ID 'spotter_label'\n\t\t\ttest_spotted_seg = from_sequence_make_segments(test_decoded, _model_to_label={model: spotter_label})\n\t\t\ttest_spotted_seg = test_spotted_seg[0] # We only have a single model to evaluate\n\n\t\t\t# Next plot the data stream\n\t\t\tax = train_df.plot()\n\n\t\t\t# The whole set is ground truth, no need for coloring, just color the spotted segment\n\t\t\tplot_labels(test_spotted_seg, _label_to_color=label_to_color, _alpha=0.5)\n\t\t\tlut = {key: col for key, col in label_to_color.iteritems() if key in test_labels + [spotter_label]} # Reduce legend to used labels\n\t\t\tadd_labels_to_legend(_ax=ax, _label_to_color=lut)\n\n\t\t\tplt.title('Testing with TRAINING set %d' % i)\n\t\t\tplt.xlabel('Samples')\n\t\t\tplt.ylabel('Raw data')\n\n\t\t\tplt.tight_layout()\n\t\t\tplt.savefig(output_dir + 'testing with TRAINING set %d.png' % i, bbox_inches='tight')\n\t\t\tplt.show()\n\n\t# ###################################################### EVALUATE HMM ######################################################\n\tprint ('\\nEvaluating the top level model ...')\n\n\t# Demonstrate evaluation with test set\n\t# TODO: Increase the test set to cover more participants.\n\teval_labels = test_labels\n\teval_dfs = [test_df] # We can evaluate more than one test DataFrame\n\teval_segs = [test_seg]\n\t\n\n\t# Evaluate each frame individually\n\teval_spotted_segs = []\n\tfor i, (eval_df, eval_seg) in enumerate(zip(eval_dfs, eval_segs)):\n\t\n\t\t# Decode evaluation set using the top level model; omit meta states for readability when plotting\n\t\t# TODO: try '_show=True' to plot the decoded sequence as graph\n\t\tpath = output_dir + 'EVALUATION set %d' % i\n\t\teval_decoded = from_data_make_sequence(eval_df, _models=model_tld, _drop_states=drop_states, _output_prefix=path, _show=True)\n\t\tplt.title('EVALUATION set %d' % i)\n\n\t\t# Convert the sequence to segments (intervals); result is compatible with plot_labels()\n\t\t# If we capture a sequence part of the gesture HMM 'model' assign the label ID 'spotter_label'\n\t\teval_spotted_seg = from_sequence_make_segments(eval_decoded, _model_to_label={modelG: spotter_label})\n\t\t\t\t\n\t\teval_spotted_seg = eval_spotted_seg[0] # We only have a single model to evaluate\n\t\t\n\t\teval_spotted_segs.append(eval_spotted_seg)\n\n\t\t# TODO: Implement spotter performance evaluation per frame (based on eval_spotted_seg and eval_seg)\n\t\tprint ('Evaluation results for set %d ...') % 
i\n\t\tprint ('Ground truth')\n\t\tprint (eval_seg)\n\t\tprint ('Spotted segments')\n\t\tprint (eval_spotted_seg)\n\t\tprint ('')\n\t\t\n\t\t#Evaluation metrics (recall and precision) computation\n\t\tif(not(eval_spotted_seg.empty) and len(eval_spotted_seg)==1):\n\t\t\tif ((int(eval_spotted_seg['End']) <= int(eval_seg['Begin'])) or (int(eval_spotted_seg['Begin']) >= int(eval_seg['End']))):\n\t\t\t\tprint \"Precision: 0\"\n\t\t\t\tprint \"Recall: 0\"\n\t\t\telif (int(eval_spotted_seg['Begin'])<=int(eval_seg['Begin'])):\n\t\t\t\tprint \"Precision: \"+str(float(min(int(eval_seg['End']),int(eval_spotted_seg['End']))-int(eval_seg['Begin']))/\t(int(eval_spotted_seg['End'])-int(eval_spotted_seg['Begin'])))\n\t\t\t\tprecision = float(min(int(eval_seg['End']),int(eval_spotted_seg['End']))-int(eval_seg['Begin']))/(int(eval_spotted_seg['End'])-int(eval_spotted_seg['Begin']))\n\t\t\t\tprint \"Recall: \"+str(float(min(int(eval_seg['End']),int(eval_spotted_seg['End']))-int(eval_seg['Begin']))/(int(eval_seg['End'])-int(eval_seg['Begin'])))\n\t\t\n\t\t\telif ((int(eval_spotted_seg['Begin'])<=int(eval_seg['End'])) and (int(eval_spotted_seg['Begin'])>=int(eval_seg['Begin']))):\n\t\t\t\tprint \"Precision: \"+str(float(min(int(eval_seg['End']),int(eval_spotted_seg['End']))-int(eval_spotted_seg['Begin']))/(int(eval_spotted_seg['End'])-int(eval_spotted_seg['Begin'])))\t\n\t\t\t\tprecision = float(min(int(eval_seg['End']),int(eval_spotted_seg['End']))-int(eval_spotted_seg['Begin']))/(int(eval_spotted_seg['End'])-int(eval_spotted_seg['Begin']))\n\t\t\t\tprint \"Recall: \"+str(float(min(int(eval_seg['End']),int(eval_spotted_seg['End']))-int(eval_spotted_seg['Begin']))/(int(eval_seg['End'])-int(eval_seg['Begin'])))\n\n\t\t# Plot the evaluation data set with labels\n\t\tif True:\n\t\t\tax = eval_df.plot()\n\t\t\tplt.tight_layout()\n\t\t\tplt.title('EVALUATION set %d, label of spotted events=%d' % (i, spotter_label))\n\t\t\tplt.xlabel('Samples')\n\t\t\tplt.ylabel('Raw data')\n\t\t\tplot_labels(eval_seg, _label_to_color=label_to_color)\n\t\t\tplot_labels(eval_spotted_seg, _label_to_color=label_to_color, _alpha=0.5)\n\n\t\t\t# Reduce legend to used labels\n\t\t\tlut = {key: col for key, col in label_to_color.iteritems() if key in eval_labels + [spotter_label]}\n\t\t\tadd_labels_to_legend(_ax=ax, _label_to_color=lut)\n\n\t\t\tplt.tight_layout()\n\t\t\tplt.savefig(output_dir + 'evaluation samples %d.png' % i, bbox_inches='tight')\n\n\t\tplt.show()\n\n\t# TODO: Implement spotter performance evaluation over all frames (based on eval_spotted_segs and eval_segs)", "def main(unused_argv):\n make_dir(FLAGS.raw_dir)\n\n # Get paths of download/extracted training and evaluation files.\n print(\"Downloading data from source\")\n train_files = get_raw_files(FLAGS.raw_dir, constants.TRAIN_DATA_SOURCES)\n eval_files = get_raw_files(FLAGS.raw_dir, constants.EVAL_DATA_SOURCES)", "def _preprocess(self):\n for f in self._variables:\n self._path.joinpath(f).mkdir(parents=True, exist_ok=True)\n\n for i in tqdm(range(self._size)):\n linear, w = self._get_spectrograms(i)\n self._store_entry(i, linear, w)", "def process_data(output_folder):\n # select imgs\n img_folder = join(output_folder, 'img')\n select_img(output_folder, img_folder, 'HE-green')\n\n mask_folder = join(output_folder, 'mask')\n select_img(output_folder, mask_folder, '_EF5')", "def main():\n parser = specify_parser()\n args = parser.parse_args()\n\n mapping = init_data(args.datafile[0])\n loaded_data = read(args.input)\n\n mount(mapping, loaded_data)", "def 
get_data_parascans(rootdir, datasetnames, filterdata):\n datasets = {}\n\n print('Loading: ' + str(len(datasetnames)) + ' datasets')\n for dataset in tqdm(datasetnames):\n time.sleep(0.1)\n\n # Original images (to predict)\n images_org = load_scans(rootdir + dataset + '/crop_org')\n\n # Ground truth images (mask image of expert)\n images_gt = load_scans(rootdir + dataset + '/crop_gt')\n images_gt = sitk.GetArrayFromImage(images_gt)\n\n # Smoothed images by specific filter\n images_smoothed = load_scans_filter(images_org, filterdata)\n\n # Save images in datasets dictionary\n datasets.update({dataset : {'org': images_org, 'gt': images_gt, 'smoothed': images_smoothed}})\n\n print(\"datasets created\")\n return datasets", "def load(self, dirs_to_look_in='', see_files=False, raw_data=False, \n current_dir=False, pre_calculated=False):\n all_cubes = []\n get_info = True\n # List used when just looking at filenames or loading raw data.\n see_files_to_load = []\n \n for lag in self._lags_dict.keys():\n search_string = self.prepend_lag_string + \\\n self.lag_str_format % lag + \\\n self.append_lag_string \n files_to_load = self._find_files(self.directory, dirs_to_look_in, \n search_string, current_dir,\n see_files)\n for load_file in files_to_load:\n if see_files or raw_data:\n see_files_to_load.append(load_file)\n continue\n fourier_cubelist = iris.load(load_file, self.variable)\n if not fourier_cubelist:\n continue\n if get_info:\n # Gather universal information from the first data to be \n # loaded.\n if self.xy_coords is None:\n self.xy_coords = [coord.name() \n for coord in get_xy_coords(\n fourier_cubelist[0])]\n self.time_unit = fourier_cubelist[0].coord(\n self.time_coord).units\n # Take a copy of a cube.\n skeleton_cube = fourier_cubelist[0].copy()\n get_info = False\n \n for day_of_year in self._lags_dict[lag]:\n if not pre_calculated:\n clim_data = self._calculate_clim_data(day_of_year, \n fourier_cubelist)\n clim_cube = skeleton_cube.copy()\n clim_cube.data = clim_data\n else:\n assert len(fourier_cubelist) == 1, 'Not a valid pre '\\\n 'calculated climatology.'\n clim_cube = fourier_cubelist[0]\n assert clim_cube.shape[0] == 365, 'Not a valid pre '\\\n 'calculated climatology.'\n clim_cube = clim_cube[day_of_year - 1]\n # Remove time and forecast ref coords as they are to be \n # replaced.\n clim_cube = remove_coords(clim_cube, \n [self.time_coord,\n self.forecast_ref_time]+\\\n self.unwanted_coords)\n # Replace time coord with day of year number.\n clim_cube.add_aux_coord(iris.coords.AuxCoord(\n day_of_year,\n standard_name=self.time_coord))\n # Replace forecast ref coord with initialisation 'day of \n # year' number.\n clim_cube.add_aux_coord(iris.coords.AuxCoord(\n day_of_year - lag,\n standard_name=self.forecast_ref_time))\n all_cubes.append(clim_cube)\n \n if see_files:\n print '\\n'.join(see_files_to_load)\n return\n if raw_data:\n return iris.load(see_files_to_load, self.variable)\n \n all_cubes = iris.cube.CubeList(all_cubes)\n cube = all_cubes.merge_cube()\n cube = self._area_inst.check_cube_area_bounds(cube, self.xy_coords, \n self.area_bounds)\n cube = self.extract_area_bounds(cubes=cube)\n \n # Get day and month dates from day of year numbers to be added to \n # atrributes.\n self.cube_init_dates = [self._calculate_date(day_of_year) \n for day_of_year in \n cube.coord(self.forecast_ref_time).points]\n self.cube_dates = [self._calculate_date(day_of_year) \n for day_of_year in \n cube.coord(self.time_coord).points]\n cube.coord(self.forecast_ref_time).attributes = {\n 
'dates':self.cube_init_dates}\n cube.coord(self.time_coord).attributes = {'dates':self.cube_dates}\n self.cube = cube\n self.metadata = self._get_metadata()\n return self.cube", "def getskt_msr_action3d(action_id, subject_id, event_id):\n\n filename = rootpath+'outputUT\\\\a{:0>2}_s{:0>2}_e{:0>2}_skeleton3D.txt'.format(action_id, subject_id, event_id)\n if not os.path.exists(filename):\n return\n data = np.loadtxt(filename)\n #data[:,2] /= 4\n data[:,[1,2]] = data[:,[2,1]]\n #print(data.shape[0]/20)\n return np.reshape(data, [-1, 20, 3])", "def load_data(data_dir):\n contents = os.listdir(data_dir)\n fnames = [i for i in contents if i.endswith('.z')]\n keys = [i.rstrip('.z') for i in fnames]\n data_files = [os.path.join(data_dir, i) for i in fnames]\n data = {}\n for key, val in zip(keys, data_files):\n if 'x_rank' in key:\n continue\n io.log(f'Restored {key} from {val}.')\n data[key] = io.loadz(val)\n\n return AttrDict(data)", "def Test_data():\n print (\"loading test data ...\")\n time_start = time.time()\n data_root = '/media/keziwen/86AA9651AA963E1D'\n\n with h5py.File(join(data_root, './data/test_real2.h5')) as f:\n test_real = f['test_real'][:]\n with h5py.File(join(data_root, './data/test_imag2.h5')) as f:\n test_imag = f['test_imag'][:]\n test_real = np.transpose(test_real, (0, 1, 3, 2))\n test_imag = np.transpose(test_imag, (0, 1, 3, 2))\n test_data = test_real+1j*test_imag\n time_end = time.time()\n print ('dataset has been created using {}s'.format(time_end - time_start))\n return test_data", "def run_query(data_path, name, RA, DEC, config, z=\"Redshift\"):\n # Set the paths based on the where you want the data to be downloaded and\n # the identified for the sources/field the redshifts are downloaded for\n if not os.path.exists(f\"{data_path}/{name}\"):\n os.makedirs(f\"{data_path}/{name}\")\n path_concat = f\"{data_path}/{name}/{name}_online_redshift.fits\"\n path_ident = f'{path_concat.replace(\".fits\", \"\")}_ident.fits'\n path_unique = f'{path_concat.replace(\".fits\", \"\")}_ident_unique.fits'\n\n # Build coordinates\n coords = coord.SkyCoord(RA, DEC)\n\n # Perform the redshift query on Vizier and NED and write to fits file\n grand_table = query_redshift(coords, data_path, name, config)\n\n grand_table.meta[\"description\"] = \"Vizier and NED redshifts\"\n grand_table.write(path_concat, format=\"fits\", overwrite=True)\n\n # Identify duplicates and keep only the best redshift measurement\n duplicates = identify_duplicates(path_concat, path_ident, RA=\"RA\", DEC=\"DEC\")\n if duplicates == True:\n find_groups_redshift(path_ident, path_unique, z)\n else:\n shutil.copyfile(path_ident, path_unique)", "def generate_dat_files(rspecs, datroot, bands, labels):\n d = ds9.ds9()\n d.set('rgb')\n d.set('rgb red')\n\n # Save plaintext projection data\n # Idea: minimize file (band) loading operations\n for fname, flab in zip(bands, labels):\n d.set('file ' + fname) # Load a band\n for i in xrange(len(rspecs)):\n d.set('regions', rspecs[i]) # Load a region\n d.set('rgb red') # Plot projection data\n dat_fname = '{0}_{1:02d}_band_{2}.dat'.format(datroot, i+1, flab)\n d.set('plot {0} save {1}'.format(d.get('plot'), dat_fname))\n d.set('regions delete all')\n d.set('exit')", "def run(path, f3_param=[[1, 0.01]], minArea=20, saveNumber=0):\n\tprint('=== path:', path)\n\t\n\t# load x/y/z voxel size (assumes .tif was saved with Fiji\n\txVoxel, yVoxel, zVoxel = readVoxelSize(path)\n\tprint(' xVoxel:', xVoxel, 'yVoxel:', yVoxel, 'zVoxel:', zVoxel)\n\t\n\t# load the data\n\treader = 
AICSImage(path) \n\tIMG = reader.data.astype(np.float32)\n\tprint(' IMG.shape:', IMG.shape)\n\n\tstructure_channel = 0\n\tstruct_img0 = IMG[0,structure_channel,:,:,:].copy()\n\n\t# give us a guess for our intensity_scaling_param parameters\n\t#from aicssegmentation.core.pre_processing_utils import suggest_normalization_param\n\t#suggest_normalization_param(struct_img0)\n\tlow_ratio, high_ratio = my_suggest_normalization_param(struct_img0)\n\n\t#intensity_scaling_param = [0.0, 22.5]\n\tintensity_scaling_param = [low_ratio, high_ratio]\n\tprint('*** intensity_normalization() intensity_scaling_param:', intensity_scaling_param)\n\t\n\t# intensity normalization\n\tprint('=== calling intensity_normalization()')\n\tstruct_img = intensity_normalization(struct_img0, scaling_param=intensity_scaling_param)\n\n\t# smoothing with edge preserving smoothing \n\tprint('=== calling edge_preserving_smoothing_3d()')\n\tstructure_img_smooth = edge_preserving_smoothing_3d(struct_img)\n\n\t#\n\t\"\"\"\n\tsee: notebooks/playground_filament3d.ipynb\n\n\tscale_x is set based on the estimated thickness of your target filaments.\n\t\tFor example, if visually the thickness of the filaments is usually 3~4 pixels,\n\t\tthen you may want to set scale_x as 1 or something near 1 (like 1.25).\n\t\tMultiple scales can be used, if you have filaments of very different thickness.\n\tcutoff_x is a threshold applied on the actual filter reponse to get the binary result.\n\t\tSmaller cutoff_x may yielf more filaments, especially detecting more dim ones and thicker segmentation,\n\t\twhile larger cutoff_x could be less permisive and yield less filaments and slimmer segmentation.\n\t\"\"\"\n\t#f3_param = [[1, 0.01]] # [scale_1, cutoff_1]\n\tprint('=== calling filament_3d_wrapper() f3_param:', f3_param)\n\tbw = filament_3d_wrapper(structure_img_smooth, f3_param)\n\t\t\n\t#\n\t#minArea = 20 # from recipe\n\tprint('=== calling remove_small_objects() minArea:', minArea)\n\tseg = remove_small_objects(bw>0, min_size=minArea, connectivity=1, in_place=False)\n\n\t#\n\t# save original file again (with saveNumber\n\tsaveNumberStr = ''\n\tif saveNumber>1:\n\t\tsaveNumberStr = '_' + str(saveNumber)\n\t\t\n\t#\n\t# save mask\n\tseg = seg >0\n\tout=seg.astype(np.uint8)\n\tout[out>0]=255\n\t\n\t# save _dvMask\n\tmaskPath = os.path.splitext(path)[0] + '_dvMask' + saveNumberStr + '.tif'\n\tprint('=== saving 3D mask [WILL FAIL IF FILE EXISTS] as maskPath:', maskPath)\n\ttry:\n\t\twriter = omeTifWriter.OmeTifWriter(maskPath)\n\t\twriter.save(out)\n\texcept(OSError) as e:\n\t\tprint(' error: file already exists, di dnot resave, maskPath:', maskPath)\n\t\t\n\t#\n\t# analyze skeleton, take a 3d mask and analyze as a 1-pixel skeleton\n\tretDict0, mySkeleton = myAnalyzeSkeleton(out=out, imagePath=path)\n\tretDict = OrderedDict()\n\tretDict['tifPath'] = path\n\tretDict['maskPath'] = maskPath\n\tretDict['tifFile'] = os.path.basename(path)\n\tretDict['xVoxel'] = xVoxel\n\tretDict['yVoxel'] = yVoxel\n\tretDict['zVoxel'] = zVoxel\n\t#\n\tretDict['params'] = OrderedDict()\n\tretDict['params']['saveNumber'] = saveNumber\n\tretDict['params']['intensity_scaling_param'] = intensity_scaling_param # calculated in my_suggest_normalization_param\n\tretDict['params']['f3_param'] = f3_param[0] # cludge, not sure where to put this. 
f3_param is a list of list but screws up my .csv output !!!\n\tretDict['params']['minArea'] = minArea\n\n\tretDict.update( retDict0 )\n\n\t# save 1-pixel skeleton: mySkeleton\n\t# save _dvSkel\n\tskelPath = os.path.splitext(path)[0] + '_dvSkel' + saveNumberStr + '.tif'\n\tprint('=== saving 3D skel [WILL FAIL IF FILE EXISTS] as maskPath:', skelPath)\n\ttry:\n\t\twriter = omeTifWriter.OmeTifWriter(skelPath)\n\t\twriter.save(mySkeleton)\n\texcept(OSError) as e:\n\t\tprint(' error: file already exists, di dnot resave, skelPath:', skelPath)\n\t\t\t\n\treturn retDict", "def run_data (arguments):\n if arguments.define_labels:\n data.define_labels()\n elif arguments.preprocess:\n # Preprocess from data_raw --> data_preprocessed\n data.preprocess()\n elif arguments.annotate:\n # Annotate from data_preprocessed --> data_annotated\n reverse = False # DEBUG\n annotator.annotate(reverse)\n elif arguments.split:\n # Split from data_annotated --> train.txt/valid.txt\n restrict = 100 # Default: Keep 100% of all files\n splitter.train_valid(restrict_to=restrict)", "def analysis():\n global prediction\n\n json_path = os.path.join(basedir, 'static', 'data', 'tmp_json')\n # csv_path = os.path.join(basedir, 'static', 'data', 'csv')\n # if not os.path.exists(csv_path):\n # os.mkdir(csv_path)\n\n if os.name == 'nt':\n audio_file = Path(os.path.join(json_path, 'audio_data.shlf.dir'))\n video_file = Path(os.path.join(json_path, 'facial_data.shlf.dir'))\n else:\n audio_file = Path(os.path.join(json_path, 'audio_data.shlf'))\n video_file = Path(os.path.join(json_path, 'facial_data.shlf'))\n\n # Files exists\n if audio_file.is_file() and video_file.is_file():\n with shelve.open(os.path.join(json_path, 'facial_data.shlf')) as shelf:\n emotion_data = shelf['emotion_data']\n microexpression_data = shelf['micro_expression_data']\n blink_data = shelf['blink_data']\n\n with shelve.open(os.path.join(json_path, 'audio_data.shlf')) as shelf:\n mean_energy = shelf['mean_energy']\n max_pitch_amp = shelf['max_pitch_amp']\n vowel_duration = shelf['vowel_duration']\n pitch_contour = shelf['pitch_contour']\n\n else:\n emotion_data = None\n microexpression_data = None\n blink_data = None\n mean_energy = None\n max_pitch_amp = None\n vowel_duration = None\n pitch_contour = None\n\n # Training Files (choose one)\n # soc_file = os.path.join(basedir, 'static', 'data', 'train_files', 'first_soc.txt')\n # niko_file = os.path.join(basedir, 'static', 'data', 'train_files', 'first_niko.txt')\n # vero_file = os.path.join(basedir, 'static', 'data', 'train_files', 'first_vero.txt')\n\n # txt_file = soc_file\n\n # train_data = []\n\n # for cases where one parameter has more elements\n # for i in range(min(len(blink_data), len(microexpression_data), len(mean_energy))):\n # train_data.append(0)\n\n # train_file = open(txt_file)\n\n # for line in train_file:\n # index1 = int((int(line[4]) * 600) + ((int(line[5]) * 60) + (int(line[7]) * 10) + int(line[8])) / 2)\n # index2 = int((int(line[10]) * 600) + ((int(line[11]) * 60) + (int(line[13]) * 10) + int(line[14])) / 2)\n # if line[0] == 'F':\n # train_data[index1] = 1\n # train_data[index2] = 1\n\n # with open(os.path.join(csv_path, 'train.csv'), 'w', newline='') as csv_file:\n # writer = csv.writer(csv_file)\n # writer.writerow(['Time Interval', 'Micro-expressions', 'Blinks',\n # 'Mean Energy', 'Max Pitch Amplitude', 'Vowel Duration', 'Fundamental Frequency',\n # 'False/True'])\n\n # # for cases where one parameter has more elements than another\n # for index in range(min(len(mean_energy), 
len(blink_data), len(microexpression_data))):\n # writer.writerow([index, microexpression_data[index], blink_data[index],\n # mean_energy[index], max_pitch_amp[index], vowel_duration[index], pitch_contour[index],\n # train_data[index]])\n\n # finalresults = [['Time Interval', 'Micro-expressions', 'Blinks',\n # 'Mean Energy', 'Max Pitch Amplitude', 'Vowel Duration', 'Fundamental Frequency' ]]\n final_results = []\n\n for index in range((min(len(mean_energy), len(blink_data), len(microexpression_data)))):\n final_results.append([microexpression_data[index], blink_data[index],\n mean_energy[index], max_pitch_amp[index], vowel_duration[index],\n pitch_contour[index]])\n\n prediction[0] = predict(final_results)\n\n return render_template('analysis.html', mean_energy=mean_energy, max_pitch_amp=max_pitch_amp,\n vowel_duration=vowel_duration, pitch_contour=pitch_contour, blink_data=blink_data,\n microexpression_data=microexpression_data, emotion_data=emotion_data)", "def process_data(dataset='school'):\n\n assert isinstance(dataset, str)\n assert dataset == 'school' or dataset == 'industry'\n\n if dataset == 'school':\n # get the directory where all the raw files live\n folder_name = os.path.join('raw_data')\n\n # create a connection to a database where all our processed data live\n # for details please see `SQLite.py`\n # db = SQLite(os.path.join('processed_data','database.db'))\n db = SQLite(os.path.join('processed_data','school.db'))\n else:\n folder_name = os.path.join('industry_data')\n db = SQLite(os.path.join('processed_data','industry.db'))\n\n\n # calling the above generator, we work with the every raw data file in the folder\n for i in get_raw_data_filenames_in_txt_format(folder_name):\n\n # calling the word_freq function in another file.\n # this function gives back two dictionaries including\n # (1) single words and their frequencies\n # (2) bigram phrases and their frequencies\n # for more detail please see `word_freq.py`\n logger.info(f'Currently working on: {i}')\n # print(\"[process data] Currently working on :\", i)\n dict_single, dict_bigram = word_freq(\n os.path.join(folder_name, i))\n\n # passing the two dictionaries into our database\n table_name = i.split('.txt')[0]\n table_name_single = table_name + '_single'\n table_name_bigram = table_name + '_bigram'\n\n # creating corresponding table and insert the dictionary into the table\n # err is an error code for debugging purposes, for details, please see\n # `SQLite.py`\n db.create_table(table_name_single)\n db.insert_dict(table_name_single, dict_single)\n\n db.create_table(table_name_bigram)\n db.insert_dict(table_name_bigram, dict_bigram)", "def create_dataset(data_folder: str, dataset_file: str, targets_file: str = os.path.join('data', 'targets.pkl')):\n files = sorted(glob.glob(os.path.join(data_folder, '**/*.jpg'), recursive=True))\n images = []\n crop_sizes = []\n crop_centers = []\n targets = []\n for image in tqdm(files, desc='creating dataset', total=len(files)):\n img = Image.open(image)\n # quadruple dataset by vertical and horizontal flipping\n for i in range(4):\n if i == 1 or i == 3:\n img = img.transpose(Image.FLIP_LEFT_RIGHT)\n if i == 2:\n img = img.transpose(Image.FLIP_TOP_BOTTOM)\n x, y, w, h, cx, cy = get_random_image_values()\n resized = img.resize((y, x), Image.LANCZOS) # mind thee: x and y swapped\n arr = np.array(resized, dtype=np.float32)\n arr, target_array = create_cropped_data(np.copy(arr), (w, h), (cx, cy), crop_only=False)\n images.append(arr)\n crop_sizes.append((w, h))\n crop_centers.append((cx, 
cy))\n targets.append(target_array)\n data = {'images': images, 'crop_sizes': crop_sizes, 'crop_centers': crop_centers}\n # persist on harddrive\n with open(dataset_file, 'wb') as f:\n pickle.dump(data, f)\n with open(targets_file, 'wb') as f:\n pickle.dump(targets, f)\n print(f'created datset and saved it to {dataset_file} and targets to {targets_file}')", "def main():\n spark = create_spark_session()\n input_data = \"s3a://udacity-dend\"\n output_data = \"s3a://vivek1bucket\"\n \n process_song_data(spark, input_data, output_data) \n process_log_data(spark, input_data, output_data)", "def data_prepare(raw_datapath, save_path, sample_size=256):\n ## data path\n data_path = raw_datapath\n ## sample size\n data_size = sample_size\n\n ## data lists\n pts = ['100', '104', '108', '113', '117', '122', '201', '207', '212', '217', '222', '231',\n '101', '105', '109', '114', '118', '123', '202', '208', '213', '219', '223', '232',\n '102', '106', '111', '115', '119', '124', '203', '209', '214', '220', '228', '233',\n '103', '107', '112', '116', '121', '200', '205', '210', '215', '221', '230', '234']\n\n ## map the ~19 classes to 5 classes\n ## according to the paper https://arxiv.org/pdf/1805.00794.pdf\n mapping = {'N': 0, 'L': 0, 'R': 0, 'e': 0, 'j': 0, 'B': 0, # N = 0\n 'A': 1, 'a': 1, 'J': 1, 'S': 1, # S = 1\n 'V': 2, 'E': 2, 'r': 2, 'n': 2, # V = 2\n 'F': 3, # F = 3\n '/': 4, 'f': 4, 'Q': 4, '?': 4} # Q = 4\n ignore = ['+', '!', '[', ']', 'x', '~', '|', '\"']\n\n ## we split the each set of the data into size 256( which we can see the ecg pulse, just one pulse)\n def dataSaver(dataset=pts, data_size=data_size):\n input_size = data_size ## default\n\n def dataprocess():\n ecg = np.zeros((1, input_size))\n label = np.zeros((1, 1))\n for num in tqdm(dataset):\n print(num, 'now')\n idx = 0 ## count for the matrixes\n record = wfdb.rdrecord(data_path + num, smooth_frames=True)\n\n ## normalize the data ecg\n signals0 = np.nan_to_num(record.p_signal[:, 0])\n # signals1 = np.nan_to_num(record.p_signal[:, 1])\n min_max_scaler = preprocessing.MinMaxScaler()\n signals0 = min_max_scaler.fit_transform(signals0.reshape(-1, 1))\n # signals1 = min_max_scaler.fit_transform(signals1.reshape(-1, 1))\n signals0 = signals0.reshape(-1)\n # signals1 = signals1.reshape(-1)\n\n ## find peaks # R-peaks\n ## we only use the channel 0\n peaks, _ = find_peaks(signals0, distance=150)\n\n X = np.zeros((len(peaks), input_size))\n Y = np.zeros((len(peaks), 1))\n\n # skip a first peak to have enough range of the sample\n # in the for loop, we look for the annotation\n for peak in tqdm(peaks[1:-1]):\n start, end = peak - input_size // 2, peak + input_size // 2\n start = max([0, start])\n end = min([len(signals0), end])\n ann = wfdb.rdann(data_path + num, extension='atr', sampfrom=start, sampto=end,\n return_label_elements=['symbol'])\n symbol = ann.symbol\n count = 0\n if len(symbol) != 1:\n for sym in symbol:\n if sym in ignore:\n count += 1\n continue\n elif sym == 'N':\n continue\n else:\n symbol = sym\n break\n if count > 0 and len(symbol) > 1:\n symbol = '+'\n elif len(symbol) > 1:\n symbol = 'N'\n elif len(symbol) == 0:\n symbol = '+'\n assert len(symbol) <= 1, \"the symbol is not only one.{} len\".format(len(symbol))\n\n if len(symbol) == 1:\n for ss in symbol:\n if ss in ignore:\n continue\n else:\n Y[idx, 0] = mapping[ss]\n sig = signals0[start:end]\n X[idx, :len(sig)] = sig\n idx += 1\n ecg = np.concatenate((ecg, X), axis=0)\n label = np.concatenate((label, Y), axis=0)\n ecg = ecg[1:, :]\n label = label[1:, :]\n ecg = 
pd.DataFrame(ecg)\n label = pd.DataFrame(label)\n\n return ecg, label\n ecg, label = dataprocess()\n return ecg, label\n\n ecg, label = dataSaver(pts)\n ecg_path = save_path + \"/ecg_signal_{}.csv\".format(data_size)\n label_path = save_path + \"/label_{}.csv\".format(data_size)\n ecg.to_csv(ecg_path, index=None, header=None)\n label.to_csv(label_path, index=None, header=None)\n return ecg, label", "def prepareData(args):\n print(\"Starting preprocessing\")\n\n # params\n max_midi = args['max_midi']\n min_midi = args['min_midi']\n note_range = args['note_range']\n window_size = args['window_size']\n sr = args['sr']\n hop_length = args['hop_length']\n wav_dir = args['wav_dir']\n\n datapath = os.path.join(args['proj_root'], 'Features')\n bin_multiple = int(args['bin_multiple'])\n\n framecnt = 0\n maxFramesPerFile = args['maxFramesPerFile']\n maxFrames = args['maxFrames']\n\n fileappend = str(maxFramesPerFile) + 'pf_max' + str(maxFrames) + '.dat'\n\n filenameIN = os.path.join(datapath, 'input_' + fileappend)\n filenameOUT = os.path.join(datapath, 'output_' + fileappend)\n\n if os.path.isfile(filenameIN) and os.path.isfile(filenameOUT):\n n_bins = note_range * bin_multiple\n print('loading precomputed data from ' + filenameIN)\n mmi = np.memmap(filenameIN, mode='r', dtype=\"float64\")\n inputs = np.reshape(mmi, (-1, window_size, n_bins))\n\n mmo = np.memmap(filenameOUT, mode='r', dtype=\"float64\")\n outputs = np.reshape(mmo, (-1, note_range))\n\n return inputs, outputs, datapath\n\n inputs, outputs = [], []\n addCnt, errCnt = 0, 0\n\n # hack to deal with high PPQ from MAPS\n # https://github.com/craffel/pretty-midi/issues/112\n pretty_midi.pretty_midi.MAX_TICK = 1e10\n\n for s in os.listdir(wav_dir):\n subdir = os.path.join(wav_dir, s)\n if not os.path.isdir(subdir):\n continue\n # recursively search in subdir\n print(subdir)\n for dp, dn, filenames in os.walk(subdir):\n # in each level of the directory, look at filenames ending with .mid\n for f in filenames:\n # if there exists a .wav file and .midi file with the same name\n\n if f.endswith('.wav'):\n audio_filename = f\n fprefix = audio_filename.split('.wav')[0]\n mid_fn = fprefix + '.mid'\n txt_fn = fprefix + '.txt'\n print(\"Handling files {}\".format(fprefix))\n if mid_fn in filenames:\n # extract_features\n audio_filename = os.path.join(dp, audio_filename)\n inputnp = extract_features(audio_filename, args)\n times = librosa.frames_to_time(np.arange(inputnp.shape[0]), sr=sr, hop_length=hop_length)\n # mid2outputnp\n mid_fn = os.path.join(dp, mid_fn)\n pm_mid = pretty_midi.PrettyMIDI(mid_fn)\n\n outputnp = mid2outputnp(pm_mid, times, args)\n\n # check that num onsets is equal\n if inputnp.shape[0] == outputnp.shape[0]:\n # Some filtering highly pragmatic filtering on the data!!\n # take only frames that are \"sufficiently loud\", ...\n good2take = np.array(inputnp.max(axis=(1, 2)) > 0.05)\n # ... and always omit the last frame as this has been padded ...\n good2take[-1] = False # omit last\n # ... and only take frames with at least one true label (i.e. 
some tone is played)\n good2take = good2take & (outputnp.max(axis=1) > 0)\n outputnp = outputnp[good2take, ]\n inputnp = inputnp[good2take, ]\n\n addCnt += 1\n if inputnp.shape[0] > maxFramesPerFile > 0:\n inputnp = inputnp[:maxFramesPerFile]\n outputnp = outputnp[:maxFramesPerFile]\n framecnt += inputnp.shape[0]\n print(\"framecnt is {}\".format(framecnt))\n inputs.append(inputnp)\n outputs.append(outputnp)\n else:\n print(\"error for fprefix {}\".format(fprefix))\n errCnt += 1\n print(inputnp.shape)\n print(outputnp.shape)\n\n if framecnt > maxFrames > 0:\n print(\"have enought frames, leaving {}\".format(subdir))\n break\n if framecnt > maxFrames > 0:\n print(\"have enought frames, leaving {}\".format(wav_dir))\n break\n\n if framecnt > maxFrames > 0:\n print(\"have enought frames, leaving {}\".format(wav_dir))\n break\n\n print(\"{} examples in dataset\".format(addCnt))\n print(\"{} examples couldnt be processed\".format(errCnt))\n\n # concatenate dynamic list to numpy list of example\n if addCnt:\n inputs = np.concatenate(inputs)\n outputs = np.concatenate(outputs)\n\n print(\"inputs.shape\")\n print(inputs.shape)\n print(\"outputs.shape\")\n print(outputs.shape)\n mmi = np.memmap(filename=filenameIN, mode='w+', shape=inputs.shape, dtype=\"float64\")\n mmi[:] = inputs[:]\n mmo = np.memmap(filename=filenameOUT, mode='w+', shape=outputs.shape, dtype=\"float64\")\n mmo[:] = outputs[:]\n del mmi\n del mmo\n\n return inputs, outputs, datapath", "def _load_raw_datashards(shard_num, nb_collaborators): \n train_obj = torchvision.datasets.CIFAR10('~/.CIFAR10', train=True, download=True) \n test_obj = torchvision.datasets.CIFAR10('~/.CIFAR10', train=False, download=True) \n x_train = train_obj.data\n y_train = np.asarray(train_obj.targets)\n x_test = test_obj.data\n y_test = np.asarray(test_obj.targets)\n # fix the label dimension to be (N,)\n y_train = y_train.reshape(-1)\n y_test = y_test.reshape(-1) \n \n # create the shards\n X_train_shards = x_train[shard_num::nb_collaborators]\n y_train_shards = y_train[shard_num::nb_collaborators]\n \n X_test_shards = x_test[shard_num::nb_collaborators]\n y_test_shards = y_test[shard_num::nb_collaborators]\n return (X_train_shards, y_train_shards), (X_test_shards, y_test_shards)", "def run(prefix):\n # run_tests.assert_folder_is_empty(prefix=prefix)\n xrs_good,xrs_poor,f_obs,r_free_flags = run_tests.setup_helix_example()\n # pdb_inp = os.path.join(qr_unit_tests,\"data_files\",\"2lvr.pdb\")\n r = run_tests.run_cmd(prefix,\n args = [\"restraints=cctbx\",\"mode=gtest\",\"g_scan=20\",\"g_mode=1\"],\n pdb_name = 'm00_poor.pdb', mtz_name='')\n assert os.path.isfile('1-20.npy')", "def download_hess_dr1_data():\n download_data_files(FILENAMES_HESS_DR1)", "def convert_to_3d(vars, smi_file, smile_file_directory):\n\n print(\"CONVERTING SMILES TO SDF\")\n # convert smiles in an .SMI file to sdfs using gypsum\n gypsum_output_folder_path = convert_smi_to_sdfs_with_gypsum(\n vars, smi_file, smile_file_directory\n )\n print(\"CONVERTING SMILES TO SDF COMPLETED\")\n\n print(\"CONVERTING SDF TO PDB\")\n # convert sdf files to PDBs using rdkit\n convert_sdf_to_pdbs(vars, smile_file_directory, gypsum_output_folder_path)\n print(\"CONVERTING SDF TO PDB COMPLETED\")", "def main():\n\n browser = initialize()\n process_directory(browser, \"data\")\n browser.close()", "def write(self,data): \n if not os.path.exists(self.output_dir):\n os.makedirs(self.output_dir)\n\n # We will store these in a separate file and link them to the level2s\n fname = data.filename.split('/')[-1]\n 
\n if os.path.exists(self.outfile):\n output = h5py.File(self.outfile,'a')\n else:\n output = h5py.File(self.outfile,'w')\n\n # Set permissions and group\n if self.set_permissions:\n try:\n os.chmod(self.outfile,0o664)\n shutil.chown(self.outfile, group=self.permissions_group)\n except PermissionError:\n self.logger(f'{fname}:{self.name}: Warning, couldnt set the file permissions.')\n\n # Store datasets in root\n data_out = {'tod':self.all_tod,\n 'weights':self.all_weights,\n 'mask':self.all_mask,\n 'cal_factors':self.all_cal_factors,\n 'frequency':self.all_frequency,\n 'auto_rms':self.all_auto}\n\n for dname, dset in data_out.items():\n if dname in output:\n del output[dname]\n output.create_dataset(dname, data=dset)\n\n output.attrs['version'] = __level3_version__\n output['cal_factors'].attrs['source'] = self.cal_source\n output['cal_factors'].attrs['calibrator_obsid'] = self.nearest_calibrator\n\n output.close()\n \n if self.level3 in data.keys():\n del data[self.level3]\n data[self.level3] = h5py.ExternalLink(self.outfile,'/')", "def extract_hrc_data(obsid, data_dir):\n#\n#--- extract fits data\n#\n line = 'operation=retrieve\\n'\n line = line + 'dataset=flight\\n'\n line = line + 'level=1\\n'\n line = line + 'detector=hrc\\n'\n line = line + 'obsid=' + str(obsid) + '\\n'\n line = line + 'go\\n'\n\n with open('zline', 'w') as fo:\n fo.write(line)\n\n cmd = ' /proj/sot/ska/bin/arc5gl -user isobe -script zline > zout'\n os.system(cmd)\n#\n#--- create directories and move the data into them\n#\n cmd = 'mkdir primary secondary'\n os.system(cmd)\n \n cmd = 'mv *dtf1*fits* *fov*fits* ./primary/.'\n os.system(cmd)\n\n cmd = 'mv *bpix1*fits* *evt1*fits* *msk1*fits* *mtl1*fits* \\\n *std_dtfstat1.fits* *std_flt1.fits* ./secondary/.'\n os.system(cmd)\n\n line = 'operation=retrieve\\n'\n line = line + 'dataset=flight\\n'\n line = line + 'level=1\\n'\n line = line + 'detector=pcad\\n'\n line = line + 'subdetector=aca\\n'\n line = line + 'obsid=' + str(obsid) + '\\n'\n line = line + 'go\\n'\n\n with open('zline', 'w') as fo:\n fo.write(line)\n\n cmd = ' /proj/sot/ska/bin/arc5gl -user isobe -script zline > zout'\n os.system(cmd)\n cmd = 'mv *asol*fits* ./primary/.'\n os.system(cmd)\n\n cmd = 'rm -rf *fits* zline zout'\n os.system(cmd)\n\n hdir = data_dir + '/' + str(obsid)\n if os.path.isdir(hdir):\n cmd = 'rm -rf ' + hdir + '/*'\n os.system(cmd)\n else:\n cmd = 'mkdir ' + hdir \n os.system(cmd)\n\n cmd = 'chmod 774 primary/* secondary/*'\n os.system(cmd)\n\n#\n#--- check whether there are duplicated fits files extracted; if so, remove older ones\n#\n h_list = ['dtf1', 'fov1', 'asol1']\n sdir = 'primary'\n remove_duplicate(h_list, sdir)\n\n h_list = ['bpix1', 'evt1', 'msk1', 'mtl1', 'std_dtfstat1', 'std_flt1']\n sdir = 'secondary'\n remove_duplicate(h_list, sdir)\n\n cmd = 'mv primary secondary ' + hdir + '/.'\n os.system(cmd)\n\n cmd = 'rm -rf ' + hdir + '/analysis/* ' \n os.system(cmd)\n\n return check_data_exist(hdir)", "def get_data_scans(rootdir, datasetnames, filterdata):\n datasets = {}\n\n print('Loading: ' + str(len(datasetnames)) + ' datasets')\n for dataset in tqdm(datasetnames):\n time.sleep(0.1)\n\n # Original images (to predict)\n images_org = load_scans(rootdir + dataset + '/crop_org')\n\n # Smoothed images by specific filter\n images_smoothed = load_scans_filter(images_org, filterdata)\n\n # Save images in datasets dictionary\n datasets.update({dataset : {'org': images_org, 'smoothed': images_smoothed}})\n\n print(\"datasets created\")\n return datasets", "def _init_dataset():\n 
global _residues\n if _residues is not None:\n # Database is already initialized\n return\n\n # Residuue data is taken from\n # ftp://ftp.wwpdb.org/pub/pdb/data/monomers/components.cif\n # (2019/01/27)\n _info_dir = dirname(realpath(__file__))\n with open(join(_info_dir, \"residues.msgpack\"), \"rb\") as file:\n _residues = msgpack.unpack(\n file, use_list=False, raw=False\n )", "def _preprocess_data(self, name, directory):\n if name.endswith('data'):\n for path in glob(str(directory / '**/*.jpg'), recursive=True):\n try:\n with Image.open(path) as img:\n if not name.startswith('feature'):\n img = img.rotate(-90, 0, 1)\n img = img.resize(self.input_shape)\n except (ValueError, OSError):\n print(\"Couldn't open {}\".format(path))\n else:\n path = Path(path)\n filename = path.name.split('img-')[1]\n target = (path.parent / filename).with_suffix('.image.png')\n img.save(target, 'PNG')\n os.remove(str(path))\n elif name.endswith('targets'):\n for path in glob(str(directory / '**/*.mat'), recursive=True):\n try:\n mat = spio.loadmat(path)['depthMap']\n img = spmisc.toimage(mat).resize(self.target_shape)\n except ValueError:\n print(\"Couldn't open {}\".format(path))\n else:\n path = Path(path)\n name = path.name[path.name.index('-') + 1:]\n target = (path.parent / name).with_suffix('.depth.png')\n img.save(target, 'PNG')\n os.remove(str(path))", "def create_dataloaders(data_dir):\n\n trng_dataset = datasets.ImageFolder(data_dir / TRNG_FOLDER,\n transform=flowernet.trng_transform)\n trng_dataloader = torch.utils.data.DataLoader(trng_dataset,\n batch_size=64,\n shuffle=True)\n\n valn_dataset = datasets.ImageFolder(data_dir / VALN_FOLDER,\n transform=flowernet.pred_transform)\n valn_dataloader = torch.utils.data.DataLoader(valn_dataset,\n batch_size=64,\n shuffle=True)\n\n return trng_dataloader, valn_dataloader", "def load_data(self):", "def data():\n return _SCRIPT_DIR / \"data\"", "def main():\n with open(IMAGEPATH_LIST_PATH, \"rt\") as imagepath_list_handle:\n imagepath_list = [line.strip() for line in imagepath_list_handle.readlines()]\n\n object_detector = ObjectDetector(MODEL_PATH)\n\n dataset_json = []\n for imagepath in imagepath_list:\n image = scipy.misc.imread(imagepath)\n detections = object_detector.run(image)\n\n detections_json = {\"path\": imagepath, \"detections\": [det.to_dict() for det in detections]}\n dataset_json.append(detections_json)\n\n with open(DATASET_PATH, \"wt\") as json_handle:\n json.dump(dataset_json, json_handle, sort_keys=True, indent=4)", "def initialize_data(self , station = '', datasets = {} ):\n \n self.datasets = datasets\n self.datasets_keys = datasets.keys()\n self.station = station\n \n data = {} # container for the data of each dataset\n source_configuration = {} # container for the source_configuration of each dataset\n \n\n \n \"\"\" Looping over the datasets \"\"\"\n logging.info('*** Reading and Initializing the data from the netCDF files ')\n \n \n for k,v in datasets.items() :\n logging.info(' Initialising the dataset: *** %s ' , k )\n data[k] = {} \n data['cdm_tables'] = {} \n \n ### alternative with xarray \n #ds = xr.load_dataset(v) \n #observations_table = xr.open_dataset(v , engine = 'h5netcdf' , group = 'observations_table') \n \n ### alternative with netCDF4\n #ds = nc.Dataset(v) \n #data[k]['dateindex'] = ds.variables['dateindex'][0,:] # storing the dateindex \n \n ###for h5py but cant extract date time units !!!\n ds = h5py.File(v , driver=\"core\" ) \n data[k]['df'] = ds # storing the entire file \n try: \n data[k]['source_file'] = 
ds['source_configuration']['source_file'][0]\n except:\n data[k]['source_file'] = str(v) # temp fix \n \n #data[k]['product_code'] = ds['source_configuration']['product_code'][0] \n #data[k]['recordtimestamp'] = ds['recordtimestamp'].value\n #data[k]['recordindex'] = ds['recordindex'].value \n #ds.close() \n logging.debug('Reading the file with h5py ')\n \n \n # add here appending datasets for the case of ncar_w and ncar_t \n \n \n self.data = data\n self.make_dataframe()\n ds.close()\n \n \"\"\" Reading the header_table, station_configuration, source_configuration \"\"\"\n for k,v in datasets.items() : \n \n #d = xr.open_dataset(v , engine = 'h5netcdf' ) \n #data[k]['recordtimestamp'] = d['recordtimestamp'].values\n #data[k]['recordindex'] = d['recordindex'].values \n \n \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'station_configuration') \n data[k]['station_configuration'] = d.to_dataframe() \n #data[k]['station_configuration'] = d ### USELESS ? \n logging.debug('Done with %s station_configuration' , str(k) )\n \n \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'header_table') \n logging.debug('Loading the header_table') \n if 'header_table' not in list( self.attributes.keys() ): # saving the attributes to be re-applied at the end\n self.attributes['header_table'] = {}\n for var in d.variables:\n self.attributes['header_table'][var] = {}\n self.attributes['header_table'][var]['description'] = d[var].description\n self.attributes['header_table'][var]['external_table'] = d[var].external_table \n data[k]['header_table'] = d.to_dataframe() \n logging.debug('Done with %s ' , k )\n \n logging.info(\"*** Loading the observations_table (might take time) %s\" , k ) \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'observations_table') \n \n if 'observations_table' not in list( self.attributes.keys() ): # saving the attributes to be re-applied at the end\n self.attributes['observations_table'] = {}\n for var in d.variables:\n self.attributes['observations_table'][var] = {}\n self.attributes['observations_table'][var]['description'] = d[var].description\n self.attributes['observations_table'][var]['external_table'] = d[var].external_table\n \n \n logging.info(\"*** Loading the source configuration %s\" , k ) \n try: \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'source_configuration')\n d = d.isel(hdrlen=[0])\n data[k]['source_configuration'] = d.to_dataframe() ### USELESS ? 
\n logging.debug('Done with %s source_configuration' , k )\n except: \n data[k]['source_configuration']= pd.DataFrame(np.array( [ [ self.data[k]['source_file'] ] ] ) , columns=['source_file'] ) \n \n if k == 'era5_1': # reading the whole era5_1 feedback (including reanalysis)\n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'era5fb') \n data[k]['era5fb'] = d.to_dataframe() \n logging.debug('Done with %s era5 feedback ', k )\n \n \"\"\" Reading the CDM tables that do not depend on specific stations or observations (fixed values), for the first file only \"\"\" \n if list(datasets.keys()).index(k) == 0 :\n for t in [ 'crs' , 'observed_variable', 'units' , 'z_coordinate_type' , 'station_type']: \n \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = t) \n #data['cdm_tables'][t] = d.to_dataframe() ### USELESS ?\n data['cdm_tables'][t] = d \n \n d.close() \n ds.close()\n\n \"\"\" Reading the name of the original source file \"\"\"\n source_configuration[k] = {} \n source_configuration[k]['source_file'] = [ c for c in v.split('/') if '.nc' in c][0]\n\n \n \"\"\" Storing the station configurations \"\"\" \n self.source_configuration = source_configuration \n \n \"\"\" Making all date_times \"\"\" \n self.make_all_datetime()\n \n \n \"\"\" feedback columns \"\"\"\n if 'era5_1' in list (self.data.keys() ):\n self.fb_columns = list(self.data['era5_1']['era5fb'].columns ) \n else:\n self.fb_columns = ['empty']", "def main():\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')\n get_user_config()", "def execute():\n \n # URL to the datasets part of SHARKdata.\n sharkdata_url = u'http://sharkdata.se/datasets/'\n\n # Download a list of all available datasets. The JSON format is used.\n datasets = json.load(urllib2.urlopen(sharkdata_url + u'list.json'))\n \n # Exit if no datasets are found. \n if len(datasets) < 1:\n print(u'No datasets found. Script terminated.')\n return\n\n # Print some info for all available datasets.\n print(u'\\nAvailable datasets on SHARKdata:' + u'\\n')\n for dataset in datasets:\n print(u' Datatype: ' + dataset[u'datatype'] + u' Name: ' + dataset[u'dataset_name'])\n \n # Get the name of the first dataset in the list.\n dataset_name = datasets[0][u'dataset_name']\n \n # Download header and data and print the content. The text format is used.\n print(u'\\nPrint dataset content for: ' + dataset_name + u'\\n')\n header_and_data = urllib2.urlopen(sharkdata_url + dataset_name + u'/data.txt')\n \n for row in header_and_data:\n # The text format character encoding is cp1252 (equal to windows-1252).\n row = row.decode(u'cp1252')\n# print(row.strip())\n\n # Download header and data and save to file.\n dataset_name = datasets[0][u'dataset_name']\n filename = datasets[0][u'dataset_file_name'].replace(u'.zip', u'.txt')\n character_encoding = u'utf8' # Some alternatives: cp1252, utf-8, utf-16, ascii, latin1, macroman.\n row_delimiter = u'\\r\\n'\n print(u'\\nDataset content for: ' + dataset_name + u' to file: ' + filename + u'\\n')\n out_file = None\n try:\n out_file = codecs.open(filename, mode = 'w', encoding = character_encoding)\n header_and_data = urllib2.urlopen(sharkdata_url + dataset_name + u'/data.txt')\n for row in header_and_data:\n row = row.decode(u'cp1252')\n out_file.write(row.strip() + row_delimiter)\n finally:\n if out_file: out_file.close()", "def run_script(input_dir, output_dir):\n\n \"\"\"+++++++++++++++++++++++++++++++++++\"\"\"\n \"\"\"Step 1. 
load dataset \"\"\"\n print(\"loading data ......\")\n print(\"+++++++Read the surface shape data+++++++\")\n shape_file_name = input_dir + \"aligned_shapes.mat\"\n mat = loadmat(shape_file_name)\n y_design = mat['aligned_shape']\n n, l, m = y_design.shape\n print(\"The dimension of shape matrix is \" + str(y_design.shape))\n print(\"+++++++Read the sphere coordinate data+++++++\")\n template_file_name = input_dir + \"template.mat\"\n mat = loadmat(template_file_name)\n coord_mat = mat['template']\n # d = coord_mat.shape[1]\n print(\"+++++++Read the design matrix+++++++\")\n design_data_file_name = input_dir + \"design_data.txt\"\n design_data = np.loadtxt(design_data_file_name)\n # read the covariate type\n var_type_file_name = input_dir + \"var_type.txt\"\n var_type = np.loadtxt(var_type_file_name)\n print(\"+++++++Construct the design matrix: normalization+++++++\")\n x_design = read_x(design_data, var_type)\n p = x_design.shape[1]\n print(\"The dimension of design matrix is \" + str(x_design.shape))\n\n \"\"\"+++++++++++++++++++++++++++++++++++\"\"\"\n \"\"\"Step 2. Statistical analysis: including (1) smoothing and (2) hypothesis testing\"\"\"\n gpvals, lpvals_fdr, clu_pvals, efit_beta, efity_design, efit_eta = mfsda.run_stats(y_design, coord_mat, design_data, var_type)\n\n \"\"\"+++++++++++++++++++++++++++++++++++\"\"\"\n \"\"\"Step3. Save all the results\"\"\"\n gpvals_file_name = output_dir + \"global_pvalue.txt\"\n np.savetxt(gpvals_file_name, gpvals)\n lpvals_fdr_file_name = output_dir + \"local_pvalue_fdr.txt\"\n np.savetxt(lpvals_fdr_file_name, lpvals_fdr)\n clu_pvals_file_name = output_dir + \"cluster_pvalue.txt\"\n np.savetxt(clu_pvals_file_name, clu_pvals)" ]
[ "0.64618134", "0.608151", "0.59120786", "0.5877814", "0.56381893", "0.5586844", "0.5561171", "0.5528533", "0.5522206", "0.55164164", "0.55111134", "0.5507343", "0.54734784", "0.5462257", "0.54563326", "0.5446462", "0.54454964", "0.54451734", "0.542079", "0.5415783", "0.5409398", "0.54075074", "0.53940713", "0.53821194", "0.536265", "0.5356185", "0.5346772", "0.53284097", "0.53200626", "0.5310697", "0.5307058", "0.52992517", "0.5292286", "0.52680486", "0.52602047", "0.5244013", "0.52310914", "0.5216283", "0.52065045", "0.5172296", "0.51700985", "0.51653254", "0.5163819", "0.5163191", "0.51594007", "0.5159084", "0.5155321", "0.515278", "0.5144576", "0.51299", "0.51298517", "0.51275367", "0.5123815", "0.5121105", "0.51173073", "0.5116884", "0.5099754", "0.50948393", "0.5088238", "0.5087598", "0.5086545", "0.5083768", "0.5080586", "0.5080552", "0.50775266", "0.5074551", "0.50649637", "0.50642836", "0.5062636", "0.50617176", "0.50592095", "0.50592065", "0.50554895", "0.5053137", "0.5051609", "0.50268537", "0.5019739", "0.50191283", "0.50084853", "0.5006477", "0.5004477", "0.5001843", "0.5001108", "0.49943894", "0.49943322", "0.49916068", "0.4987584", "0.4976403", "0.49759373", "0.49748805", "0.49735197", "0.49689347", "0.49657992", "0.49645218", "0.4956794", "0.49557295", "0.49523148", "0.4952081", "0.49506965", "0.49482515" ]
0.6006584
2
Store result of current timing run
def store_result(duration, loci_number):
    print(' %ds for %d loci' % (duration, loci_number))

    if os.path.isfile(out_fname):
        with open(out_fname, 'r') as fd:
            cur = json.load(fd)
    else:
        cur = []

    with open(out_fname, 'w') as fd:
        cur.append((loci_number, duration))
        json.dump(cur, fd)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def store_result_from_run(self, result):\n with self._result_lock:\n self._results.append(result)\n\n # A race here isn't problematic. Better not to hold the lock during an\n # is_crash call.\n if not self.last_failing_result and result.is_crash():\n self.last_failing_result = result", "def saveResultInit(timer, currentAttribute, totalAttribute, currentResult):\n (currentAttribute, totalAttribute) = saveResult(timer, \n currentAttribute, \n totalAttribute, \n currentResult)\n init_value = currentResult[timer.timesteps_used_horizon-1]\n \n return (currentAttribute, totalAttribute, init_value)", "def saveResult(timer, currentAttribute, totalAttribute, currentResult):\n # Get current and final position\n currentPosition = timer.current_timestep\n finalPosition = currentPosition + timer.timesteps_used_horizon\n \n # Only save the first values that are not overwritten later\n requiredResult = currentResult[0:timer.timesteps_used_horizon]\n \n # Save the results\n currentAttribute = requiredResult\n totalAttribute[currentPosition:finalPosition] = requiredResult\n \n return (currentAttribute, totalAttribute)", "def measure(self):\n # --- perform repeated runs\n for i_run in range(self.n_runs):\n if self.verbosity > 0:\n print(\"Run {0} / {1} ...\".format(i_run, self.n_runs), end = '')\n tdelta = self._timed_execute()\n self._run_times[i_run] = tdelta\n\t\t\t\n if self.verbosity == 2:\n print(tdelta)\n \n # calculate mean\n self._tmean = np.mean(self._run_times)\n # calculate standard deviation\n self._tstdev = np.std(self._run_times)\n # allow access to results\n self.__hasrun = True", "def setResults(self, tSto):\n results = handleData.saveResultInit(self.environment.timer,\n self.currentTSto,\n self.totalTSto,\n tSto)\n (self.currentTSto, self.totalTSto, self.tInit) = results", "def get_execution_time(self):\n self.execution_time = self.end_time - self.start_time\n\n print('\\n')\n self.message('**[OPERATION COMPLETE]**********************************************************************')\n if self.arg_data:\n self.message(' Execution Time: {} ms'.format(self.execution_time))\n self.message('********************************************************************************************')\n else:\n self.message(' Cell Updates: {}'.format(self.cells_updated))\n self.message(' Cell Additions: {}'.format(self.rows_appended))\n self.message(' Errors: {}'.format(self.errors))\n self.message(' Warnings: {}'.format(self.warnings))\n self.message(' Execution Time: {} ms'.format(self.execution_time))\n self.message('********************************************************************************************')", "def get_curr_exec_time(self):\n if self.type == 'normal':\n try:\n self.curr_exec_time = self.my_rand.gauss(self.runtime, self.stddev)\n except:\n if self.fwk.debug:\n print(\"not varying the execution time\")\n self.curr_exec_time = self.runtime\n raise\n self.start_exec_time = self.fwk.fwk_global_time\n self.state = \"running\"\n elif self.type == 'sandia_work':\n # this is a sandia style work task\n next_ckpt = self.sim.next_ckpt # relative work time\n work_todo = self.sim.total_work - self.sim.completed_work\n self.curr_exec_time = min(work_todo, next_ckpt)\n self.start_exec_time = self.fwk.fwk_global_time\n self.state = \"running\"\n elif self.type == 'sandia_rework':\n next_ckpt = self.sim.next_ckpt # relative work time\n self.curr_exec_time = min(self.sim.rework_todo, next_ckpt)\n self.start_exec_time = self.fwk.fwk_global_time\n self.state = \"running\"\n elif self.type == 'sandia_ckpt' 
or self.type == 'sandia_restart':\n self.curr_exec_time = self.runtime\n self.start_exec_time = self.fwk.fwk_global_time\n self.state = \"running\"\n else:\n print('error error error!!! problem with component type in get_curr_exec_time')\n raise", "def test_run_simulation_stores_result(self):\n sim = ss.Simulation()\n assert sim.results == []\n sim.run_simulation(10)\n assert sim.results != []\n assert len(sim.results) == 10", "def run(self, test):\n result = _TestResult(self.verbosity)\n\n test(result)\n self.stopTime = datetime.datetime.now()\n self._write_data(result)\n print(sys.stderr, '\\nTime Elapsed: %s' % (self.stopTime-self.startTime))\n return result", "def update_timeval(self):\n self.timeval = self.get_timeval()", "def record_response(self, result: str) -> None:\n self.finish_time = datetime.now()\n self.elapsed_time = (self.finish_time - self.start_time).total_seconds()\n self.result = result\n if not self.failed_when_contains:\n self.failed = False\n elif not any(err in result for err in self.failed_when_contains):\n self.failed = False", "def __call__(self):\n return self.timer()", "def report_total_usage(self):\n work_time = 0\n if self.type == 'normal':\n work_time = self.fwk.fwk_global_time - self.start_exec_time\n elif self.type == 'sandia_work':\n self.total_time += self.fwk.fwk_global_time - self.start_exec_time\n self.total_usage = self.total_time * self.nproc\n if self.state == \"running\":\n # update total work done\n self.sim.completed_work += self.fwk.fwk_global_time - self.start_exec_time\n elif self.state == \"failed\":\n # add this work to the work to be redone\n self.sim.rework_todo += self.fwk.fwk_global_time - self.start_exec_time\n self.state = \"not_ready\"\n self.num_faults += 1\n elif self.type == 'sandia_rework':\n self.total_rework_time += self.fwk.fwk_global_time - self.start_exec_time\n self.total_rework_usage = self.total_rework_time * self.nproc\n if self.state == \"running\":\n # update total work done\n self.sim.next_ckpt = self.sim.ckpt_interval - (self.fwk.fwk_global_time - self.start_exec_time)\n self.sim.rework_todo -= self.fwk.fwk_global_time - self.start_exec_time\n elif self.state == \"failed\":\n # add this work to the work to be redone\n self.state = \"not_ready\"\n self.num_faults += 1\n elif self.type == 'sandia_ckpt':\n self.total_ckpt_time += self.fwk.fwk_global_time - self.start_exec_time\n self.total_ckpt_usage = self.total_ckpt_time * self.nproc\n if self.state == \"running\":\n # update last ckpt\n self.sim.last_ckpt = self.sim.completed_work\n elif self.state == \"failed\":\n # add work to rework\n self.sim.rework_todo += self.sim.next_ckpt\n self.state = \"not_ready\"\n self.num_faults += 1\n elif self.type == 'sandia_restart':\n print(\"time spent in rework\", self.fwk.fwk_global_time - self.start_exec_time)\n self.total_restart_time += self.fwk.fwk_global_time - self.start_exec_time\n self.total_restart_usage = self.total_restart_time * self.nproc\n #if self.state == \"running\":\n # nothing to do?\n # pass\n if self.state == \"failed\":\n # gotta try again\n self.state = \"ready\"\n self.num_faults += 1\n else:\n print(\"problems updating state in report_total_usage\")\n raise\n if self.type == 'normal':\n if self.sim.state == 'rework':\n self.total_rework_time += work_time\n self.total_rework_usage = self.total_rework_time * self.nproc\n else: # sim.state == 'work'\n if self.retry:\n self.total_rework_time += work_time\n self.total_rework_usage = self.total_rework_time * self.nproc\n else:\n self.total_time += work_time\n 
self.total_usage = self.total_time * self.nproc", "def record_result(self, name, **kwargs):\n current = int(time.time())\n if name != self.test_name:\n LOGGER.debug('Set %d starting test %s at %d', self.port_set, self.test_name, current)\n self.test_name = name\n self.test_start = current\n result = {\n 'name': name,\n 'runid': self.run_id,\n 'started': self.test_start,\n 'timestamp': current,\n 'port': self.port_set\n }\n for arg in kwargs:\n result[arg] = None if kwargs[arg] is None else str(kwargs[arg])\n self.results[name] = result\n self.runner.gcp.publish_message('daq_runner', result)", "def experiment_callback(self, args):\n # If args is None, that means that an exception was raised during the\n # execution of the experiment. In such case, ignore it\n if not args:\n self.n_fail += 1\n return\n # Extract parameters\n params, results, duration = args\n self.n_success += 1\n # Store results\n self.results.add(params, results)\n self.exp_durations.append(duration)\n if self.n_success % self.summary_freq == 0:\n # Number of experiments scheduled to be executed\n n_scheduled = self.n_exp - (self.n_fail + self.n_success)\n # Compute ETA\n n_cores = min(mp.cpu_count(), self.n_proc)\n mean_duration = sum(self.exp_durations) / len(self.exp_durations)\n eta = timestr(n_scheduled * mean_duration / n_cores, False)\n # Print summary\n logger.info('SUMMARY | Completed: %d, Failed: %d, Scheduled: %d, ETA: %s',\n self.n_success, self.n_fail, n_scheduled, eta)", "def compute_result(self):\n sleep_time = np.random.randint(3)\n logger.info(\"sleeping for %d seconds\" % sleep_time)\n sleep(sleep_time)\n\n return self.x**2 + self.y**2 + np.random.randn()*0.1", "def output_result_eval(info_dict):\n time_dict = {'time' : str(datetime.now().strftime(\"%H:%M:%S\"))}\n result_dict = dict(time_dict, **info_dict)\n database.results_output_queue.put(result_dict)", "def evaluate(self, time) -> float:\n ...", "def save_current_run_time():\n # path = \"/Users/szou/Downloads/bu/happydogs/analytics_happydogs/last_time_run\" # hard coding this due to CRON, but will remove later\n output_file = open(\"last_time_run\", \"w\")\n current_time_string = datetime.datetime.strftime(\n datetime.datetime.now(), \"%Y-%m-%d %H:%M:%S\"\n )\n output_file.write(current_time_string)\n print(current_time_string)\n output_file.close()", "def _trigger(self):\n if len(self._stat_now):\n self._stat_now['epoch_num'] = self.epoch_num\n self._stat_now['global_step'] = self.global_step\n\n self._stats.append(self._stat_now)\n self._stat_now = {}\n self._write_stat()", "def _store_result(self, task_id, result, status, traceback=None):\n session = Session()\n try:\n tasks = session.query(Task).filter(Task.task_id == task_id).all()\n if not tasks:\n task = Task(task_id)\n session.add(task)\n else:\n task = tasks[0]\n task.result = result\n task.status = status\n task.traceback = traceback\n if task.status == states.STARTED:\n task.date_began = datetime.now()\n session.commit()\n finally:\n session.close()\n return result", "def time_return(self):\n return self.time", "def experiment():\n state['result'] = \"bla\"", "def get_post_stats(self):\n stats = self.stats\n stats.results = self.job.result().get_counts(stats.iteration)\n stats.datetime = str(datetime.now())", "def _append_results(self) -> None:\n self._t_mps.compute_traces(self._step, self._process_tensors)\n time = self.time(self._step)\n norm = self._t_mps.get_norm()\n bond_dimensions = self._t_mps.get_bond_dimensions()\n self._results['time'].append(time)\n 
self._results['norm'].append(norm)\n self._results['bond_dimensions'].append(bond_dimensions)\n for sites, dynamics in self._results['dynamics'].items():\n if isinstance(sites, int):\n sites_list = [sites]\n else:\n sites_list = list(sites)\n dynamics.add(\n time,\n self._t_mps.get_density_matrix(sites_list))\n self._t_mps.clear_traces()", "def run(self):\n result = self.Take_Voltage_Measurement()\n self.result_queue.put(result)", "def run(self):\n result = self.Take_Voltage_Measurement()\n self.result_queue.put(result)", "def getTimes():", "def getTimes():", "def getTimes():", "def step(self):\n\n e = self.event_queue.get()\n self.current_time = e.time\n component = e.component\n component.output(self.current_time)\n component.input(self.current_time)\n component.fire()\n\n self.event_queue.put(VirtualTimeScheduler.Event(self.current_time + component.interval, component))\n\n return self.current_time", "def get_set_current(self):\n status = self.get_status_response()\n flags = status[1]\n if (flags & 0x2) == 0:\n return 0.0 #test isn't running\n current = status[3] + (status[4] * 0x100) + (status[5] * 0x10000) + (status[6] * 0x1000000)\n current = float(current)\n current /= (1000.0 * 1000.0)\n return current\n #end get_set_current()", "def output_stats(self):\n elapsed = self.timer.elapsed.total_seconds()\n count = self.copied + self.errored\n total = self.total\n # Time per key in milliseconds\n avg = round(elapsed / count * 1000, 3)\n # Time remaining in seconds\n remaining = 1.0 * elapsed / count * (total - count)\n # Time remaining in minutes\n remaining = round(remaining / 60.0, 1)\n # Time taken in minutes\n elapsed = round(elapsed / 60.0, 1)\n\n self.log.info(f\"{self.prefix}: {avg}ms avg, {elapsed}min passed, \"\n f\"{remaining}min remaining. ({count:,}/{total:,})\")", "def store(self, job, result):\n pass", "def compute(self):\n try:\n self.set_trackline()\n except:\n app.logger.warning(\"Could not process trackline results. 
URL may be invalid?\")\n\n if Job.exists(self.task_id, connection=redis_connection):\n job = Job.fetch(self.task_id, connection=redis_connection)\n self.task_result = unicode(job.meta.get(\"outcome\", \"\"))\n\n self.save()", "def time(self):\n\n self.timing = True\n self.scramble()\n\n self.disp = False", "def print_results():\n now_time = time.time()\n diff_time_in_sec = now_time - start_time\n generated_per_second = total / diff_time_in_sec\n generated_per_hour = 3600 * generated_per_second\n saved_per_second = success / diff_time_in_sec\n saved_per_hour = 3600 * saved_per_second\n\n os.system('cls' if os.name == 'nt' else 'clear')\n print(f\"{'Generated:' : <16}{total : <12}\")\n print(f\"{'New graphs:' : <16}{success : <12}\")\n print(f\"{'Success rate:' : <16}{round((success / total) * 100, 3) : <7} %\")\n print(f\"{'Speed:' : <16}{round(generated_per_hour) : <7} graphs/h\")\n print(f\"{'Save speed:' : <16}{round(saved_per_hour) : <7} graphs/h\")", "def set_result(self, result):\n self.__test_result[Result.__RESULT] = result", "def _timed_execute(self):\n tstart = time.perf_counter()\n self._func(*self._func_args, **self._func_kwargs)\n tend = time.perf_counter() \n\n tdelta = tend - tstart\n\n return tdelta", "def _store_result(self, field, value):\n\n self.session[self.name]['results'][field] = value", "def poll(self) -> Tuple[np.ndarray]:\n t = time.time()\n try:\n v = self.controller.get(self.pvname)\n\n except TimeoutError:\n print(f\"No process variable found for {self.pvname}\")\n v = DEFAULT_SCALAR_VALUE[self.pvname]\n\n self.time = np.append(self.time, t)\n self.data = np.append(self.data, v)\n\n return self.time - self.tstart, self.data", "def finish(self):\r\n if self._elapsed is None:\r\n self._elapsed = self._now() - self._start", "def _record_current_time(self):\n now = time.time()\n delta = now - self._last_time\n self._last_time = now\n self._timing_recorder.append(delta)", "def _get_running_time(self):\n time_sum = 0.0\n for subdir in os.listdir(self.path):\n if subdir.startswith('.'):\n continue\n try:\n line = open('{0}/{1}/{2}/out/OUTDOCK'.format(self.path, subdir, DOCKING_RUN_FILES),'r').readlines()[-1]\n if line.startswith('elapsed time'):\n time = float(line.split()[-1])\n time_sum = time_sum + time\n except:\n pass \n self.running_time = time_sum", "def track_duration(self):\n # raise NotImplementedError\n self.out_schema.append(\"run_duration\")\n self._track_duration = True\n # self.runner = GridExecutor.timer(self.runner)", "def evaluate(self) -> None:\n eval_results = {'segmentation': self.evaluate_segmentation()}\n if self.task == 'tracking':\n eval_results['tracking'] = self.evaluate_tracking()\n self.save_result(eval_results)", "def run(self, test):\n result = _TestResult(self.verbosity)\n\n self.start_time = datetime.datetime.now()\n test(result)\n self.stop_time = datetime.datetime.now()\n self.elapsed_time = self.stop_time - self.start_time\n print >>sys.stderr, '\\nTime Elapsed: %s' % self.elapsed_time\n\n report = self.generate_report(result)\n self.stream.write(report.encode('utf8'))\n\n return result", "def real_time(self):\n try:\n # TODO: Update for resuming runs\n with open(path.join(self.run_dir, \"TIMINGS\", \"timings.001\"), \"r\") as f:\n text = f.read()\n r = re.match(r\" Total time for loop was(?: *)(.*?)(?: *)seconds\", text, re.DOTALL + re.MULTILINE)\n if not r:\n logger.warning(\"Bad format in timings file. 
The real time could not be read.\")\n return float(\"nan\")\n else:\n return float(r.group(1))\n except FileNotFoundError:\n return float(\"nan\")", "def time_step_output(self, current_time, time_step):\n pass", "def actual_time():\n return _time.time()", "def realtime():\n return timemodule.time()", "def calculate(self):\n #runs = [ai\n # for ei in self.experiment_queues\n # for ai in ei.cleaned_automated_runs]\n #\n #ni = len(runs)\n #self.nruns = ni\n # for ei in self.experiment_queues:\n # dur=ei.stats.calculate_duration(ei.cleaned_automated_runs)\n # if\n\n\n tt = sum([ei.stats.calculate_duration(ei.cleaned_automated_runs)\n for ei in self.experiment_queues])\n self._total_time = tt\n offset = 0\n if self._start_time:\n offset = time.time() - self._start_time\n\n self.etf = self.format_duration(tt - offset)", "def pre_step(self,status):\n self.t0 = time.time()\n pass", "def runTillVerdict(self):\n while True:\n #we wait for a maximum of 60 seconds for each test to complete\n i = self.localShell.expect_list(self.logPatterns, 60)\n if i == 0:\n testCaseNumber = int(self.localShell.match.group(1))\n \n if (self.results['testIndex'] != testCaseNumber):\n print 'Warning: Missing tests between %s and %s' % (self.results['testIndex'], testCaseNumber)\n \n self.results['testIndex'] = testCaseNumber+1\n \n print '\\ttest %s completed' % testCaseNumber\n self.qmtTestEndHook(testCaseNumber)\n \n elif i == 1:\n print 'End reached:%s:' % self.localShell.match.group()\n self.results['verdict'] = 'PASS'\n break\n \n else:\n print 'EOF or timeout'\n self.results['verdict'] = 'FAIL'\n break\n\n return self.results", "def time(self):\r\n raise NotImplementedError", "def do_thing(scr_obj = None):\n res = {}\n\n t1 = time()\n if not scr_obj:\n return \"No task provided\"\n ags = scr_obj.system_instr()\n \n son = subprocess.Popen(ags)\n print \"spid %r pid %r \"%(son.pid,os.getpid())\n if os.waitpid(son.pid,0):\n \n res['duration'] = time()-t1\n return res", "def store_results(self, results_overall, results_single, featureDesp=None):\n # store experiment run and retrieve id\n # store aggregate results\n # store patient level results\n # happy end\n self.experiment_id = self.store_experiment(mode='r', featureDesp=featureDesp)\n self.log.debug(\"Stored experiment with id {}\".format(self.experiment_id))\n self.store_aggregate_results(results_overall)\n for patientId in self.patientIDs:\n if results_single[patientId] is None: continue\n self.store_patient_results(results_single[patientId], patientId)\n self.log.info(\"Finished experiment {}\".format(self.experiment_id))", "def result(self, result):\n self.stdout.write('RESULT {0}\\n{1}'.format(len(result), result))\n self.stdout.flush()", "def add_result(self, method_name, method_time, path, visited_nodes):\n self.results.append(Result(method_name, method_time, path, visited_nodes))", "def _run_time(func):\n start_time = datetime.datetime.now()\n func\n end_time = datetime.datetime.now()\n return end_time - start_time", "def get(self):\n if self.running:\n return self.accumulated_time + pg.time.get_ticks() - self.start_time\n else:\n return self.accumulated_time", "def next_run_time(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"next_run_time\")", "def next_run_time(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"next_run_time\")", "def execute(self):\n \n self.outvar = self.invar + .01", "def elapsed():\n global start_time\n return time.time() - start_time", "def main():\r\n\r\n print\r\n print '** Demonstrating new Timer print 
statement:'\r\n\r\n with Timer('Test Timer') as tm:\r\n current_second = 0\r\n while tm.current_result() < 5:\r\n if current_second != int(tm.current_result()):\r\n print '{s} second(s) elapsed.'.format(s=int(tm.current_result()))\r\n current_second = int(tm.current_result())\r\n\r\n print\r\n print '** Changing Timer unit and printing last result:'\r\n tm.unit = 'ms'\r\n print tm.last_result()", "def realtime(self):", "def t(self):\n return self._data_writer.get_current_run_time_ms()", "def result(self, result):\n self._result = result", "def result(self, result):\n self._result = result", "def experiment2():\n state['result'] = \"bla\"\n _bla = state['result']\n del state['result']", "def output_result(text):\n output_text = '[' + str(datetime.now().strftime(\"%H:%M:%S\")) + '] ' + text\n database.results_output_queue.put(output_text)", "def __call__(self):\r\n return self._acc.at(self._scheduler.currentTime)", "def finish(self):\n return self.this_evaluation.moment", "def _update_cmd_time_info(self, end=False):\n time_stamp = time.time()\n time_passed = time_stamp - self._start_time\n if end:\n docs_proc_now = self._docs_processed % self._file_write_threshhold\n if docs_proc_now == 0:\n msg = ('Written {} documents to file in total. '\n 'Time passed: {:2f}')\n print(msg.format(self._docs_processed, time_passed))\n else:\n msg = ('Writing {} documents to file. '\n 'Written {} documents to file in total. '\n 'Time passed: {:2f}')\n print(msg.format(\n docs_proc_now, self._docs_processed, time_passed))\n else:\n msg = ('Writing {} documents to file. '\n 'Written {} documents to file in total. '\n 'Time passed: {:2f}')\n print(msg.format(self._file_write_threshhold,\n self._docs_processed, time_passed))", "def save_result(self):\n self.print_to_console()", "def log(result: ResultSnapshot):\n global results\n try:\n results[result_count] = result\n except IndexError:\n results.append(None)\n # we recursively try store the result,\n # since its possible the list isn't big enough for the current result_count\n log(result)", "def end_time(self, t):\r\n # Increase temperature while silent.\r\n if np.count_nonzero(self.next_note) == 0:\r\n self.silent_time += 1\r\n if self.silent_time >= NOTES_PER_BAR:\r\n self.temperature += 0.1\r\n else:\r\n self.silent_time = 0\r\n self.temperature = self.default_temp\r\n \r\n self.notes_memory.append(self.next_note)\r\n # Consistent with dataset representation\r\n self.beat_memory.append(compute_beat(t, NOTES_PER_BAR))\r\n self.results.append(self.next_note)\r\n # Reset next note\r\n self.next_note = np.zeros((NUM_NOTES, NOTE_UNITS))\r\n return self.results[-1]", "def _set_result(self, value):\n self._result = value\n self._state = FINISHED_STATE", "def run(self):\n game_time = 0\n while game_time < self.params.t:\n self.play_turn()\n game_time += 1\n\n reward = self.total_tx_reward\n successes = self.message_success_count\n self.reset() \n\n return reward / self.params.t, successes / self.params.t", "def save_result(self, value: Any) -> None:\n self.run_logger.set_tags({self.name: value})", "async def compute(self, duration: Optional[Number]) -> None:\n await envs.sleep(duration)\n self.total_compute_time += duration", "def store_results(self, event):\n\n # something wrong if you are here and no statistic is there\n if self.num_events_seen == 0:\n raise ValueError(\"No pedestal events in statistics, zero results\")\n\n container = event.mon.tel[self.tel_id].pedestal\n\n # mask the part of the array not filled\n 
self.sample_masked_pixels[self.num_events_seen:] = 1\n\n pedestal_results = self.calculate_pedestal_results(\n self.charges,\n self.sample_masked_pixels,\n )\n time_results = calculate_time_results(\n self.time_start,\n self.trigger_time,\n )\n\n result = {\n 'n_events': self.num_events_seen,\n **pedestal_results,\n **time_results,\n }\n for key, value in result.items():\n setattr(container, key, value)\n\n # update pedestal mask\n event.mon.tel[self.tel_id].pixel_status.pedestal_failing_pixels = \\\n np.logical_or(container.charge_median_outliers, container.charge_std_outliers)", "def exec_time_processor(self):\n with open(join(self.logs_dir, \"clock_time.dat\"), 'w') as fh:\n fh.write(\"Time ExecutionTime ClockTime\\n\")\n while True:\n rexp = (yield)\n fh.write(self.time_str + \"\\t\" +\n \"\\t\".join(x for x in rexp.groups()) + \"\\n\")\n self._tick = True", "def set_result(self, result):\n self._result = result\n self._set_done()", "def update():\n global iteration, result\n iteration += 1\n # Stop iterating after max_iterations\n if iteration >= max_iterations:\n timer.stop()\n print \"Output is\", result\n else:\n result = get_next(result)", "def do_timing(self):\n sts = datetime.datetime.now()\n _ = dep.read_env(get_path('good_env.txt'))\n ets = datetime.datetime.now()\n print(\"%.5f reads per second\" % (1. / (ets - sts).total_seconds(),))\n self.assertEquals(1, 2)", "def time(self):\n raise NotImplementedError()", "def _push(self):\n if len(self._stat_now):\n self._stat_now['epoch_num'] = self.epoch_num\n self._stat_now['global_step'] = self.global_step\n\n self._stats.append(self._stat_now)\n self._stat_now = {}\n self._write_stat()", "def estimated_time(self):\n self._update()\n if not self.running_mode:\n return 0 if self._is_finished() else float(\"nan\")\n elif self.running_mode == \"local\":\n start = self.processes[0].create_time()\n elif self.running_mode == \"grid\":\n start = self.job[\"start_time\"]\n if start == 0:\n # Queued, but not started\n return float(\"nan\")\n else:\n logger.warning(\"Invalid running_mode attribute\")\n return float(\"nan\")\n current = self.current_step()\n if current <= 0: # If not dumped yet or error\n return float('nan')\n else:\n elapsed = time() - start\n return elapsed * (self.total_steps / current - 1)", "def time(self):\n return self._begin", "def run_timing():\n time_log = []\n while True:\n one_run = input(\"Enter your time for this 10 km: \")\n if not one_run:\n break\n try:\n time_log.append(float(one_run))\n except ValueError:\n print(\n \"Hey, you enter something strange, \"\n \"please enter a valid number\")\n avg_time = sum(time_log) / len(time_log)\n return f\"Your average time is about: {avg_time:.1f} \" \\\n f\"over {len(time_log)} runs\"", "def print_results(self):\n print(\"Total sec: \", self.total_sec)\n print(\"Total min: \", self.total_min)\n print(\"Total hours: \", self.total_hours)", "def run(self) -> Tuple[Any, Log]:\n return self._value", "def _set_stored(self, key, result):\n output = [i for i,o in enumerate(self._delayed_outputs) if o.key == key]\n if len(output) != 1:\n # TODO: this error doesn't actually currently propagate into the main thread\n # Also make a separate case for len > 1\n raise LookupError('Cannot find output with the given key')\n i = output[0]\n self._stored_mask[i] = True", "def __init__(self):\n self.last_result = \"\"", "def __last_time(self):\n if self.__stopped is not None:\n return self.__stopped\n return self.__time()", "def sync(self):\n print(\"{} started\".format(self))\n 
time.sleep(0.5)\n print(\"{} done\".format(self))\n return \"Result of {}\".format(self)", "def record_probe_result(self, task_id, load):\r\n assert get_param(\"record_task_info\")\r\n assert task_id < self.num_tasks\r\n self.probe_results[task_id] = load", "def get_elapsed_time(self):\r\n self.get_bb_result()\r\n csv_path = self.bb_log_path + os.sep + 'run-logs' + os.sep + 'BigBenchTimes.csv'\r\n if not os.path.isfile(csv_path):\r\n print('BigBenchTimes.csv does not exist in {0}, existing...'.format(self.bb_log_path))\r\n exit(-1)\r\n df = pd.read_csv(csv_path, delimiter=';').loc[:,\r\n ['benchmarkPhase', 'streamNumber', 'queryNumber', 'durationInSeconds']]\r\n elapsed_time = pd.DataFrame()\r\n is_exist = False\r\n for phase in ['POWER_TEST', 'THROUGHPUT_TEST_1']:\r\n benchmark_phase = (df['benchmarkPhase'] == phase)\r\n if any(benchmark_phase): # whether this phase exist in the BB logs\r\n if phase == 'POWER_TEST': # power test overall and each query\r\n stream_num = ((df['streamNumber']) == 0)\r\n query_num = (pd.notnull(df['queryNumber']))\r\n mask = benchmark_phase & stream_num & query_num\r\n seconds = df[mask]['durationInSeconds'].values\r\n elapsed_time.insert(0, phase, seconds)\r\n elapsed_time.index = df[mask]['queryNumber'].astype('int64')\r\n elif phase == 'THROUGHPUT_TEST_1':\r\n streams = int(np.max(df['streamNumber']))\r\n for stream in range(streams + 1):\r\n stream_num = ((df['streamNumber']) == stream)\r\n query_num = (pd.notnull(df['queryNumber']))\r\n mask = benchmark_phase & stream_num & query_num\r\n seconds = df[mask]['durationInSeconds'].values\r\n elapsed_time.insert(stream + 1, 'stream{0}'.format(stream), seconds)\r\n elapsed_time.index = df[mask]['queryNumber'].astype('int64')\r\n is_exist = True\r\n if is_exist:\r\n print('*' * 100)\r\n print('Elapsed time of each query:\\n {0} \\n'.format(elapsed_time.to_string()))\r\n\r\n result_path = self.bb_log_path + os.sep + 'bb_results.log'\r\n with open(result_path, 'a') as f:\r\n f.write('*' * 100 + '\\n')\r\n f.write('Elapsed time of each query:\\n {0} \\n'.format(elapsed_time.to_string()))\r\n else:\r\n print('It seems BigBenchTimes.csv in {0} does not include TPCx-BB phases:POWER_TEST, THROUGHPUT_TEST_1' \\\r\n 'existing...'.format(self.bb_log_path))\r\n exit(-1)" ]
[ "0.68735605", "0.65807796", "0.62916756", "0.6186423", "0.615599", "0.6137281", "0.60909104", "0.6023061", "0.60019714", "0.59586823", "0.594622", "0.5925285", "0.59251946", "0.5902604", "0.5852945", "0.58364284", "0.5809314", "0.5793759", "0.5784017", "0.5747181", "0.572242", "0.56981856", "0.5697849", "0.5695142", "0.5681201", "0.56806237", "0.56806237", "0.56775886", "0.56775886", "0.56775886", "0.56617844", "0.565884", "0.5655462", "0.5633722", "0.5630302", "0.5619516", "0.5605793", "0.56024086", "0.5598834", "0.55947244", "0.5587695", "0.5586973", "0.5583809", "0.55817896", "0.5577887", "0.55722433", "0.55687475", "0.55682784", "0.55224615", "0.55011564", "0.5499526", "0.5490791", "0.54866546", "0.5484877", "0.5481437", "0.54732937", "0.5456476", "0.54549915", "0.545162", "0.54511935", "0.54510385", "0.5441442", "0.5441442", "0.5439933", "0.54393786", "0.5439018", "0.542939", "0.54250854", "0.54193527", "0.54193527", "0.54156864", "0.5414721", "0.541203", "0.54088014", "0.5406767", "0.5404023", "0.54024166", "0.54005307", "0.53997636", "0.539373", "0.53913194", "0.5388945", "0.53875244", "0.5383277", "0.5381556", "0.53814447", "0.53813344", "0.5380359", "0.5378669", "0.5376557", "0.5367699", "0.5366486", "0.53628236", "0.53614223", "0.5358734", "0.5337323", "0.5335146", "0.5329258", "0.5323607", "0.53216255" ]
0.5750923
19
Plot time points given in data file and compare to x^3
def plot_data(fname): if not os.path.isfile(fname): print('No data has been generated yet, aborting...') sys.exit(1) with open(fname, 'r') as fd: data = json.load(fd) x = np.arange(0, max(data, key=lambda e: e[0])[0], 1) const = .55e-8 func = lambda x: const * x**3 plt.plot( *zip(*data), label=r'ShRec3D data points', linestyle='None', marker='h' ) plt.plot(x, func(x), label=r'$ %.0e \cdot x^3$' % const) plt.title(r'Complexity ($\in \Theta\left(x^3\right)$) visualization of ShRec3D') plt.xlabel('loci number') plt.ylabel('execution time (seconds)') plt.legend(loc='best') plt.savefig('time_comparison.png', dpi=300, bbox_inches='tight') plt.show()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def example3():\n arrive_time=example2() # Get packets arrive time using example1\n time_series.plot_time_series(arrive_time) # Plot time series using packets arrive time", "def _figure_3():\n\n dataset_id = 3\n pkl_file = _pkl_file_path(dataset_id)\n with open(pkl_file, 'rb') as f:\n data = pickle.load(f)\n\n cdata = data[:, 33]\n seconds = np.arange(data.shape[0]) * 1. / 250\n\n plt.xlim(right=seconds[-1])\n plt.plot(seconds, cdata, color='black', linestyle=':')\n plt.ticklabel_format(useOffset=False)\n plt.xlabel('Second')\n plt.ylabel('Microstrain')\n plt.savefig('Figure3.png', dpi=300)\n plt.gcf().clear()", "def plot_three(estacion,formato):\n global num_ticks\n\n if formato == 'vladi':\n ruta='/home/oscar/Doctorado/GPS/programas/python/datos_vladi/completos/'\n ns_file = ruta + 'mb_' + estacion.upper() + '_GP0.dat1'\n ew_file = ruta + 'mb_' + estacion.upper() + '_GP0.dat2'\n up_file = ruta + 'mb_' + estacion.upper() + '_GP0.dat3'\n ns_archivo=open(ns_file,'r')\n ew_archivo=open(ew_file,'r')\n up_archivo=open(up_file,'r')\n ns_datos=ns_archivo.readlines()[3:]\n ew_datos=ew_archivo.readlines()[3:]\n up_datos=up_archivo.readlines()[3:]\n ns_date=np.zeros((len(ns_datos),1))\n ns_data=np.zeros((len(ns_datos),1))\n ns_dat=np.zeros((len(ns_datos),1))\n ns_error=np.zeros((len(ns_datos),1))\n for i,lineas in enumerate(ns_datos):\n ns_date[i],ns_data[i],ns_error[i]=lineas.split()[0],lineas.split()[1],lineas.split()[2]\n ns_x,ns_y = rem_mean(ns_date,ns_data,ns_error)\n ns_ticks,ns_labels = t_ticks(ns_x[0],ns_x[-1],num_ticks)\n ns_y = ns_y *1e5\n ew_date=np.zeros((len(ew_datos),1))\n ew_data=np.zeros((len(ew_datos),1))\n ew_dat=np.zeros((len(ew_datos),1))\n ew_error=np.zeros((len(ew_datos),1))\n for i,lineas in enumerate(ew_datos):\n ew_date[i],ew_data[i],ew_error[i]=lineas.split()[0],lineas.split()[1],lineas.split()[2]\n ew_x,ew_y = rem_mean(ew_date,ew_data,ew_error)\n ew_ticks,ew_labels = t_ticks(ew_x[0],ew_x[-1],num_ticks)\n ew_y = ew_y *1e5\n up_date=np.zeros((len(up_datos),1))\n up_data=np.zeros((len(up_datos),1))\n up_dat=np.zeros((len(up_datos),1))\n up_error=np.zeros((len(up_datos),1))\n for i,lineas in enumerate(up_datos):\n up_date[i],up_data[i],up_error[i]=lineas.split()[0],lineas.split()[1],lineas.split()[2]\n up_x,up_y = rem_mean(up_date,up_data,up_error)\n up_ticks,up_labels = t_ticks(up_x[0],up_x[-1],num_ticks)\n up_y = up_y *1e5\n elif formato == 'sara':\n ruta = '/home/oscar/Doctorado/GPS/programas/python/datos_sara/'\n ns_file = ruta + estacion.upper() + '/lat.' + estacion.lower() + '.dat'\n ew_file = ruta + estacion.upper() + '/long.' + estacion.lower() + '.dat'\n up_file = ruta + estacion.upper() + '/height.' 
+ estacion.lower() + '.dat'\n ns_archivo=open(ns_file,'r')\n ew_archivo=open(ew_file,'r')\n up_archivo=open(up_file,'r')\n ns_datos=ns_archivo.readlines()\n ew_datos=ew_archivo.readlines()\n up_datos=up_archivo.readlines()\n ns_date=np.zeros((len(ns_datos),1))\n ns_data=np.zeros((len(ns_datos),1))\n ns_dat=np.zeros((len(ns_datos),1))\n ns_error=np.zeros((len(ns_datos),1))\n for i,lineas in enumerate(ns_datos):\n ns_date[i],ns_data[i],ns_error[i]=lineas.split()[0],lineas.split()[1],lineas.split()[2]\n ns_x,ns_y = rem_mean(ns_date,ns_data,ns_error)\n ns_ticks,ns_labels = t_ticks(ns_x[0],ns_x[-1],num_ticks)\n ns_y = ns_y *1e5\n ew_date=np.zeros((len(ew_datos),1))\n ew_data=np.zeros((len(ew_datos),1))\n ew_dat=np.zeros((len(ew_datos),1))\n ew_error=np.zeros((len(ew_datos),1))\n for i,lineas in enumerate(ew_datos):\n ew_date[i],ew_data[i],ew_error[i]=lineas.split()[0],lineas.split()[1],lineas.split()[2]\n ew_x,ew_y = rem_mean(ew_date,ew_data,ew_error)\n ew_ticks,ew_labels = t_ticks(ew_x[0],ew_x[-1],num_ticks)\n ew_y = ew_y *1e5\n up_date=np.zeros((len(up_datos),1))\n up_data=np.zeros((len(up_datos),1))\n up_dat=np.zeros((len(up_datos),1))\n up_error=np.zeros((len(up_datos),1))\n for i,lineas in enumerate(up_datos):\n up_date[i],up_data[i],up_error[i]=lineas.split()[0],lineas.split()[1],lineas.split()[2]\n up_x,up_y = rem_mean(up_date,up_data,up_error)\n up_ticks,up_labels = t_ticks(up_x[0],up_x[-1],num_ticks)\n up_y = up_y *1e5\n elif formato == 'cabral':\n ruta = '/home/oscar/Doctorado/GPS/programas/python/datos_enrique_cabral/'\n ns_file = ruta + 'north_' + estacion.upper()\n ew_file = ruta + 'east_' + estacion.upper()\n up_file = ruta + 'vert_' + estacion.upper()\n ns_archivo=open(ns_file,'r')\n ew_archivo=open(ew_file,'r')\n up_archivo=open(up_file,'r')\n ns_datos=ns_archivo.readlines()[1:]\n ew_datos=ew_archivo.readlines()[1:]\n up_datos=up_archivo.readlines()[1:]\n ns_date=np.zeros((len(ns_datos),1))\n ns_data=np.zeros((len(ns_datos),1))\n ns_dat=np.zeros((len(ns_datos),1))\n ns_error=np.zeros((len(ns_datos),1))\n for i,lineas in enumerate(ns_datos):\n ns_date[i],ns_data[i],ns_error[i]=lineas.split()[0],lineas.split()[1],lineas.split()[8]\n ns_x = ns_date\n ns_y = ns_data\n ns_ticks,ns_labels = t_ticks(ns_x[0],ns_x[-1],num_ticks)\n ew_date=np.zeros((len(ew_datos),1))\n ew_data=np.zeros((len(ew_datos),1))\n ew_dat=np.zeros((len(ew_datos),1))\n ew_error=np.zeros((len(ew_datos),1))\n for i,lineas in enumerate(ew_datos):\n ew_date[i],ew_data[i],ew_error[i]=lineas.split()[0],lineas.split()[1],lineas.split()[8]\n ew_x = ew_date\n ew_y = ew_data\n ew_ticks,ew_labels = t_ticks(ew_x[0],ew_x[-1],num_ticks)\n up_date=np.zeros((len(up_datos),1))\n up_data=np.zeros((len(up_datos),1))\n up_dat=np.zeros((len(up_datos),1))\n up_error=np.zeros((len(up_datos),1))\n for i,lineas in enumerate(up_datos):\n up_date[i],up_data[i],up_error[i]=lineas.split()[0],lineas.split()[1],lineas.split()[6]\n up_x = up_date\n up_y = up_data\n up_ticks,up_labels = t_ticks(up_x[0],up_x[-1],num_ticks)\n else:\n exit('[ERROR] Unrecognized format')\n\n ind = np.where(ns_x >= 2000)\n ns_x = ns_x[ind[0]]\n ns_y = ns_y[ind[0]]\n ind = np.where(ew_x >= 2000)\n ew_x = ew_x[ind[0]]\n ew_y = ew_y[ind[0]]\n ind = np.where(up_x >= 2000)\n up_x = up_x[ind[0]]\n up_y = up_y[ind[0]]\n\n plt.figure(num=None, figsize=(7, 13))\n plt.subplots_adjust(wspace=.05)\n plt.subplot(3,1,1)\n plt.grid()\n plt.plot(ns_x,ns_y,'ro',mec='green',mfc='red',mew=.5,ms=3.0,alpha=0.5)\n plt.ylabel('Milimeters')\n plt.xticks(ns_ticks,ns_labels,rotation=30)\n 
plt.xlim(ns_x[0], ns_x[-1])\n plt.title('%s - %s' % (estacion.upper(),'NS'))\n plt.subplot(3,1,2)\n plt.grid()\n plt.plot(ew_x,ew_y,'ro',mec='blue',mfc='red',mew=.5,ms=3.0,alpha=0.5)\n plt.ylabel('Milimeters')\n plt.xticks(ew_ticks,ew_labels,rotation=30)\n plt.xlim(ns_x[0], ns_x[-1])\n plt.title('%s - %s' % (estacion.upper(),'EW'))\n plt.subplot(3,1,3)\n plt.grid()\n plt.plot(up_x,up_y,'ro',mec='blue',mfc='green',mew=.5,ms=3.0,alpha=0.5)\n plt.xlabel('Years since %4.1f'% (up_date[0]))\n plt.ylabel('Milimeters')\n plt.xticks(up_ticks,up_labels,rotation=30)\n plt.xlim(ns_x[0], ns_x[-1])\n plt.title('%s - %s' % (estacion.upper(),'UP'))\n plt.subplots_adjust(bottom=0.1, top=0.95, hspace=.43)\n# plt.savefig(estacion.upper()+'_'+formato+'.jpg',dpi=300)\n plt.show()", "def plot_1():\n p_files = []\n filename = \"energy_data_2D_80\"\n for file in sorted(os.listdir(folder)):\n if file.startswith(filename):\n p_files.append(os.path.join(folder,file))\n T_list = []\n fig, ax = plt.subplots()\n for p_file in p_files[3::3]:\n T = (os.path.splitext(os.path.basename(p_file))[0]).split('_',4)[4]\n #print(T)\n E = []\n t = []\n if (T not in T_list):\n T_list.append(T)\n with open(p_file) as csvfile:\n lines = csv.reader(csvfile, delimiter=' ')\n sweep = 0\n for row in lines:\n E.append(float(row[0]))\n t.append(sweep)\n sweep += 1\n ax.plot(t[0:200], E[0:200],label=\"T = \"+format(T[0:3]))\n ax.set_title(\"Energy per bond vs Time\")\n ax.set_ylabel(\"e / J\")\n ax.set_xlabel(\"t / sweeps\")\n ax.legend()\n\n fig.savefig(folder2+\"energy_vs_time.png\")\n fig.savefig(texfolder+\"energy_vs_time.pdf\")", "def plot_time(self, X, x0, t):\n\n Pressure = [Solution(self, (x-x0)/t).pressure for x in X]\n Velocity = [Solution(self, (x-x0)/t).velocity for x in X]\n Density = [Solution(self, (x-x0)/t).rho for x in X]\n\n fig, axs = plt.subplots(3, sharex=True)\n fig.suptitle(\"Solution of the Riemann problem\\nat t = {}s\".format(t))\n axs[0].plot(X, Density)\n axs[1].plot(X, Velocity)\n axs[2].plot(X, Pressure)\n\n axs[0].grid()\n axs[0].set(ylabel = \"Density\")\n axs[1].grid()\n axs[1].set(ylabel = \"Velocity\")\n axs[2].grid()\n axs[2].set(ylabel = \"Pressure\")\n\n plt.xlabel(\"Location x\")", "def draw_trajectory(filepath: str, timestamps: bool = False):\n\n t, x, y, z = coordinates.parse_coordinates_file(filepath=filepath)\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n plt.xlabel('X', fontsize=10, rotation = 0)\n plt.ylabel('Y', fontsize=10, rotation = 0)\n ax.set_zlabel('Z', fontsize=10, rotation = 0)\n\n # Add timestamps to plot\n if timestamps:\n for i in range(len(t)):\n timea = str(datetime.timedelta(seconds=t[i]))\n ax.annotate(timea, (x[i], y[i], z[i]),)\n\n ax.scatter(x, y, z, label='Траектория движения НКА')\n # ax.legend()\n\n plt.show()", "def plot_and_save_3d(file_name, path_name, raw_data_file, show=False):\n print '-'*23+'PLOT (3d)'+'-'*24\n \n print 'Loading force data...', \n data = load_file(path_name+file_name)\n t = data['t']\n dyn = 1.0\n \n pic_path = path_name+'pics/'\n if not os.path.exists(pic_path):\n os.makedirs(pic_path)\n print 'done'\n print 'Creating and saving plots...', \n\n # x-moment\n plt.figure(1)\n plt.plot(t, dyn*data['dyn']['MX'], t, data['static']['MX'])\n plt.legend([\"Dynamic\", \"Static\"])\n plt.xlabel('t')\n plt.ylabel('Mx')\n plt.title('Moment (x)')\n plt.grid()\n plt.savefig('%sMx.png' %pic_path)\n\n # y-moment\n plt.figure(2)\n plt.plot(t, dyn*data['dyn']['MY'], t, data['static']['MY'])\n plt.legend([\"Dynamic\", \"Static\"])\n 
plt.xlabel('t')\n plt.ylabel('M')\n plt.title('Moment (y)')\n plt.grid()\n plt.savefig('%sMy.png' %pic_path)\n\n # z-moment\n plt.figure(3)\n plt.plot(t, dyn*data['dyn']['MZ'], t, data['static']['MZ'])\n plt.legend([\"Dynamic\", \"Static\"])\n plt.xlabel('t')\n plt.ylabel('Mz')\n plt.title('Moment (z)')\n plt.grid()\n plt.savefig('%sMz.png' %pic_path)\n \n # x-force\n plt.figure(4)\n plt.plot(t, dyn*data['dyn']['FX'], t, data['static']['FX'])\n plt.legend([\"Dynamic\", \"Static\"])\n plt.xlabel('t')\n plt.ylabel('Fx')\n plt.title('Fx')\n plt.grid()\n plt.savefig('%sFx.png' %pic_path)\n\n # y-force\n plt.figure(5)\n plt.plot(t, dyn*data['dyn']['FY'], t, data['static']['FY'])\n plt.legend([\"Dynamic\", \"Static\"])\n plt.xlabel('t')\n plt.ylabel('Fy')\n plt.title('Fy')\n plt.grid()\n plt.savefig('%sFy.png' %pic_path)\n\n # z-force\n plt.figure(6)\n plt.plot(t, dyn*data['dyn']['FZ'], t, data['static']['FZ'])\n plt.legend([\"Dynamic\", \"Static\"])\n plt.xlabel('t')\n plt.ylabel('Fz')\n plt.title('Fz')\n plt.grid()\n plt.savefig('%sFz.png' %pic_path)\n print 'done'\n\n #nice_looking_plots(t, data['dyn'], data['static'])\n\n if show:\n plt.show()", "def plot_data(self, filepath=None, time_min=None, time_max=None, title=None,\n electrode=None):\n\n # normalizes the samples x electrodes array containing the EEG data and\n # adds 1 to each row so that the y-axis value corresponds to electrode\n # location in the MNI coordinate (x,y,z) by electrode df containing\n # electrode locations\n\n if self.get_data().shape[0] == 1:\n nii = self.to_nii()\n nii.plot_glass_brain(pdfpath=filepath)\n elif self.get_data().empty:\n fig = plt.figure()\n ax = fig.add_subplot(111, aspect='equal')\n ax.set_facecolor('w')\n ax.set_xlabel(\"time\")\n ax.set_ylabel(\"electrode\")\n if filepath:\n plt.savefig(filename=filepath)\n else:\n plt.show()\n else:\n Y = _normalize_Y(self.data) # self.get_data()) this allows us to plot all the electrodes even the recon ones\n\n if electrode is not None:\n Y = Y.loc[:, electrode]\n if len(Y.shape) > 1:\n for i, column in enumerate(Y):\n Y[column] = Y[column] - int(column) + i\n\n # divide index by sample rate so that index corresponds to time\n if self.sample_rate:\n Y.index = np.divide(Y.index,np.mean(self.sample_rate))\n\n # if a time window is designated index data in that window\n if all([time_min, time_max]):\n mask = (Y.index >= time_min) & (Y.index <= time_max)\n Y = Y[mask]\n\n # if a time window is not designated, default to the first 500 seconds\n else:\n time_min = 0\n time_max = 10\n mask = (Y.index >= time_min) & (Y.index <= time_max)\n Y= Y[mask]\n \n if electrode:\n if len(Y.shape) > 1:\n ax = Y.plot(title=title, lw=.6)\n else:\n ax = Y.plot(title=title, lw=.6, color='k')\n else:\n ax = Y.plot(legend=False, title=title, color='k', lw=.6)\n ax.set_facecolor('w')\n ax.set_xlabel(\"time\")\n ax.set_ylabel(\"electrode\")\n\n if filepath:\n plt.savefig(filename=filepath)\n else:\n plt.show()", "def data_vis():\n dataroot = 'solar_data.txt'\n debug = False \n diff = False\n X, y = read_data(dataroot, debug, diff)\n\n # First plot the original timeseries\n fig = plt.figure(figsize=(40,40))\n\n fig.add_subplot(3,3,1)\n plt.plot(y)\n plt.title('Avg Global PSP (vent/cor) [W/m^2]')\n # plt.show()\n\n fig.add_subplot(3,3,2)\n plt.plot(X[:,0])\n plt.title('Avg Zenith Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,3)\n plt.plot(X[:,1])\n plt.title('Avg Azimuth Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,4)\n plt.plot(X[:,2])\n plt.title('Avg Tower Dry Bulb 
Temp [deg C]')\n # plt.show()\n\n fig.add_subplot(3,3,5)\n plt.plot(X[:,3])\n plt.title('Avg Tower RH [%]')\n # plt.show()\n\n fig.add_subplot(3,3,6)\n plt.plot(X[:,4])\n plt.title('Avg Total Cloud Cover [%]')\n # plt.show()\n\n fig.add_subplot(3,3,7)\n plt.plot(X[:,5])\n plt.title('Avg Avg Wind Speed @ 6ft [m/s]')\n # plt.show()\n\n ##########################################################################################\n # Plotting the Fourier Transform of the signals\n\n freq = np.fft.fftfreq(len(y), 1*60*60)\n\n fig = plt.figure(figsize=(40,40))\n\n fig.add_subplot(3,3,1)\n plt.plot(freq, np.abs(np.fft.fft(y)))\n plt.title('Avg Global PSP (vent/cor) [W/m^2]')\n # plt.show()\n\n fig.add_subplot(3,3,2)\n plt.plot(freq, np.abs(np.fft.fft(X[:,0])))\n plt.title('Avg Zenith Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,3)\n plt.plot(freq, np.abs(np.fft.fft(X[:,1])))\n plt.title('Avg Azimuth Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,4)\n plt.plot(freq, np.abs(np.fft.fft(X[:,2])))\n plt.title('Avg Tower Dry Bulb Temp [deg C]')\n # plt.show()\n\n fig.add_subplot(3,3,5)\n plt.plot(freq, np.abs(np.fft.fft(X[:,3])))\n plt.title('Avg Tower RH [%]')\n # plt.show()\n\n fig.add_subplot(3,3,6)\n plt.plot(freq, np.abs(np.fft.fft(X[:,4])))\n plt.title('Avg Total Cloud Cover [%]')\n # plt.show()\n\n fig.add_subplot(3,3,7)\n plt.plot(freq, np.abs(np.fft.fft(X[:,5])))\n plt.title('Avg Avg Wind Speed @ 6ft [m/s]')\n # plt.show()\n\n ##################################################################################################\n # Print correlation matrix\n\n df = pd.DataFrame(np.c_[y, X])\n df.columns = ['Avg Global PSP (vent/cor) [W/m^2]','Avg Zenith Angle [degrees]','Avg Azimuth Angle [degrees]','Avg Tower Dry Bulb Temp [deg C]','Avg Tower RH [%]','Avg Total Cloud Cover [%]','Avg Avg Wind Speed @ 6ft [m/s]']\n f = plt.figure(figsize=(19, 15))\n plt.matshow(df.corr(), fignum=f.number)\n plt.xticks(range(df.shape[1]), df.columns, fontsize=14, rotation=20)\n plt.yticks(range(df.shape[1]), df.columns, fontsize=14)\n cb = plt.colorbar()\n cb.ax.tick_params(labelsize=14)\n plt.title('Correlation Matrix', fontsize=16);\n plt.show()", "def plot_data_timeseries(X, output_file=None):\n\n n_features = X.shape[1]\n\n fig, ax = plt.subplots(nrows=n_features, figsize=(9, 4 * n_features),\n sharex=True, squeeze=False)\n\n for i in range(n_features):\n ax[i, 0].plot(X[:, i], '-')\n\n ax[i, 0].set_ylabel(r'$x_{:d}$'.format(i + 1))\n\n ax[i, 0].grid(ls='--', color='gray', alpha=0.5)\n\n if i == n_features - 1:\n ax[i, 0].set_xlabel('Time')\n\n if output_file is not None and output_file:\n plt.savefig(output_file, bbox_inches='tight')\n\n plt.show()", "def plot_x(t, x):\n plt.figure()\n plt.plot(t, x)\n plt.title(\"Vertical position of the skydiver as a function of time\")\n plt.xlabel(\"Time t [s]\")\n plt.ylabel(\"Height [m]\")\n plt.savefig('Parachute_position.png')", "def real_time_plot(files):\n global len_data, first_iter, colors\n\n for i,F in enumerate(files):\n\n # Load data\n data = pylab.loadtxt(F, delimiter=',', skiprows=1, usecols=(5,6,7))\n\n # Check if new data\n if (len_data!= len(data[:,0])):\n\n # Plot\n label = ntpath.basename(F)\n label = label[0:-4]\n ax.plot(data[:,0], data[:,1], data[:,2], colors[i], label=label)\n\n pyplot.draw()\n\n # Update globals\n len_data = len(data[:,0])\n\n if (first_iter == True):\n ax.legend()\n first_iter = False", "def plot_temp():\r\n work_book = xlrd.open_workbook(\"Temp.xls\")\r\n sheet1 = work_book.sheet_by_name(\"Temperature\")\r\n 
time_x = sheet1.col_values(1)\r\n temp_y = sheet1.col_values(0)\r\n plt.title(\"Time\")\r\n plt.xlabel(\"Time\")\r\n plt.ylabel(\"Temperature\")\r\n plt.plot(time_x, temp_y)\r\n plt.show()", "def loadAndPlot1DMassData(dataFile='movingPointMassData/testPointMassData000.pkl'):\n # Load the data back\n inputDataFile = open(dataFile, \"rb\")\n dataOut = pickle.load(inputDataFile)\n inputDataFile.close()\n # Iterate over the different saved trajectores and plot out the results.\n for i in range(len(dataOut[0])):\n plt.figure(i)\n plt.plot(dataOut[0][i][1],dataOut[0][i][0])\n plt.show()", "def example_data_file():\n\n header1 = \"#Sample Interval: 0.100000 (seconds)\"\n header2 = \"Timestamp,AccelX,AccelY,RateX,RateY\"\n header3 = \"dd-mmm-yyyy HH:MM:SS.FFF,mm/s2,mm/s2,rad/s,rad/s\"\n\n start_date = dt.datetime(2016, 3, 17, 1, 0, 0)\n\n # Add one tenth of a second\n time_delta = dt.timedelta(0, 0, 0, 100)\n\n # Sample frequency in Hz\n sample_freq = 10\n\n # 20 in event duration in seconds\n Ts = 60 * 20\n\n # Number of points\n N = Ts * sample_freq\n\n # Array of times\n time = [start_date + i * time_delta for i in range(N)]\n time_str = [t.strftime(\"%Y-%m-%d %H:%M:%S.%f\") for t in time]\n\n ax, ay, Rx, Ry = example_data(sample_freq, Ts)\n\n data = [\n \",\".join([time_str[i], str(ax[i]), str(ay[i]), str(Rx[i]), str(Ry[i])]) for i in range(N)\n ]\n\n data.insert(0, header3)\n data.insert(0, header2)\n data.insert(0, header1)\n\n return \"\\n\".join(data)", "def coordinate_vs_time_plotter(array, xyz_axis=0, bird=0, axis_of_time_steps=2, start=0., end=1.):\r\n y_values = array[bird, xyz_axis, :]\r\n x_values = get_time_array(array, axis_of_time_steps, start, end)\r\n\r\n fig = plt.figure()\r\n ax = fig.add_subplot()\r\n\r\n if xyz_axis == 0:\r\n ax.set_ylabel('X (m)')\r\n elif xyz_axis == 1:\r\n ax.set_ylabel('Y (m)')\r\n elif xyz_axis == 2:\r\n ax.set_ylabel('Z (m)')\r\n else:\r\n print(\"That is not a valid axis choice. 
Please choose one of: 0, 1, 2\")\r\n ax.set_xlabel('Time (s)')\r\n ax.scatter(x_values, y_values)\r\n return fig.show()", "def plot_data(self):", "def one_period_plot():\n file = \"Data/matfiles/20131221.mat\"\n object = MatReader(file)\n\n NeA = object.NeA\n latA = object.latA\n times = object.secondsA\n mlt = object.mltA\n ind1 = 2606 #lat inds\n ind2 = 13940 #lat inds\n \n ind1 = 3197 #mlat inds\n ind2 = 14390 #mlat inds\n \n T = ind2 - ind1\n ind1 += int(T/2)\n ind2 += int(T/2)\n\n latA = latA[ind1:ind2]\n NeA = NeA[ind1:ind2]\n # NeA = object.meanie(NeA, 5)\n times = times[ind1:ind2]\n mlt = mlt[ind1:ind2]\n mlt = hour_round(mlt)\n\n lats = np.zeros_like(latA)\n lats[0] = latA[0]\n for i in range(len(latA)-1):\n dlat = latA[i+1] - latA[i]\n if dlat < 0:\n lats[i+1] = lats[i] - dlat\n else:\n lats[i+1] = lats[i] + dlat\n\n lats += 90\n\n xticks = np.array([-90, -70, -30, 30, 70, 110, 150, 210, 250, 270]) + 90\n gridticks = np.array([-90, -70, -30, 30, 70, 77, 103, 110, 150, 210, 250, 270]) + 90\n # plt.plot(lats, NeA, \".\", markersize = 1)\n # plt.plot([0, 0], [0, np.max(NeA)], \"k\")\n # plt.plot([30, 30], [0, np.max(NeA)], \"k\")\n # plt.plot([60, 60], [0, np.max(NeA)], \"k\")\n # plt.plot([120, 120],[0, np.max(NeA)], \"k\")\n # plt.plot([150, 150], [0, np.max(NeA)], \"k\")\n # plt.plot([167, 167], [0, np.max(NeA)], \"k\")\n # plt.plot([193, 193], [0, np.max(NeA)], \"k\")\n # plt.plot([210, 210], [0, np.max(NeA)], \"k\")\n # plt.plot([240, 244], [0, np.max(NeA)], \"k\")\n # plt.plot([300, 300], [0, np.max(NeA)], \"k\")\n # plt.plot([330, 330], [0, np.max(NeA)], \"k\")\n # plt.plot([360, 360], [0, np.max(NeA)], \"k\")\n # plt.xticks(xticks)\n # plt.xlabel(\"Geomagnetic latitude going from 0 to 360 degrees, starting and ending at south pole\")\n # plt.ylabel(\"Electron density [cm$^{-1}$]\")\n # plt.title(\"One SWARM satellite period\")\n # plt.grid(\"on\", axis = \"x\", xdata = gridticks)\n #adding letters\n x = (gridticks[:-1] + gridticks[1:])/2 - 3\n y = np.zeros_like(x) - np.max(NeA)/40\n s = [\"S\", \"B\", \"A\", \"B\", \"C\", \"D\", \"C\", \"B\", \"A\", \"B\", \"S\"]\n # for i in range(len(x)):\n # plt.text(x[i], y[i], s[i], fontsize = 10)\n # plt.savefig(\"Figures/swarm_period.pdf\")\n # plt.show()\n\n # plt.plot(times, latA)\n # plt.plot(times, mlt)\n # plt.show()\n print(lats[0])\n print(lats[-1])\n \n fig, ax = plt.subplots()\n ax.plot(lats, NeA, \".\", markersize = 1)\n ax.set_xticks(xticks, minor=False)\n ax.set_xticks([167, 193], minor=True)\n ax.xaxis.grid(True, which = \"major\")\n ax.xaxis.grid(True, which = \"minor\")\n for i in range(len(x)):\n ax.text(x[i], y[i], s[i], fontsize = 10)\n ax.set_xlabel(\"Geomagnetic latitude going from 0 to 360 degrees, starting and ending at south pole\")\n ax.set_ylabel(\"Electron density [cm$^{-1}$]\")\n ax.set_title(\"One Swarm satellite period\")\n # plt.savefig(\"Figures/swarm_period.pdf\")\n plt.show()\n plt.plot(mlt, NeA)\n plt.show()\n plt.plot(mlt, lats)\n plt.show()", "def loadtcdat(filename= None):\n\n import numpy as np\n from StringIO import StringIO\n import Tkinter\n from tkFileDialog import askopenfilename\n from matplotlib.pyplot import figure,subplot,plot,xlabel,ylabel,title,legend\n\n if filename is not None:\n print \"Opening %s\\n\" %(filename)\n else:\n root = Tkinter.Tk()\n root.withdraw()\n filename = askopenfilename(parent=root, title='Open File',\n filetypes=[('csv files', '*.csv'),\n ('txt files', '*.txt')])\n root.destroy()\n root.mainloop()\n\n if filename is not None:\n f=open(filename)\n names = 
f.readline()\n names = names.strip('\\r\\n')\n names = names.split(\",\")\n f.close()\n\n data = np.genfromtxt(filename, delimiter=',',\n unpack=True, skip_header=2)\n time = data[0]\n\n figure()\n subplot(211)\n plot(time, data[1], label='Feed bin')\n plot(time, data[2], label='Part bin')\n ylabel(r'$ T_{bin} \\left(K\\right) $')\n legend(loc='best')\n\n subplot(212)\n plot(time,data[4],label='Feed bin heater')\n plot(time,data[5],label='Part bin heater')\n xlabel(r'$ Time \\left(s\\right) $')\n ylabel(r'$ P_{heater} \\left( \\frac{W}{m^2} \\right) $')\n legend(loc='best')\n\n return (data, time, names)", "def load_times(file_name):\n data = np.loadtxt(file_name)\n data = data[data[:, 0].argsort()]\n times = data[:, 0]\n values = data[:, 1]\n\n # Remove the mean amplitude and shift time origin\n times -= times[0]\n values -= np.mean(values)\n\n return times, values", "def _figure_2():\n\n dataset_id = 3\n pkl_file = _pkl_file_path(dataset_id)\n with open(pkl_file, 'rb') as f:\n data = pickle.load(f)\n\n cdata = data[:, 0]\n seconds = np.arange(data.shape[0]) * 1. / 250\n\n plt.xlim(right=seconds[-1])\n plt.plot(seconds, cdata, color='black', linestyle=':')\n plt.ticklabel_format(useOffset=False)\n plt.xlabel('Second')\n plt.ylabel('Microstrain')\n plt.savefig('Figure2.png', dpi=300)\n plt.gcf().clear()", "def plot_observed(self):\n \n fig = plt.figure(figsize=(15,5))\n plt.subplot(1,3,1)\n for k in self.observed_data.keys():\n plt.plot(self.observed_data[k][0], self.observed_data[k][1], 'bx')\n plt.xlabel(\"X\",fontsize=20)\n plt.ylabel(\"Y\",fontsize=20)\n\n fig = plt.figure(figsize=(16,4))\n \n # Plot of time vs X\n plt.subplot(1,3,2)\n for k in self.observed_data.keys(): \n plt.plot(k*np.ones(self.observed_data[k].shape[1]), self.observed_data[k][0], 'bx')\n plt.xlabel(\"time\",fontsize=20)\n plt.ylabel(\"X\",fontsize=20)\n plt.xlim(0,self.n_time_steps+1)\n\n # Plot of time vs Y\n plt.subplot(1,3,3)\n for k in self.observed_data.keys():\n plt.plot(k*np.ones(self.observed_data[k].shape[1]), self.observed_data[k][1], 'bx')\n plt.xlabel(\"time\",fontsize=20)\n plt.ylabel(\"Y\",fontsize=20)\n plt.xlim(0,self.n_time_steps+1)\n plt.show();", "def gentoplot(time):\n \n toplot = {}\n\n # Generates a list of movie paths in the data folder.\n files = dftf.batch_s('.') \n\n # Generates dft traces and plots for each roi in each movie.\n for file in files:\n os.chdir(file)\n print(os.path.basename(file))\n\n for col in COLS:\n \n if os.path.exists('params') == True:\n rawtracedata = dftf.TraceData(fname=RESULTS_FILE, paramsfile=PARAMS_FILE, \n corrparamsfile=CORRPARAMS_FILE, colname=col)\n td = rawtracedata.Processrawtrace(DFTSIZE, HZ_BOUND1, HZ_BOUND2)\n moviename = os.path.basename(os.path.abspath('.'))\n \n # Selects the area of the raw trace to plot.\n frames = time * td['fps']\n #print(frames)\n plottime = td['seltrace'][:frames]/10\n #print(len(plottime))\n ms = plottime-np.mean(plottime)\n xsec = np.linspace(0, len(plottime)/td['fps'], len(plottime))\n #print(xsec)\n condition = td['condition']\n toplot[moviename] = [xsec, ms, condition]\n print(np.max(ms), np.min(ms))\n \n return(toplot)", "def trajectory1(self):\r\n\r\n trackt = [] # particle trajectory,\r\n trackx = [] # particle trajectory\r\n an = [] # analitical s**2 + x**2 = t**2\r\n s1 = [] # s = 10; s = 0, light\r\n s2 = [] # s = 20;\r\n s3 = [] # s = 40;\r\n for i in range(0, len(self.dt.obs.obt_g)):\r\n trackt.append(float(i))\r\n trackx.append(self.dt.x[i])\r\n an.append(math.sqrt(float(i) ** 2 + self.dt.x[i] ** 2))\r\n 
s1.append(math.sqrt(1.0 ** 2 + self.dt.x[i] ** 2))\r\n s2.append(math.sqrt(2.0 ** 2 + self.dt.x[i] ** 2))\r\n s3.append(math.sqrt(4.0 ** 2 + self.dt.x[i] ** 2))\r\n\r\n # plots:\r\n\r\n (fig, ax) = plt.subplots() # figsize=(7,5)\r\n\r\n # trajectory\r\n\r\n ax.plot(\r\n trackx,\r\n trackt,\r\n marker='+',\r\n linewidth=1,\r\n linestyle='-',\r\n color='green',\r\n label='treck',\r\n )\r\n\r\n # measurement t\r\n # ax.plot(self.dt.x, self.dt.t, marker=\"+\", linestyle=\" \", color=\"blue\", label=\"result of measurement\")\r\n\r\n ax.plot(\r\n self.dt.x,\r\n self.dt.t,\r\n marker='o',\r\n linestyle=' ',\r\n color='black',\r\n label='result of measurement',\r\n )\r\n\r\n # analitical t\r\n\r\n ax.plot(self.dt.x, an, linestyle='-', color='red',\r\n label='continuum')\r\n\r\n # light trajectory\r\n\r\n ax.plot(trackx, trackx, linestyle='-', color='yellow',\r\n label='s=0 (light)')\r\n\r\n # s(x) curves\r\n\r\n ax.plot(\r\n trackx,\r\n s1,\r\n linestyle=':',\r\n linewidth=1,\r\n color='k',\r\n label='s=1.0',\r\n )\r\n ax.plot(\r\n trackx,\r\n s2,\r\n linestyle=':',\r\n linewidth=1,\r\n color='k',\r\n label='s=2.0',\r\n )\r\n ax.plot(\r\n trackx,\r\n s3,\r\n linestyle=':',\r\n linewidth=1,\r\n color='k',\r\n label='s=4.0',\r\n )\r\n\r\n # error of measurement t\r\n\r\n ax.errorbar(self.dt.x, self.dt.t, fmt='k ', yerr=self.dt.t_err)\r\n\r\n # signature on the horizontal x-axis\r\n\r\n ax.set_xlabel('x in metres')\r\n xm = -1.0\r\n for i in range(len(self.dt.x)):\r\n if self.dt.x[i] > xm:\r\n xm = self.dt.x[i]\r\n stepx = round(xm / float(len(self.dt.x)), 1)\r\n xm = round(xm + stepx, 1)\r\n ax.set_xlim([0.0, xm])\r\n\r\n # signature on vertical y axis\r\n\r\n ax.set_ylabel('t in metres of light time ')\r\n ym = -1.0\r\n for i in range(len(self.dt.t)):\r\n if self.dt.t[i] > ym:\r\n ym = self.dt.t[i]\r\n stepy = round(ym / float(len(self.dt.t)), 1)\r\n ym = round(ym + stepy, 1)\r\n ax.set_ylim([0.0, ym])\r\n\r\n # Create an instance of the class that will be responsible for the location of the labels (base is step on x)\r\n\r\n locatorx = matplotlib.ticker.MultipleLocator(base=stepx)\r\n\r\n # Set the locator for the main labels\r\n\r\n ax.xaxis.set_major_locator(locatorx)\r\n\r\n # Create an instance of the class that will be responsible for the location of the labels (base is step on y)\r\n\r\n locatory = matplotlib.ticker.MultipleLocator(base=stepy)\r\n\r\n # Set the locator for the main labels\r\n\r\n ax.yaxis.set_major_locator(locatory)\r\n\r\n ax.grid()\r\n\r\n # show legend\r\n\r\n ax.legend(loc='upper left')\r\n\r\n # show drawing\r\n\r\n plt.show()", "def test():\n data1 = resources_vs_time(0.0, 50)\n data2 = resources_vs_time(1.0, 10)\n data3 = resources_vs_time(2.0, 10)\n data4 = resources_vs_time(0.5, 10)\n print data1\n simpleplot.plot_lines(\"Growth\", 600, 600, \"time\", \"total resources\", [data1])", "def plot_timeDB(timeDB, xunits='yr', yunits='MPa', skip=8, P0=33.0):\n time, pressure = np.loadtxt(timeDB, skiprows=skip, unpack=True)\n pressure = pressure * P0\n\n #if xunits == 'yr':\n # time = time / 31536000.0\n #elif xunits == 'day':\n # time = time / 86400.0\n\n plt.figure()\n plt.plot(time,pressure,'b.-',lw=3,label='pressure')\n plt.xlabel('Time [{}]'.format(xunits))\n plt.ylabel('Pressure [{}]'.format(yunits))\n plt.title('Time History')\n plt.show()", "def plot_time_frames(self):\n\n fig = plt.figure()\n plt.grid(True)\n\n plt.ylim([-1.5,1.5])\n plt.xlim([0,1])\n\n for key in self.timeframes.keys():\n if key == 0:\n plt.plot(self.x, self.timeframes[key], label=\"time: \" + 
str(round(key*self.dt, 3)), linewidth=5)\n else:\n plt.plot(self.x, self.timeframes[key], label=\"time: \" + str(round(key*self.dt, 3)))\n\n plt.title(\"Wave at different times\")\n plt.legend(loc=\"upper right\")\n plt.show()\n\n # fig.savefig('results/pics_wave/vibrating_string_'+ self.type + '.png', dpi=150)", "def plot_xyz():\n plt.subplot(3,1,1) # for x axis\n plt.title('x value v.s. time')\n plt.grid(True)\n plt.ylabel('X')\n plt.xlabel('t')\n plt.plot(x, '-r')\n\n plt.subplot(3,1,2) # for y axis\n plt.title('y value v.s. time')\n plt.grid(True)\n plt.ylabel('Y')\n plt.xlabel('t')\n plt.plot(y, '-g')\n\n plt.subplot(3,1,3) # for z axis\n plt.title('z value v.s. time')\n plt.grid(True)\n plt.ylabel('Z')\n plt.xlabel('t')\n plt.plot(z, '-b')", "def plot_trajectories_XYZ(t_start,t_stop):\n \n time, ankle_l_trajectory, ankle_r_trajectory,foot_l_contact,foot_r_contact,muscle_lh_activations, muscle_rh_activations,muscle_lh_forces,muscle_rh_forces,joint_lh_positions,joint_rh_positions = load_data()\n \n index_start = np.where(time == t_start)[0][0]\n index_end = np.where(time == t_stop)[0][0]\n \n time = time[index_start:index_end+1]\n ankle_l_trajectory = ankle_l_trajectory[index_start:index_end+1,:]\n ankle_r_trajectory = ankle_r_trajectory[index_start:index_end+1,:]\n \n #time=np.linspace(1,len(ankle_l_trajectory[:,0]),len(ankle_l_trajectory[:,0]));\n \n plt.figure('Trajectories')\n plt.subplot(311)\n plt.plot(time,ankle_l_trajectory[:,0])\n plt.plot(time,ankle_r_trajectory[:,0])\n #plt.title('Trajectory of the X component')\n plt.xlabel('Time [s]')\n plt.ylabel('X Position [cm]')\n plt.legend(['Left ankle','Right ankle'],loc='upper right')\n \n plt.subplot(312)\n plt.plot(time,ankle_l_trajectory[:,1])\n plt.plot(time,ankle_r_trajectory[:,1])\n #plt.title('Trajectory of the Y component')\n plt.xlabel('Time [s]')\n plt.ylabel('Y Position [cm]')\n plt.legend(['Left ankle','Right ankle'],loc='upper right')\n \n plt.subplot(313)\n plt.plot(time,ankle_l_trajectory[:,2])\n plt.plot(time,ankle_r_trajectory[:,2])\n #plt.title('Trajectory of the Z component')\n plt.xlabel('Time [s]')\n plt.ylabel('Z Position [cm]')\n plt.legend(['Left ankle','Right ankle'],loc='upper right')\n \n# plt.suptitle('Decomposition of the trajectories of the hind feet')\n return", "def line(self,file=None,file2=None,data=None,xmin=None,xmax=None,ymin=None,ymax=None,col=(0,1),col2=(0,1),xtitle='xtitle',ytitle='ytitle',title=' ',save=None,linewidth=2,label=None,label2=None,legendloc=None,sizex=6,sizey=4):\n import matplotlib.pyplot as plt\n import numpy as np\n\n self.version()\n fig = plt.figure(figsize=(sizex, sizey))\n ax = fig.add_subplot(111)\n if xmin is not None:\n axes = plt.gca()\n axes.set_xlim([xmin, xmax])\n axes.set_ylim([ymin, ymax])\n\n if file is not None:\n data = np.loadtxt(file,usecols=col, unpack=True) #Read columns\n count = 0\n np.delete(data[1],0)\n np.delete(data[0],0)\n for coord in data[0]:\n data[0][count] = coord - 90\n if coord <= 90:\n data[0][count] += 360\n count = count + 1\n\n\n\n #print(coord)\n for i in col2:\n if i == 0:\n continue\n plt.plot(data[0], data[i], linewidth=linewidth,label = label)\n\n\n if file2 is not None:\n data2 = np.loadtxt(file2,usecols=col2, unpack=True)\n count = 0\n\n data2[1][0] = data2[1][-1]\n data2[1][1] = data2[1][-2]\n #print(len(data[0]),len(data2))\n for coord in data2[0]:\n print(data2[1][count])\n data2[0][count] = coord - 90\n if coord <= 90 and coord >=0:\n data2[0][count] += 360\n\n #if coord >=89 and coord <=91:\n # print(data2[1][count])\n #if 
data2[0][count]>=250 and data2[0][count] <=300:\n # data2[1][count] = 0\n count = count + 1\n\n for i in col2:\n if i == 0:\n continue\n plt.plot(data2[0],data2[i],'--',linewidth=linewidth,label = label2)\n\n\n ax.set_xlabel(xtitle)\n ax.set_ylabel(ytitle)\n ax.legend(loc=legendloc)\n \"\"\"Loc:\n best -- 0\n upper right -- 1\n upper left -- 2\n lower left -- 3\n lower right -- 4\n right -- 5\n center left -- 6\n center right -- 7\n lower center -- 8\n upper center -- 9\n center -- 10\n \"\"\"\n self.save(fig, save)\n\n return None", "def plot_spectra(path):\r\n plt.figure(figsize=(20, 10))\r\n x, y= np.loadtxt(fname=path, delimiter='\\t',dtype=int,\r\n usecols = (1,2), skiprows=100, unpack = True)\r\n plt.plot(x, y)\r\n return plt.show()", "def visualize_tma_time_series(data_path):\n\n X, y = load_tma_data(data_path)\n\n fig = plt.figure()\n ax = fig.add_subplot('111')\n\n for i in range(X.shape[0]):\n C = X[i, ...].reshape(X.shape[1], X.shape[2])\n l = y[i]\n ax.imshow(C, vmin=0, vmax=1)\n ax.set_title('Label : %i' % l)\n plt.pause(0.1)\n\n # labels = np.unique(y)\n # fig, axes = plt.subplots(figsize=(13, 4), ncols=4)\n # for i, l in enumerate(labels, start=0):\n # idx = np.where(y == l)[0]\n # temp = np.mean(X[idx, ...], axis=0)\n # temp[:8, :] = temp[:8, :]*6\n # pos = axes[i].imshow(temp, vmin=0, vmax=1)\n # axes[i].set_title(\"Label : %i\" % l)\n # fig.colorbar(pos, ax=axes[i])\n # plt.show()", "def plot_CC_frames(tt, x1, x3, T, dt):\n d_idx = int(T / dt)\n index_values = r_[np.arange(0, len(tt), d_idx), -1]\n\n # light gray -> black\n alpha = np.linspace(0.8, 0, len(index_values))\n\n\n for k,i in enumerate(index_values):\n CCframe(x1[i], x3[i], xy =0, alpha = alpha[k])\n\n\n pl.tick_params(\\\n axis='x', # changes apply to the x-axis\n which='both', # both major and minor ticks are affected\n top = 'off', bottom='off', labelbottom='off') # ticks along the bottom edge are off\n\n pl.tick_params(\\\n axis='y', # changes apply to the x-axis\n which='both', # both major and minor ticks are affected\n left='off', right = \"off\", labelleft='off')\n\n label_annotation(tt)", "def plotTime(self):\n plt.figure()\n t = [i for i in range(len(self.nodes_infected))]\n print(t)\n plt.title('Nodos infectados vs Tiempo')\n plt.xlabel('Instantes de tiempo')\n plt.ylabel('# de nodos infectados')\n plt.plot(t, self.nodes_infected)\n plt.grid(True)\n plt.show()", "def fn_intensitytrace(file_name,folder,data,time,i,x,y):\n import numpy as np\n import matplotlib.pyplot as plt\n\n\n figure_name=file_name+'_intensity_trace'\n for a in range(0,len(data),int(len(data)/2)):\n if i==a:\n x_coord=x[i+1]\n y_coord=y[i+1]\n max_int=np.max(data[i])\n min_int=np.min(data[i])\n #norm_int = [b / max_int for b in data[i]]\n plt.figure()\n #plt.plot(time[0:len(time)-1],norm_int,'g')\n plt.plot(time[0:len(time)-1],data[i],'g')\n plt.xlim(0, 100)\n plt.ylim(min_int, (max_int+100))\n plt.xlabel('Time (s)', fontname='Arial', fontsize=12)\n plt.ylabel('Photon counts (photons)', fontname='Arial', fontsize=12)\n plt.xticks(fontname='Arial',fontsize=12)\n plt.yticks(fontname='Arial', fontsize=12)\n plt.savefig(folder+'/Figures/PDFs'+ '/' + figure_name + '_'+str(x_coord)+','+str(y_coord)+'.pdf', dpi=500)\n plt.savefig(folder+'/Figures/PNGs'+ '/' + figure_name + '_'+str(x_coord)+','+str(y_coord)+'.png', dpi=500)\n\n return (plt.show())", "def PlotTimes(metadata, data):\n\n gp = Gnuplot.Gnuplot(persist=1)\n gp('set data style impulses')\n gp('set xtics 1')\n gp.clear()\n gp.xlabel('seconds')\n gp.ylabel('duration in second')\n 
gp.title(metadata.AsTitle())\n styles = {}\n line_style = 1\n\n for dataset in data:\n x = numpy.array(dataset.time, dtype='float_')\n if not dataset.name in styles:\n styles[dataset.name] = line_style\n line_style += 1\n d = Gnuplot.Data(x, dataset.data,\n title=dataset.name,\n with_='impulses ls %d' % styles[dataset.name])\n else: # no need to repeat a title that exists already.\n d = Gnuplot.Data(x, dataset.data,\n with_='impulses ls %d' % styles[dataset.name])\n\n gp.replot(d)\n gp.hardcopy('/tmp/%s-%s-%f.png' %\n (metadata.name, metadata.kernel, metadata.duration),\n terminal='png')", "def plot_data(x, t, new_figure=True, save_path=None):\r\n # Plot the binary data\r\n ma = ['o', 's', 'v']\r\n fc = ['r', 'g', 'b'] # np.array([0, 0, 0]), np.array([1, 1, 1])]\r\n tv = numpy.unique(t.flatten()) # an array of the unique class labels\r\n if new_figure:\r\n plt.figure()\r\n for i in range(tv.shape[0]):\r\n pos = (t == tv[i]).nonzero() # returns a boolean vector mask for selecting just the instances of class tv[i]\r\n plt.scatter(numpy.asarray(x[pos, 0]), numpy.asarray(x[pos, 1]), marker=ma[i], facecolor=fc[i])\r\n\r\n plt.xlabel('$x_1$')\r\n plt.ylabel('$x_2$')\r\n\r\n if save_path is not None:\r\n plt.savefig(save_path, fmt='png')", "def figure_3():\n from matplotlib.animation import FuncAnimation\n from collections import OrderedDict\n\n plt.rc(\"font\", **{\n \"family\": \"sans-serif\",\n \"sans-serif\": [\"Helvetica\"],\n })\n plt.rc(\"text\", usetex=True)\n plt.rc(\"lines\", linewidth=1)\n plt.rc(\"axes\", grid=True)\n plt.rc(\"grid\", linestyle=\"--\", alpha=0.8)\n\n exp1 = \"rlcmrac-sac\"\n exp2 = \"clcmrac-nullagent\"\n exp_list = [exp1, exp2]\n\n data = {exp: get_data(exp) for exp in exp_list}\n\n fig, ax = plt.subplots(figsize=[16, 9])\n plt.xlabel(\"Time, sec\")\n\n linekw = dict(linewidth=1, alpha=0.3)\n\n def make_line(label, *args):\n ln, = plt.plot([], [], label=label, *args, **linekw)\n return ln\n\n time_list = [data[exp][\"time\"] for exp in exp_list]\n time = time_list[np.argmin(list(map(len, time_list)))]\n\n sample_data = list(data.values())[0]\n lines = [\n (\n make_line(\"Command\", \"k--\"),\n sample_data[\"cmd\"]\n ),\n (\n make_line(r\"$x_{r,1}$\", \"k\"),\n sample_data[\"state\"][\"reference_system\"][:, 0]\n ),\n (\n make_line(r\"$x_{r,2}$\", \"k\"),\n sample_data[\"state\"][\"reference_system\"][:, 1]\n ),\n (\n make_line(FORMATTING[exp2][\"label\"] + r\" $x_{1}\", \"r-.\"),\n data[exp2][\"state\"][\"main_system\"][:, 0],\n ),\n (\n make_line(FORMATTING[exp2][\"label\"] + r\" $x_{2}\", \"r-.\"),\n data[exp2][\"state\"][\"main_system\"][:, 1],\n ),\n (\n make_line(FORMATTING[exp1][\"label\"] + r\" $x_{1}\", \"b-.\"),\n data[exp1][\"state\"][\"main_system\"][:, 0],\n ),\n (\n make_line(FORMATTING[exp1][\"label\"] + r\" $x_{2}\", \"b-.\"),\n data[exp1][\"state\"][\"main_system\"][:, 1],\n ),\n ]\n\n dist_lines = {\n k: plt.vlines(\n [], [], [],\n colors=FORMATTING[k][\"color\"],\n alpha=1,\n label=FORMATTING[k][\"label\"],\n )\n for k in exp_list\n }\n\n plt.legend(handles=dist_lines.values())\n\n def to_segments(xs, ys):\n return [[[x, 0], [x, y]] for x, y in zip(xs, ys)]\n\n def init():\n states = np.hstack([\n data[exp][\"state\"][\"main_system\"].ravel() for exp in exp_list\n ])\n ylim_max = states.max()\n ylim_min = states.min()\n ylim_btw = 1.2 * (ylim_max - ylim_min)\n ylim_max, ylim_min = ylim_min + ylim_btw, ylim_max - ylim_btw\n\n ax.set_xlim(0, time[-1])\n ax.set_ylim(ylim_min, ylim_max)\n return [line for line, _ in lines] + 
list(dist_lines.values())\n\n def update(frame):\n for line, line_data in lines:\n line.set_data(time[:frame], line_data[:frame])\n\n # Update\n for i, exp in enumerate(exp_list):\n dist_time = data[exp][\"memory\"][\"t\"][frame]\n if i == 0:\n dist = 100 * data[exp][\"dist\"][frame]\n else:\n dist = [-1] * len(dist_time)\n\n segments = to_segments(dist_time, dist)\n dist_lines[exp].set_segments(segments)\n\n return [line for line, _ in lines] + list(dist_lines.values())\n\n anim = FuncAnimation(\n fig, update, init_func=init,\n frames=range(0, len(time), 10), interval=10\n )\n # plt.show()\n anim.save(os.path.join(args.save_dir, \"dist-movie.mp4\"))", "def parse_txt(txt, points, times, name, std=False):\n\n with open(txt, 'r') as f:\n\n a = []\n for line in f:\n a.append(line)\n\n T = 0\n while a[T].count('[ T ]') == 0:\n T += 1\n T += 1\n temp = []\n time = []\n while a[T] != '\\n':\n info = a[T].split(':')\n temp.append(int(info[0]))\n time.append(float(info[1]))\n T += 1\n\n for i, T in enumerate(temp):\n x = np.array([time[i], time[i]])\n y = np.array([0, 20])\n plt.plot(x, y, '--', linewidth=3, color='r')\n plt.annotate('T = %s' % T, xy=(time[i], np.mean(points)), xytext=(time[i] + 10, np.amax(points)))\n\n if std:\n std_equil = np.zeros([4, len(time) - 1])\n for i in range(len(time) - 1):\n begin = np.where(times == time[i])[0][0]\n end = np.where(times == time[i + 1])[0][0]\n slice = points[begin:end]\n equil = timeseries.detectEquilibration(slice)[0]\n avg = np.mean(slice[equil:])\n std = np.std(slice[equil:])\n print(avg, std)\n std_equil[:, i] = [avg, std, equil + begin, end]\n\n for i in range(std_equil.shape[1]):\n avg = std_equil[0, i]\n std = std_equil[1, i]\n x = np.array([times[int(std_equil[2, i])], times[int(std_equil[3, i])]])\n upperbound = np.array([avg + std, avg + std])\n lowerbound = np.array([avg - std, avg - std])\n plt.fill_between(x, upperbound, lowerbound, alpha=0.5, color='orange')\n # plt.plot(x, upperbound, '--', linewidth=1, color='orange')\n # plt.plot(x, lowerbound, '--', linewidth=1, color='orange')\n\n B = 0\n while a[B].count('[ bench_%s ]' % name) == 0:\n B += 1\n B += 1\n\n benchval = []\n bencherr = []\n y_low = [np.amin(points)]\n y_high = [np.amax(points)]\n\n while a[B] != '\\n' and B < len(a):\n info = a[B].split('+/-')\n benchval.append(float(info[0]))\n bencherr.append(float(info[1]))\n B += 1\n\n for i, v in enumerate(benchval):\n if i < len(temp) - 1:\n x = np.array([time[i], time[i + 1]])\n else:\n x = np.array([time[i], (time[i] + 1)*1000000])\n y = np.array([v, v])\n y_low.append(v - bencherr[i])\n y_high.append(v + bencherr[i])\n upperbound = np.array([v + bencherr[i], v + bencherr[i]])\n lowerbound = np.array([v - bencherr[i], v - bencherr[i]])\n plt.fill_between(x, upperbound, lowerbound, alpha=0.5, color='g')\n plt.plot(x, y, linewidth=2, color='black')\n # plt.plot(x, upperbound, '--', linewidth=1, color='b')\n # plt.plot(x, lowerbound, '--', linewidth=1, color='b')\n\n ybounds = np.array([min(y_low)*.99, max(y_high)*1.01])\n xbounds = np.array([-3, max(times)])\n\n return xbounds, ybounds", "def __plot(name, x, y):\n import matplotlib.pyplot as plt\n\n plt.plot(x, y)\n plt.xlabel('elements')\n plt.ylabel('time (seconds)')\n plt.savefig(\"{}\".format(name))", "def plot_overscan_variation(t_lst, overscan_lst, figfile):\n \n # Quality check plot of the mean overscan value over time \n fig = plt.figure(figsize=(8,6), dpi=150)\n ax2 = fig.add_axes([0.1,0.60,0.85,0.35])\n ax1 = fig.add_axes([0.1,0.15,0.85,0.35])\n #conversion of the 
DATE-string to a number\n date_lst = [dateutil.parser.parse(t) for t in t_lst]\n datenums = mdates.date2num(date_lst)\n\n ax1.plot_date(datenums, overscan_lst, 'r-', label='mean')\n ax2.plot(overscan_lst, 'r-', label='mean')\n for ax in fig.get_axes():\n leg = ax.legend(loc='upper right')\n leg.get_frame().set_alpha(0.1)\n ax1.set_xlabel('Time')\n ax2.set_xlabel('Frame')\n ax1.set_ylabel('Overscan mean ADU')\n ax2.set_ylabel('Overscan mean ADU')\n # adjust x and y limit\n y11,y12 = ax1.get_ylim()\n y21,y22 = ax2.get_ylim()\n z1 = min(y11,y21)\n z2 = max(y21,y22)\n ax1.set_ylim(z1,z2)\n ax2.set_ylim(z1,z2)\n ax2.set_xlim(0, len(overscan_lst)-1)\n # adjust rotation angle of ticks in time axis\n plt.setp(ax1.get_xticklabels(),rotation=30)\n\n # save figure\n fig.savefig(figfile)\n plt.close(fig)", "def plot_one(file, formato, debuger=False):\n global num_ticks\n if formato == 'vladi':\n archivo=open(file,'r')\n datos=archivo.readlines()[3:]\n sta_name=file[-13:-9]\n comp=file[-1]\n comp_name=['']\n if (comp == '1'):\n comp_name='NS'\n elif (comp=='2'):\n comp_name='EW'\n elif (comp=='3'):\n comp_name='Z'\n date=np.zeros((len(datos),1))\n data=np.zeros((len(datos),1))\n dat=np.zeros((len(datos),1))\n error=np.zeros((len(datos),1))\n for i,lineas in enumerate(datos):\n date[i],data[i],error[i]=lineas.split()[0],lineas.split()[1],lineas.split()[2]\n a,b = rem_mean(date,data,error)\n ticks,labels = t_ticks(a[0],a[-1],num_ticks)\n b = b * 1e5\n elif formato == 'sara':\n archivo=open(file[0],'r')\n datos=archivo.readlines()\n sta_name=file[-8:-4]\n comp=file[-12:-9]\n comp_name=['']\n if (comp == 'lat'):\n comp_name='NS'\n elif (comp=='ong'):\n comp_name='EW'\n elif (comp=='ght'):\n comp_name='Z'\n date=np.zeros((len(datos),1))\n data=np.zeros((len(datos),1))\n dat=np.zeros((len(datos),1))\n error=np.zeros((len(datos),1))\n for i,lineas in enumerate(datos):\n date[i],data[i],error[i]=lineas.split()[0],lineas.split()[1],lineas.split()[2]\n a,b = rem_mean(date,data,error)\n ticks,labels = t_ticks(a[0],a[-1],num_ticks)\n b = b * 1e5\n elif formato == 'cabral':\n archivo=open(file,'r')\n datos=archivo.readlines()[1:]\n sta_name=file[-4:]\n comp=file[-9:-5]\n if (comp == 'orth'):\n comp_name='NS'\n date=np.zeros((len(datos),1))\n data=np.zeros((len(datos),1))\n dat=np.zeros((len(datos),1))\n error=np.zeros((len(datos),1))\n for i,lineas in enumerate(datos):\n date[i],data[i],error[i]=lineas.split()[0],lineas.split()[1],lineas.split()[8]\n elif (comp=='east'):\n comp_name='EW'\n date=np.zeros((len(datos),1))\n data=np.zeros((len(datos),1))\n dat=np.zeros((len(datos),1))\n error=np.zeros((len(datos),1))\n for i,lineas in enumerate(datos):\n date[i],data[i],error[i]=lineas.split()[0],lineas.split()[1],lineas.split()[8]\n elif (comp=='vert'):\n comp_name='Z'\n date=np.zeros((len(datos),1))\n data=np.zeros((len(datos),1))\n dat=np.zeros((len(datos),1))\n error=np.zeros((len(datos),1))\n for i,lineas in enumerate(datos):\n date[i],data[i],error[i]=lineas.split()[0],lineas.split()[1],lineas.split()[6]\n# a,b = rem_mean(date,data,error)\n ticks,labels = t_ticks(date[0],date[-1],num_ticks)\n a = date\n b = data\n else:\n exit('[ERROR] Unrecognized format')\n\n # Se leen todas las lineas del archivo y se asigna a las variables\n # Se quita la media de los datos y del tiempo\n #ind = np.where(a >= 2010)\n #a = a[ind[0]]\n #b = b[ind[0]]\n\n sta_name=str(sta_name).upper()\n plt.figure()\n plt.plot(a,b,'ro',ms=3.0,alpha=0.5)\n plt.xlabel('Years since %4.1f'% (date[0]))\n plt.ylabel('Milimeters')\n 
plt.xticks(ticks,labels,rotation=45)\n plt.grid()\n plt.xlim(a[0], a[-1])\n plt.title('%s - %s' % (sta_name,comp_name))\n plt.subplots_adjust(bottom=0.15)\n plt.show()", "def plot_from_file(filename='Save_Data.txt'):\r\n txt = open(filename)\r\n data = []\r\n for line in txt:\r\n line = line.strip()\r\n line = shlex.split(line)\r\n if len(line) > 0:\r\n data.append(line)\r\n plot_E, w, length, intensity, reciprocal = [], [], [], [], []\r\n for d in data:\r\n plot_E.append(float(d[0])*1e-6)\r\n w.append(float(d[1]))\r\n length.append(float(d[2]))\r\n intensity.append(float(d[3]))\r\n reciprocal.append(1/(plot_E[-1]*length[-1]*intensity[-1]))\r\n reciprocal = np.array(reciprocal)\r\n reciprocal *= (np.max(intensity)-np.min(intensity))/(np.max(reciprocal)-np.min(reciprocal))\r\n reciprocal += ((np.min(intensity))-np.min(reciprocal))\r\n fig = plt.figure(figsize=[14, 4])\r\n ax = fig.add_subplot(1, 1, 1)\r\n fig.subplots_adjust(right=0.75)\r\n line1, = ax.plot(plot_E, length, 'r', lw=2, label='Target Distance')\r\n ax2 = ax.twinx()\r\n line2, = ax2.plot(plot_E, intensity, 'g', lw=2, label='Intensity Required')\r\n# line4, = ax2.plot(plot_E, reciprocal, 'k', lw=2, label='Reciprocal Product')\r\n ax3 = ax.twinx()\r\n ax3.spines['right'].set_position(('axes', 1.15))\r\n make_patch_spines_invisible(ax3)\r\n ax3.spines['right'].set_visible(True)\r\n line3, = ax3.plot(plot_E, w, 'b', lw=2, label='Collimator Width')\r\n ax.set_xlabel(r'Electric Field Strength / MVm$^{-1}$', fontsize=20)\r\n ax.set_xlim(np.min(plot_E), np.max(plot_E))\r\n ax.set_ylabel('Target Distance / m', fontsize=20, color=line1.get_color())\r\n ax2.set_ylabel(r'Intensity / I$_0$', fontsize=20, color=line2.get_color())\r\n ax3.set_ylabel('Collimator Width / mm', fontsize=20, color=line3.get_color())\r\n ax.tick_params(axis='y', colors=line1.get_color())\r\n ax2.tick_params(axis='y', colors=line2.get_color())\r\n ax3.tick_params(axis='y', colors=line3.get_color())\r\n lines = [line1, line2, line3]\r\n ax.legend(lines, [l.get_label() for l in lines], loc='upper center', fontsize=15)\r\n ax.set_xscale('log')\r\n ax.set_xticks([10, 100, 1000])\r\n ax.set_xticklabels([10, 100, 1000])\r\n ax.minorticks_on()\r\n ax2.minorticks_on()\r\n ax3.minorticks_on()\r\n ax.grid()\r\n ax.set_title('Minimum required target distance and proton intensity and\\noptimum collimator width as a function of electric field strength', fontsize=16)\r\n plt.show()\r\n fig.savefig(f'E_vs_Length_and_Intensity_Wide.pdf', bbox_inches='tight')\r\n index = np.argmax(reciprocal)\r\n return [plot_E[index], length[index], intensity[index]]", "def logplot(in_dir, fname, xlim, ylim, title):\n\n with open(in_dir + fname,'r') as logfile:\n lf_lines = logfile.readlines()\n\n traj_x = []\n traj_y = []\n\n for row in lf_lines:\n if row[:4] == 'pose':\n #print(float(row[10:-2]))\n tup = row[7:]\n sep_pos = tup.find(' , ')\n traj_x.append(float(tup[:sep_pos]))\n traj_y.append(float(tup[sep_pos+3:]))\n\n liveplot(traj_x, traj_y, xlim, ylim, title)", "def plot_raw_eeg_data(time_data, eeg_data):\n plt.plot(time_data, eeg_data, 'g-')\n plt.xlabel(\"time [secs]\")\n plt.ylabel(\"raw EEG values\")\n plt.title(\"EEG Data\")\n # plt.xlim(min(x_data) - 1, max(x_data) + 1)\n # plt.ylim(min(y_data) - 1, max(y_data) + 1)\n plt.show()", "def ProfilePlot(t,y,z,scale=86400, axis=0,color=[0.5,0.5,0.5]):\r\n from matplotlib import collections\r\n from matplotlib.ticker import Formatter\r\n\r\n class MyFormatter(Formatter):\r\n def __init__(self, dates, fmt='%b %d %Y'):\r\n self.fmt = fmt\r\n 
self.dates = dates\r\n\r\n def __call__(self, x, pos=0):\r\n 'Return the label for time x s'\r\n return datetime.strftime(datetime(1990,1,1)+timedelta(seconds=x),self.fmt)\r\n\r\n tsec = othertime.SecondsSince(t)\r\n formatter = MyFormatter(tsec)\r\n \r\n y = np.swapaxes(y,0,axis)\r\n \r\n lines=[]\r\n line2 =[]\r\n for ii, tt in enumerate(tsec):\r\n #xplot = set_scale(y[:,ii],tt)\r\n xplot = tt + y[:,ii]*scale\r\n lines.append(np.array((xplot,z)).T)\r\n line2.append(np.array([[tt,tt],[z[0],z[-1]]]).T)\r\n \r\n \r\n LC1 = collections.LineCollection(lines,colors=color,linewidths=1.5)\r\n LC2 = collections.LineCollection(line2,colors='k',linestyles='dashed') # Zero axis\r\n \r\n ax=plt.gca()\r\n ax.add_collection(LC1)\r\n ax.add_collection(LC2)\r\n ax.set_ylim((z.min(),z.max()))\r\n ax.xaxis.set_major_formatter(formatter)\r\n ax.set_xlim((tsec[0],tsec[-1]))\r\n plt.xticks(rotation=17) \r\n \r\n return ax", "def plot_data(heart_filt, pace_filt):\n\n plt.figure(1)\n plt.plot(heart_filt, pace_filt)\n plt.show()", "def test_3d_plot(self):\n db = pd.HDFStore('test.h5')\n df_iv = db['iv']\n db.close()\n\n date = pd.to_datetime('2015-04-01')\n self.full_iv.get_data()\n df_date0 = self.full_iv.df_all.query('date == %r' % date)\n df_date1 = df_iv.query('date == %r' % date)\n df_date = pd.concat([df_date0, df_date1])\n \"\"\":type: pd.DataFrame\"\"\"\n\n x = df_date['dte']\n y = df_date['strike']\n z = df_date['impl_vol']\n\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n # noinspection PyUnresolvedReferences\n ax.plot_trisurf(x, y, z, cmap=cm.jet, linewidth=0.2)\n # ax.plot_wireframe(x, y, z, rstride=1, cstride=1)\n plt.show()", "def make_plot(solution, t, plot_Ts, plot_T1, plot_T2, xaxis, cc, delta_cc, albedo,delta_albedo\\\n , em1, delta_em1, em2, delta_em2):\n\n plt.close('all')\n fig = plt.figure()\n ax1 = fig.add_subplot(111)\n \n if xaxis == 'cloud cover':\n inc_cc = []\n for i in range(len(solution[0,:])):\n inc_cc.append(cc + (i*delta_cc)/calcs_per_timestep)\n\n if plot_Ts == 'On': ax1.plot(inc_cc,solution[0,:],label = 'Surface temperature')\n if plot_T1 == 'On': ax1.plot(inc_cc,solution[1,:], label = 'Lower atmospheric temperature')\n if plot_T2 == 'On': ax1.plot(inc_cc,solution[2,:], label = 'Upper atmospheric temperature')\n if plot_Ts == 'Off' and plot_T1 == 'Off' and plot_T2 == 'Off': raise ValueError('No y variable selected')\n\n elif xaxis == 'time':\n \n #for i in range(len(solution[0,:])):\n #t.append(i*(timestep/calcs_per_timestep))\n \n if plot_Ts == 'On': ax1.plot(t,solution[0,:],label = 'Surface temperature')\n if plot_T1 == 'On': ax1.plot(t,solution[1,:], label = 'Lower atmospheric temperature')\n if plot_T2 == 'On': ax1.plot(t,solution[2,:], label = 'Upper atmospheric temperature')\n if plot_Ts == 'Off' and plot_T1 == 'Off' and plot_T2 == 'Off': raise ValueError('No y variable selected')\n \n elif xaxis == 'albedo':\n inc_alb = []\n for i in range(len(solution[0,:])):\n inc_alb.append(albedo+(i*delta_albedo)/calcs_per_timestep)\n \n if plot_Ts == 'On': ax1.plot(inc_alb,solution[0,:],label = 'Surface temperature')\n if plot_T1 == 'On': ax1.plot(inc_alb,solution[1,:], label = 'Lower atmospheric temperature')\n if plot_T2 == 'On': ax1.plot(inc_alb,solution[2,:], label = 'Upper atmospheric temperature')\n if plot_Ts == 'Off' and plot_T1 == 'Off' and plot_T2 == 'Off': raise ValueError('No y variable selected')\n \n elif xaxis == 'epsilon1':\n inc_em = []\n for i in range(len(solution[0,:])):\n inc_em.append(em1+(i*delta_em1)/calcs_per_timestep)\n \n if plot_Ts == 'On': 
ax1.plot(inc_em,solution[0,:],label = 'Surface temperature')\n if plot_T1 == 'On': ax1.plot(inc_em,solution[1,:], label = 'Lower atmospheric temperature')\n if plot_T2 == 'On': ax1.plot(inc_em,solution[2,:], label = 'Upper atmospheric temperature')\n if plot_Ts == 'Off' and plot_T1 == 'Off' and plot_T2 == 'Off': raise ValueError('No y variable selected')\n \n elif xaxis == 'epsilon2':\n inc_em = []\n for i in range(len(solution[0,:])):\n inc_em.append(em2+(i*delta_em2)/calcs_per_timestep)\n \n if plot_Ts == 'On': ax1.plot(inc_em,solution[0,:],label = 'Surface temperature')\n if plot_T1 == 'On': ax1.plot(inc_em,solution[1,:], label = 'Lower atmospheric temperature')\n if plot_T2 == 'On': ax1.plot(inc_em,solution[2,:], label = 'Upper atmospheric temperature')\n if plot_Ts == 'Off' and plot_T1 == 'Off' and plot_T2 == 'Off': raise ValueError('No y variable selected')\n \n else: raise ValueError('No x axis selected')\n \n fig.suptitle('Global Average Temperature')\n ax1.set_title(f'Final Surface Temperature = {round(solution[0,-1],2)} K')\n ax1.legend()\n\n if xaxis == 'cloud cover': ax1.set_xlabel('Cloud Cover (%)')\n elif xaxis == 'time': ax1.set_xlabel('Time (years)')\n elif xaxis == 'albedo': ax1.set_xlabel('Albedo')\n elif xaxis == 'epsilon1': ax1.set_xlabel(u'\\u03B5\\u2081')\n elif xaxis == 'epsilon2': ax1.set_xlabel(u'\\u03B5\\u2082')\n plt.ylabel('Temerature (K)')\n return fig", "def plot_tseries(*args, **kwargs) :\n data = kwargs.pop('data')\n return data.dropna().plot(x=args[0], y=args[1], **kwargs)", "def plot_data(x):\n if DATA_2D:\n plt.scatter(x[:, 0], x[:, 1])\n plt.show()\n else:\n fig = plt.figure()\n ax = Axes3D(fig)\n ax.scatter(x[:, 0], x[:, 1], x[:, 2])\n ax.set_xlabel('X Label')\n ax.set_ylabel('Y Label')\n ax.set_zlabel('Z Label')\n plt.show()", "def show_track(name_of_track):\n\n df = pd.read_csv(name_of_track, index_col=0)\n\n x = df[\"x\"][:] # simple x and y position of the track\n y = df[\"y\"][:]\n\n plt.plot(x[:2500], y[:2500], \"r.\")\n plt.plot(x[2501:5000], y[2501:5000], \"g.\")\n plt.plot(x[5001:7500], y[5001:7500], \"b.\")\n plt.plot(x[7501:10000], y[7501:10000], \"y.\")\n plt.show()", "def showPlot3():\n interested_in = [(20,20),(25,16),(40,10),(50,8),(80,5),(100,4)]\n proc_sim_data = []\n for item in interested_in:\n len_sim_data = []\n raw_sim_data = runSimulation(2, 1.0, item[0], item[1], 0.75, 100, Robot, False)\n for mes in raw_sim_data:\n len_sim_data.append(len(mes))\n proc_sim_data.append(sum(len_sim_data)/len(len_sim_data))\n plot([1,1.56,4,6.25,16,25], proc_sim_data)\n title('Dependence of cleaning time on room shape')\n xlabel('ratio of width to height')\n ylabel('mean time (clocks)')\n show()", "def view(filename):\n n, data, data_dB,sr,ch=inputwav(filename)\n t=np.linspace(0,n/sr,n)\n py.close()\n fig, (ax1) = py.subplots(nrows=1) \n ax1.plot(t[0:n:100],data[0:n:100],'k-',linewidth=1,label=filename)\n ax1.legend(loc=1)\n ax1.set_ylabel('Amplitude (Rel. Bit)')\n ax1.set_xlabel('Time (s)')", "def animate1Dframes(x, data):\n plt.ion() # Set the plot to animated. 
\n ax1 = plt.axes()\n line, = plt.plot(data[-1], x , '-*k')\n\n for u in data:\n line.set_xdata(u)\n plt.draw()\n #tm.sleep(0.25)", "def check_plots(tracklist_subset, tracklist):\n\n for trial in tracklist_subset: # Iterates over all available trials\n\n # Scale time for colormap\n scaled_time = (tracklist[trial]['data'].index.values -\n tracklist[trial]['data'].index.values.min()) / \\\n tracklist[trial]['data'].index.values.ptp()\n timemax = max(tracklist[trial]['data'].index.values)\n data = tracklist[trial]['data']\n colors = plt.cm.cubehelix(scaled_time)\n m = cm.ScalarMappable(cmap=cm.cubehelix)\n m.set_array(tracklist[trial]['data'].index.values)\n\n # filename = str(trial) + '.pdf'\n\n fig = plt.figure(figsize=(20, 20))\n fig.suptitle(tracklist[trial]['sequence'] + ' ' +\n tracklist[trial]['behavior'])\n\n ax1 = fig.add_subplot(4, 2, 1, projection='3d')\n ax1.set_title('Pt 1 Position')\n ax1.scatter3D(xs=tracklist[trial]['data']['pt1x_smth'],\n ys=tracklist[trial]['data']['pt1y_smth'],\n zs=tracklist[trial]['data']['pt1z_smth'],\n zdir='z', s=3, c=colors, marker='o',\n edgecolor='none') # 3D Scatter plot\n ax1.autoscale(enable=True, tight=True)\n ax1.set_xlabel('X position')\n ax1.set_ylabel('Y position')\n ax1.set_zlabel('Z position')\n plt.colorbar(m, shrink=0.5, aspect=10)\n\n ax2 = fig.add_subplot(4, 2, 3)\n ax2.plot(data.index.values, data['pt1y_v_smth'], 'bo')\n ax2.set_ylabel('Y velocity (cm/s)', color='b')\n ax2.tick_params('y', colors='b')\n\n ax3 = fig.add_subplot(4, 2, 5)\n ax3.plot(data.index.values, data['pt1_net_v'], 'bo')\n ax3.set_ylabel('Net Velocity (cm/s)', color='b')\n ax3.tick_params('y', colors='b')\n\n ax4 = fig.add_subplot(4, 2, 7)\n ax4.plot(data.index.values, data['pt1_net_a'], 'bo')\n ax4.set_ylabel('Net accel (cm/s2)', color='b')\n ax4.tick_params('y', colors='b')\n\n ax5 = fig.add_subplot(4, 2, 2, projection='3d')\n ax5.set_title('Pt 2 Position')\n ax5.scatter3D(xs=tracklist[trial]['data']['pt2x_smth'],\n ys=tracklist[trial]['data']['pt2y_smth'],\n zs=tracklist[trial]['data']['pt2z_smth'],\n zdir='z', s=3, c=colors, marker='o',\n edgecolor='none') # 3D Scatter plot\n ax5.autoscale(enable=True, tight=True)\n ax5.set_xlabel('X position')\n ax5.set_ylabel('Y position')\n ax5.set_zlabel('Z position')\n plt.colorbar(m, shrink=0.5, aspect=10)\n\n ax6 = fig.add_subplot(4, 2, 4)\n ax6.plot(data.index.values, data['pt2y_v_smth'], 'bo')\n ax6.set_ylabel('Y velocity (cm/s)', color='b')\n ax6.tick_params('y', colors='b')\n\n ax7 = fig.add_subplot(4, 2, 6)\n ax7.plot(data.index.values, data['pt2_net_v'], 'bo')\n ax7.set_ylabel('Net Velocity (cm/s)', color='b')\n ax7.tick_params('y', colors='b')\n\n ax8 = fig.add_subplot(4, 2, 8)\n ax8.plot(data.index.values, data['pt2_net_a'], 'bo')\n ax8.set_ylabel('Net accel (cm/s2)', color='b')\n ax8.tick_params('y', colors='b')\n\n plt.show()", "def plotPerTimeStamp(options):\n name = options['name'] + '_' + options['scan'] + '_perTime'\n if options['extra']:\n name += '_' + options['extra']\n f = openRootFileR(options['name']+'_perTime')\n histname = plotName(name, timestamp=False)\n filename = plotName(name, timestamp=True)\n filepath = plotPath(name, timestamp=True)\n print '<<< Save plot:', filepath\n hist = f.Get(histname)\n hist.SetErrorOption(options['error'])\n if options['big']:\n canvas = TCanvas('c', '', 8000, 1200)\n else:\n canvas = TCanvas('c', '', 1400, 500)\n canvas.SetLogy(options['logy'])\n gStyle.SetOptStat(options['optstat'])\n hist.Draw()\n gPad.Update()\n hist.GetXaxis().SetTimeDisplay(1)\n 
hist.GetXaxis().SetTimeFormat('#splitline{%d.%m.%y}{%H:%M:%S}%F1969-12-31' \\\n +' 22:00:00')\n hist.GetXaxis().SetLabelOffset(0.03)\n hist.GetXaxis().SetTitle('')\n if 'xmin' in options and 'xmax' in options:\n hist.GetXaxis().SetRangeUser(options['xmin'], options['xmax'])\n hist.GetYaxis().SetTitle(options['ytitle'])\n hist.GetYaxis().SetTitleOffset(1.2)\n for axis in [hist.GetXaxis(), hist.GetYaxis()]:\n axis.SetTitleFont(133)\n axis.SetTitleSize(16)\n axis.SetLabelFont(133)\n axis.SetLabelSize(12)\n axis.CenterTitle()\n if options['big']:\n axis.SetTickLength(0.01)\n if options['big']:\n hist.GetYaxis().SetTitleOffset(0.25)\n drawSignature(filename)\n gPad.Modified()\n gPad.Update()\n if options['retrn']:\n return [canvas, hist, f]\n else:\n canvas.Print(filepath)\n canvas.Close()\n closeRootFile(f, options['name']+'_perTime')", "def plot_file(filename, params):\n\tarr = None\n\twith open(filename) as filep:\n\t\tarr = json.load(filep)\n\tplot_data(arr, params)", "def general_plot(filepath, system_information, x_value_type, figsize=(7, 5)):\n general_data = load_data(filepath)\n fig = plt.figure(figsize=figsize)\n\n # NOTE: To make this function work, the x_label and x value data can only be in the same order as we do in here.\n # prepare data\n\n keys = general_data.keys()\n\n if x_value_type == 'nodes_cores':\n x_label = general_data[keys[0]]\n elif x_value_type == 'x_value':\n x_label = general_data[keys[5]]\n else:\n raise ValueError(\"Unsupport x_value_type input\")\n\n T_first = general_data[keys[1]]\n T_second = general_data[keys[2]]\n T_third = general_data[keys[3]]\n T_fourth = general_data[keys[4]]\n\n # making plot\n plt.plot(x_label, T_first, '.-', label=keys[1])\n plt.plot(x_label, T_second, '.-', label=keys[2])\n plt.plot(x_label, T_third, '.-', label=keys[3])\n plt.plot(x_label, T_fourth, '.-', label=keys[4])\n plt.legend()\n\n title = system_information + \" General Time \" + \"(\" + x_value_type + \")\"\n plt.title(title)\n plt.xlabel(x_value_type)\n plt.ylabel(\"time/s\")\n fig.autofmt_xdate()\n\n plt.savefig(system_information + \"_total.jpg\")\n plt.close()", "def time_analysis(self, time_points, plot=False, interval=1800):\n\n first_day = int(min(time_points)/86400)\n\n dyn_cl = dynamic_clusters()\n for t in time_points:\n day = int(t/86400)-first_day+1\n #print day\n time_in_day = t%86400 #in seconds\n dyn_cl.add_element(day,time_in_day) \n\n timestamps_vec = time_wrap(time_points)[0] \n fitting = activity_time(timestamps_vec, interval=interval)\n\n self.methods[\"time_dyn_clst\"] = dyn_cl\n self.methods[\"time_fitting\"] = fitting\n if plot: self.temporal_plot(vis=False)\n rospy.loginfo('Done\\n')", "def main():\n (time, heart_rate, pace) = parse_file()\n (hr_filt, v_filt) = smoothing(time, heart_rate, pace)\n plot_data(hr_filt, v_filt)", "def epics_data_plot(data):\n if isinstance(data, (xr.DataArray, xr.Dataset)):\n data = data.to_dataframe()", "def figure_2():\n from matplotlib.animation import FuncAnimation\n from collections import OrderedDict\n\n plt.rc(\"font\", **{\n \"family\": \"sans-serif\",\n \"sans-serif\": [\"Helvetica\"],\n })\n plt.rc(\"text\", usetex=True)\n plt.rc(\"lines\", linewidth=1)\n plt.rc(\"axes\", grid=True)\n plt.rc(\"grid\", linestyle=\"--\", alpha=0.8)\n\n exp_list = os.listdir(BASE_DATA_DIR)\n exp_list.remove(\"tmp\")\n exp_list.remove(\"mrac-nullagent\")\n\n data = {exp: get_data(exp) for exp in exp_list}\n\n fig, ax = plt.subplots(figsize=[16, 9])\n plt.xlabel(\"Time, sec\")\n\n linekw = dict(linewidth=1, alpha=0.3)\n\n def 
make_line(label, *args):\n ln, = plt.plot([], [], label=label, *args, **linekw)\n return ln\n\n time_list = [data[exp][\"time\"] for exp in exp_list]\n time = time_list[np.argmin(list(map(len, time_list)))]\n lines = [\n (\n make_line(\"Command\", \"k--\"),\n data[\"fecmrac-nullagent\"][\"cmd\"]\n ),\n (\n make_line(r\"$x_{r,1}$\", \"k\"),\n data[\"fecmrac-nullagent\"][\"state\"][\"reference_system\"][:, 0]\n ),\n (\n make_line(r\"$x_{r,2}$\", \"k\"),\n data[\"fecmrac-nullagent\"][\"state\"][\"reference_system\"][:, 1]\n ),\n (\n make_line(r\"FE-CMRAC $x_{1}$\", \"r-.\"),\n data[\"fecmrac-nullagent\"][\"state\"][\"main_system\"][:, 0],\n ),\n (\n make_line(r\"FE-CMRAC $x_{2}$\", \"r-.\"),\n data[\"fecmrac-nullagent\"][\"state\"][\"main_system\"][:, 1],\n ),\n (\n make_line(r\"RL-CMRAC $x_{1}$\", \"b-.\"),\n data[\"rlcmrac-sac\"][\"state\"][\"main_system\"][:, 0],\n ),\n (\n make_line(r\"RL-CMRAC $x_{2}$\", \"b-.\"),\n data[\"rlcmrac-sac\"][\"state\"][\"main_system\"][:, 1],\n ),\n ]\n\n dist_lines = {\n \"rlcmrac-sac\": plt.vlines(\n [], [], [],\n colors=FORMATTING[\"rlcmrac-sac\"][\"color\"],\n alpha=1,\n label=\"RL-CMRAC\",\n ),\n \"fecmrac-nullagent\": plt.vlines(\n [], [], [],\n colors=FORMATTING[\"fecmrac-nullagent\"][\"color\"],\n alpha=1,\n label=\"FE-CMRAC\",\n ),\n }\n memory_time_diff = np.diff(data[\"fecmrac-nullagent\"][\"memory\"][\"time\"])\n\n plt.legend(handles=dist_lines.values())\n\n def to_segments(xs, ys):\n return [[[x, 0], [x, y]] for x, y in zip(xs, ys)]\n\n def init():\n states = np.hstack([\n data[exp][\"state\"][\"main_system\"].ravel() for exp in exp_list\n ])\n ylim_max = states.max()\n ylim_min = states.min()\n ylim_btw = 1.2 * (ylim_max - ylim_min)\n ylim_max, ylim_min = ylim_min + ylim_btw, ylim_max - ylim_btw\n\n ax.set_xlim(0, time[-1])\n ax.set_ylim(ylim_min, ylim_max)\n return [line for line, _ in lines] + list(dist_lines.values())\n\n def update(frame):\n for line, line_data in lines:\n line.set_data(time[:frame], line_data[:frame])\n\n # Update FE-CMRAC\n if frame > 0 and memory_time_diff[frame-1] != 0:\n ta_idx = np.argmax(\n time > data[\"fecmrac-nullagent\"][\"memory\"][\"time\"][frame]\n )\n dist_time = time[:ta_idx]\n dist_k = data[\"fecmrac-nullagent\"][\"k\"][:ta_idx]\n dist = [\n -np.exp(-np.trapz(dist_k[i:], dist_time[i:]))\n for i in range(len(dist_time))\n ]\n segments = to_segments(dist_time, dist)\n dist_lines[\"fecmrac-nullagent\"].set_segments(segments)\n\n # Update RL-CMRAC\n dist_time = data[\"rlcmrac-sac\"][\"memory\"][\"t\"][frame]\n dist = 100 * data[\"rlcmrac-sac\"][\"dist\"][frame]\n segments = to_segments(dist_time, dist)\n dist_lines[\"rlcmrac-sac\"].set_segments(segments)\n return [line for line, _ in lines] + list(dist_lines.values())\n\n anim = FuncAnimation(\n fig, update, init_func=init,\n frames=range(0, len(time), 10), interval=10\n )\n # plt.show()\n anim.save(os.path.join(args.save_dir, \"dist-movie.mp4\"))", "def test_plt_v3offset_time():\n\n ta = WATA()\n wata_data = define_testdata()\n ta.source = ColumnDataSource(data=wata_data)\n ta.add_time_column()\n ta.setup_date_range()\n result = ta.plt_v3offset_time()\n\n assert bokeh_plot_type == type(result)", "def plotSingleTimeseries(data):\r\n \r\n print '...creating plot'\r\n fig = plt.figure(figsize=(11,8.5))\r\n ax = fig.add_subplot(111)\r\n for header in HEADER_NAMES[1:]:\r\n ax.plot(data[HEADER_NAMES[0]],data[header],label=header)\r\n #i, h = ax.get_legend_handles_labels()\r\n \r\n fig.autofmt_xdate()\r\n ax.set_title(PLOT_TITLE)\r\n ax.set_xlabel(X_AXIS_TITLE)\r\n 
ax.set_ylabel(Y_AXIS_TITLE)\r\n ax.grid(True)\r\n ax.xaxis.set_major_formatter(md.DateFormatter('%m-%d-%Y %H:%M'))\r\n #print i,h\r\n ax.legend()\r\n plt.show()\r\n return i,h", "def plot_results_traj_3d(p_x, p_y, p_z, xmin, xmax, ymin, ymax, zmin, zmax):\n fig, ax = plt.subplots(2 , 2, figsize = (10, 10))\n \n for p in np.arange(0, p_x.shape[0], step = 1): \n for t in np.arange(0, p_x.shape[1], step = 1): \n ax[0,0].plot(t, p_x[p, t], 'rx') \n ax[0,1].plot(t, p_y[p, t], 'gx') \n ax[1,0].plot(t, p_z[p, t], 'bx') \n ax[1,1].plot(t, p_x[p, t], 'rx') \n ax[1,1].plot(t, p_y[p, t], 'gx') \n ax[1,1].plot(t, p_z[p, t], 'bx') \n for a in ax.flat: \n a.set(xlabel = 'Time steps', ylabel = 'Position')\n ax[0,0].set_title('X (pix)') \n ax[0,0].set_ylim([xmin, xmax]) \n ax[0,1].set_title('Y (pix)') \n ax[0,1].set_ylim([ymin, ymax]) \n ax[1,0].set_title('Z (pix)') \n ax[1,0].set_ylim([zmin, zmax])\n ax[1,1].set_title('Positions combined') \n ax[1,1].set_ylim([np.array([xmin, ymin, zmin]).min(), np.array([xmax, ymax, zmax]).max()])", "def load_and_plot(file_name):\n data = loadtxt(file_name, delimiter=',') \n #Plot the data\n scatter(data[:, 0], data[:, 1], marker='o', c='b')\n title('Profits distribution')\n xlabel('Population of City in 10,000s')\n ylabel('Profit in $10,000s')\n show()\n return data", "def multi_plot(data, fname=None):\n for entry in data['data']:\n plt.plot(entry['x'], entry['y'], label=entry['label'])\n\n plt.title(data['title'])\n plt.xlabel(data['x_label'])\n plt.ylabel(data['y_label'])\n\n #plt.legend(loc='best')\n\n Plotter.show(data['title'], fname=fname)", "def plot2dTimeSeries(values, title='series', xLabel='time', yLabel='values', savePath='.'):\n plt.plot(values)\n plt.ylabel(yLabel)\n plt.xlabel(xLabel)\n plt.xticks(np.linspace(0, len(values), 11))\n plt.title(title)\n plt.savefig(f'{savePath}/{title}.png')\n plt.show(block=False)\n plt.pause(2)\n plt.close()", "def plot_tap(file: str, before: DataFrame, during: DataFrame, after: DataFrame, time_col: str):\n\n print(\"Making plots at time \" + str(before[time_col].iloc[0]))\n\n for file_name in file_names:\n for y in y_columns[file_name]:\n\n ax = before.plot(time_col, y, kind = 'scatter', color = 'blue', label = 'Before Tap')\n after.plot(time_col, y, kind = 'scatter', color = 'red', label = 'After Tap', ax = ax)\n during.plot(time_col, y, kind = 'scatter', color = 'black', label = 'During Tap', ax = ax)\n plt.axes(ax)\n plt.xlabel('Event Time')\n plt.ylabel(y)\n\n min_x = before[time_col].iloc[0] - (before[time_col].iloc[1] - before[time_col].iloc[0]) * 50\n min_y = min([min(during[y]), min(before[y]), min(after[y])])\n # Mark the mean during tap event (Feature 1)\n mean_during = mean(during[y])\n mean_before = mean(before[y])\n mean_after = mean(after[y])\n plt.hlines(y = mean_during, xmin = min_x, xmax = during[time_col].iloc[-1], linestyle='dashed', \\\n color='black')\n plt.annotate(xy = (min_x, mean_during), s = 'avgDuringTap')\n # Mark the mean before\n plt.hlines(y = mean_before, xmin = min_x, xmax = before[time_col].iloc[-1], linestyle='dashed', \\\n color='blue')\n plt.annotate(xy = (min_x, mean_before), s = 'avg100msBefore')\n # Mark the mean after\n plt.hlines(y = mean_after, xmin = min_x, xmax = after[time_col].iloc[-1], linestyle='dashed', \\\n color='red')\n plt.annotate(xy = (min_x, mean_after), s = 'avg100msAfter')\n\n plt.legend()\n\n plt.savefig(file+'_'+y+'_time_'+str(before[time_col].iloc[0]) + '.png')\n\n plt.close()", "def visualize_time_series(fig_ax, data, inp_color, missing_data, lag_color, 
first_date,\n x_label=\"Number of Days\", y_label=\"Log of Aluminium Price\", title=\"Prices over time\"):\n fig, ax = fig_ax\n ((x_train_raw, y_train_raw), y_pred_list) = data\n\n missing_x, missing_y = missing_data\n is_missing = len(missing_x) != 0\n\n first_date = datetime.strptime(first_date, '%Y-%m-%d')\n\n convert_date = lambda x: [\n np.datetime64((first_date + timedelta(days=d)).strftime('%Y-%m-%d'))\n for d in x\n ]\n convert_price = lambda x: x[\"Output\"].to_list()\n\n x_train = convert_date(x_train_raw[\"Date\"].to_list())\n y_train = convert_price(y_train_raw)\n \n cut_point = x_train[-1]\n ax.plot(x_train, y_train, color=color[inp_color])\n\n for i, y_pred in enumerate(y_pred_list):\n data, plot_name, color_code, is_bridge = y_pred\n mean_pred, x_test_raw = data[\"mean\"], data[\"x\"]\n x_test = convert_date(x_test_raw)\n\n if i == 0 and is_missing:\n missing_x = convert_date(missing_x)\n ax.axvline(x_test[0], color=color[lag_color], linestyle='--', linewidth=0.5, dashes=(5, 0), alpha=0.2)\n ax.plot([missing_x[-1], x_test[0]], [missing_y[-1], mean_pred[0]], color[lag_color], linestyle=\"dashed\")\n ax.axvspan(cut_point, x_test[0], color=color[lag_color], alpha=0.1)\n\n plot_bound(ax, data, x_test, color[color_code], plot_name)\n\n if is_bridge and (not is_missing): \n ax.plot([x_train[-1], x_test[0]], [y_train[-1], mean_pred[0]], color[color_code], linewidth=1.5)\n\n if is_missing:\n ax.plot(missing_x, missing_y, color=color[lag_color], linestyle=\"dashed\")\n ax.plot([x_train[-1], missing_x[0]], [y_train[-1], missing_y[0]], color[lag_color], linestyle=\"dashed\")\n ax.axvline(cut_point, color=color[lag_color], linestyle='--', linewidth=0.5, dashes=(5, 0), alpha=0.2)\n else:\n ax.axvline(cut_point, color=color[\"k\"], linestyle='--')\n\n ax.xaxis.set_minor_locator(AutoMinorLocator())\n ax.legend()\n\n # ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)\n ax.set_title(title)\n\n # ax.set_xlim(left=cut_point-np.timedelta64(1, 'm'))\n plot_axis_date(ax, x_train + missing_x + x_test)\n ax.grid()\n return fig, ax", "def plot_time_slices(self):\n U = self.r.u[:, 15:-15, :]\n T = range(U.shape[2])\n kwarglist = [dict(t=t,\n index=self.index,\n U=U,\n levels=self.levels,\n fname=self.time_slice_path(t))\n for t in T]\n util.parallel_process(plot_time_slice, kwarglist=kwarglist)", "def Plot_RCWA_Ssim(Path) :\n #/Users/simonvassant/Documents/20090707_TestsPython/Result_Champ/SSim_O21Pr_2000H_res1000F1.res\n x,y=[],[]\n fs = open(Path, 'r') \n#index_array = 0\n while 1: \n txt = fs.readline()\n if txt =='': \n break\n x.append(float(txt[0:12]))\n y.append(float(txt[13:-1]))\n #x[index_array],y[index_array] = float(txt[0:9]),float(txt[10:17])\n #index_array = index_array+1\n \n fs.close()\n plt.figure(1)\n plt.plot(x,y)\n plt.xlabel(r\"Longueur d'onde $(\\mu m)$\")\n plt.ylabel('R')", "def plot_data(data, title):\n plt.title(title)\n plt.plot(range(len(data)), data[:, 0], 'r-', label='x')", "def plot_trinity(time, data, lgnd=None):\n pylab.figure()\n pylab.plot(time, data)\n pylab.xlabel('time, s')\n pylab.ylabel('data')\n pylab.title('Triad Plotter')\n if lgnd != None:\n pylab.legend((lgnd[0], lgnd[1], lgnd[2]))\n pylab.grid(True)\n\n pylab.show()", "def plot_basic(time, data, lgnd=None):\n pylab.figure()\n pylab.plot(time, data)\n pylab.xlabel('time, s')\n pylab.ylabel('data')\n pylab.title('Basic Plotter')\n if lgnd != None:\n pylab.legend(lgnd)\n pylab.grid(True)\n pylab.show()", "def OneValueWithTimePlot(timeline, Ydat1, Label1, xmin, xmax, ymin, ymax, x_auto, y_auto,\n XLabel, 
YLabel, SupTitle, Title, FileName,\n currentDate, currentTime, Software_version):\n\n rc('font', size=6, weight='bold')\n fig = plt.figure(figsize=(9, 5))\n ax1 = fig.add_subplot(111)\n ax1.plot(Ydat1, linestyle='-', linewidth='1.00', label=Label1)\n ax1.legend(loc='upper right', fontsize=6)\n ax1.grid(visible=True, which='both', color='silver', linestyle='-')\n if x_auto == 0: ax1.set_xlim([xmin, xmax])\n if y_auto == 0: ax1.set_ylim([ymin, ymax])\n ax1.set_ylabel(YLabel, fontsize=6, fontweight='bold')\n ax1.set_title(Title, fontsize=6)\n ax1.set_xlabel(XLabel, fontsize=6, fontweight='bold')\n text = ax1.get_xticks().tolist()\n for i in range(len(text)-1):\n k = int(text[i])\n text[i] = timeline[k] # text[i] = timeline[i] ???\n ax1.set_xticklabels(text, fontsize=6, fontweight='bold')\n fig.subplots_adjust(top=0.92)\n fig.suptitle(SupTitle, fontsize=8, fontweight='bold')\n fig.text(0.79, 0.03, 'Processed ' + currentDate + ' at ' + currentTime,\n fontsize=4, transform=plt.gcf().transFigure)\n fig.text(0.11, 0.03, 'Software version: ' + Software_version + ', [email protected], IRA NASU',\n fontsize=4, transform=plt.gcf().transFigure)\n pylab.savefig(FileName, bbox_inches='tight', dpi=160)\n plt.close('all')\n return", "def plot_powerlaw_output(timeDB, xunits='yr', yunits='MPa', skip=8, P0=33.0):", "def open_xy(data):\n twotheta, intensity = [], []\n with open(data) as f:\n for line in f:\n row = line.split()\n twotheta.append(row[0])\n intensity.append(row[1])\n xyarray = list(zip(twotheta, intensity))\n xyarray = np.asarray(xyarray)\n xyarray = xyarray.astype(np.float)\n return xyarray", "def plot_params_at_time(folder, t, epsilon=0.1, show_params=False):\n # Plot erstellen und beschriften\n if \"2s\" not in folder:\n logging.log(40, \"Plot nicht verfuegbar fuer 3-Zustaende Modell\")\n return \n ax = plt.axes()\n ax.get_xaxis().get_major_formatter().set_useOffset(False)\n plt.xlabel(\"ps\")\n plt.ylabel(\"pm\")\n filenames = [name for name in os.listdir(folder) if name.startswith(\"Sim_\")]\n filenames.sort()\n # Alle Sim durchgehen, wenn Bedingung erfuellt, plotten\n dots = iter([\"r^\", \"co\", \"ks\", \"g^\", \"yo\", \"ms\", \"b^\", \"ko\", \"gs\", \"m^\", \"ro\", \"ys\", \"c^\", \"bo\", \"ws\"])\n for filename in filenames: \n with open (folder+filename, \"r+b\") as data:\n sim = pickle.load(data)\n if sim.valid:\n if abs(sim.pd[0] - t) > epsilon:\n logging.log(24, \"Abweichung zu gross, %s, bei sim %s\", sim.pd[0], sim)\n else:\n #Groesse der Punkte zeigt Breite(IQR) des Peaks \n logging.log(20, \"Gefunden: %s\", sim.pd)\n ax.plot(sim.params[0], sim.params[1], dots.__next__(), markersize = 2+(sim.pd[2]), label = (str(round(sim.pd[2],2)) + \" \" + str(round(sim.pd[0], 2))))\n if show_params:\n ax.text(sim.params[0], sim.params[1], str(round(sim.params[0],8)) +\"_\" + str(round(sim.params[1],5)))\n logging.log(21, sim.pd[3])\n plt.suptitle(\"Parameter fuer Zeit \"+ str(t) + \" mit Abweichung \" + str(epsilon))\n plt.legend(title = \"Breite, Retentionszeit\", numpoints = 1, loc = 2)\n plt.show()", "def test_3d_steam_time():\n dic,data = ng.pipe.read(\"common_data/3d_pipe/full3D.fid\")\n assert data.shape == (128, 88, 1250)\n assert data.dtype == 'complex64'\n assert round(data[0,1,2].real,2) == -7.98\n assert round(data[0,1,2].imag,2) == 33.82\n assert round(data[10,22,5].real,2) == 15.71\n assert round(data[10,22,5].imag,2) == 15.1\n write_readback(dic,data)", "def surface_plot(name: str = 'start_date_analysis1.pkl'):\n df = pd.read_pickle(name)\n\n # set up a figure twice as wide as it 
is tall\n fig = plt.figure(figsize=plt.figaspect(0.5))\n # ===============\n # First subplot\n # ===============\n # set up the axes for the first plot\n ax = fig.add_subplot(1, 2, 1, projection='3d')\n ax.set_title('Modifications per File')\n ax.set_xlabel('Date (Months)')\n ax.set_ylabel('Threshold Individual')\n for idx, row in enumerate(sorted(df['threshold_pairs'].unique())):\n data = df[df['threshold_pairs'] == row]\n label = 'Threshold pairs ' + str(row)\n # Plot the surface.\n surf = ax.plot_trisurf(data['date'], data['threshold'], data['mpf'], alpha=0.7,\n linewidth=0, antialiased=False, label=label)\n surf._facecolors2d = surf._facecolors3d\n surf._edgecolors2d = surf._edgecolors3d\n # ===============\n # Second subplot\n # ===============\n # set up the axes for the second plot\n ax = fig.add_subplot(1, 2, 2, projection='3d')\n ax.set_title('Transitions per Test')\n ax.set_xlabel('Date (Months)')\n ax.set_ylabel('Threshold Individual')\n for idx, row in enumerate(sorted(df['threshold_pairs'].unique())):\n data = df[df['threshold_pairs'] == row]\n label = 'Threshold pairs ' + str(row)\n # Plot the surface.\n\n surf = ax.plot_trisurf(data['date'], data['threshold'], data['tpt'], alpha=0.7,\n linewidth=0, antialiased=False, label=label)\n\n surf._facecolors2d = surf._facecolors3d\n surf._edgecolors2d = surf._edgecolors3d\n\n # cbar = fig.colorbar(surf)\n # cbar.locator = LinearLocator(numticks=10)\n # cbar.update_ticks()\n\n plt.suptitle('Threshold Start Date Analysis 3D', fontsize=14)\n plt.legend()\n plt.show()", "def plot_times(self, train_time, title=None, xmin=None, xmax=None,\n ymin=None, ymax=None, ax=None, show=True, color=None,\n xlabel=True, ylabel=True, legend=True, chance=True,\n label='Classif. score'):\n if not np.array(train_time).dtype is np.dtype('float'):\n raise ValueError('train_time must be float | list or array of '\n 'floats. Got %s.' 
% type(train_time))\n\n return plot_gat_times(self, train_time=train_time, title=title,\n xmin=xmin, xmax=xmax,\n ymin=ymin, ymax=ymax, ax=ax, show=show,\n color=color, xlabel=xlabel, ylabel=ylabel,\n legend=legend, chance=chance, label=label)", "def _onLoad1DData(self, event):\r\n path = None\r\n dlg = wx.FileDialog(self, \"Choose a file\", os.getcwd(), \"\", \"*.txt\", wx.OPEN)\r\n if dlg.ShowModal() == wx.ID_OK:\r\n path = dlg.GetPath()\r\n mypath = os.path.basename(path)\r\n print mypath\r\n dlg.Destroy()\r\n\r\n file_x = []\r\n file_y = []\r\n file_dy = []\r\n file_dx = []\r\n if not path == None:\r\n self.path =path\r\n input_f = open(path,'r')\r\n buff = input_f.read()\r\n lines = buff.split('\\n')\r\n for line in lines:\r\n try:\r\n toks = line.split()\r\n x = float(toks[0])\r\n y = float(toks[1])\r\n #dx = math.sqrt(x)\r\n dx=1/x\r\n if dx >= x:\r\n dx = 0.9*x\r\n #dy = math.sqrt(y)\r\n dy=1/y\r\n if dy >= y:\r\n dy = 0.9*y\r\n file_x.append(x)\r\n file_y.append(y)\r\n file_dy.append(dy)\r\n file_dx.append(dx)\r\n\r\n except:\r\n print \"READ ERROR\", line\r\n\r\n # Sanity check\r\n if not len(file_x) == len(file_dx):\r\n raise ValueError, \"X and dX have different length\"\r\n if not len(file_y) == len(file_dy):\r\n raise ValueError, \"y and dy have different length\"\r\n # reset the graph before loading\r\n self.graph.reset()\r\n self.file_data.x = file_x\r\n self.file_data.y = file_y\r\n self.file_data.dy = file_dy\r\n #self.file_data.dy = None\r\n\r\n #self.file_data.dx = file_dx\r\n self.file_data.dx = None\r\n\r\n self.file_data.reset_view()\r\n\r\n self.file_data.name = \"Loaded 1D data\"\r\n self.graph.xaxis('\\\\rm{q} ', 'A^{-1}')\r\n self.graph.yaxis(\"\\\\rm{Intensity} \",\"cm^{-1}\")\r\n\r\n # Set the scale\r\n self.set_yscale('log')\r\n self.set_xscale('linear')\r\n #Add the default transformation of x and y into Property Dialog\r\n if self.get_xscale()=='log':\r\n xtrans=\"Log(x)\"\r\n if self.get_xscale()=='linear':\r\n xtrans=\"x\"\r\n if self.get_yscale()=='log':\r\n ytrans=\"Log(y)\"\r\n if self.get_yscale()=='linear':\r\n ytrans=\"y\"\r\n self.setTrans(xtrans,ytrans)\r\n\r\n #Plot the data\r\n self.graph.add(self.file_data)\r\n self. 
_onEVT_FUNC_PROPERTY()\r\n\r\n #self.graph.render(self)\r\n #self.subplot.figure.canvas.draw_idle()\r", "def plot(filename):\n\tdata = np.load(filename)\n\tAlpha = data['ALPHA']\n\tCn = data['NFE']\n\tCf = data['FFE']\n\tP = data['PHINORM']\n\tSr = data['STABILITY']\n\tFlag = data['FLAG']\n\tIter = data['ITER']\n\tF = data['F']\n\tD = data['D']\n\tE = data['EPSILON']\n\tdelta = data['DELTA']\n\n\t#auxiliary variables\n\tLogA = np.log10(Alpha)\n\tLogP = np.log10(P)\n\n\tplt.rc('text', usetex=False)\n\tplt.rc('font', family='serif')\n\torient = 'vertical'\n\tXLABEL = r\"$d$\"\n\tYLABEL = r\"$\\epsilon$\"\n\t\n\tNd = D.shape[0]\n\tNepsilon = D.shape[1]\n\tN = Nd*Nepsilon\n\t\n\tD = np.reshape(D, (N, ))\n\tE = np.reshape(E, (N, ))\n\tSr = np.reshape(Sr, (N, ))\n\tLogA = np.reshape(LogA, (N, ))\n\tLogP = np.reshape(LogP, (N, ))\n\tG = np.sqrt(np.abs(F))/delta\n\tG = np.reshape(G, (N, ))\n\n\tfig = plt.figure(1)\n\tax = fig.add_subplot(221, projection='3d')\n\tax.set_xlabel(XLABEL, fontsize=AXESFONTSIZE)\n\tax.set_ylabel(YLABEL, fontsize=AXESFONTSIZE)\n\tax.set_zlabel(r\"$\\log_{10}{\\alpha}$\", fontsize=AXESFONTSIZE)\n\tax.set_xlim3d(D.min(), D.max())\n\tax.set_ylim3d(E.min(), E.max())\n\tax.set_zlim3d(LogA.min(), LogA.max())\n\tsurf1 = ax.plot_trisurf(D, E, LogA, vmin=LogA.min(), vmax=LogA.max(), cmap=cm.jet, edgecolor='none')\n\tfig.colorbar(surf1, shrink=0.5, aspect=5, pad=0.05, orientation = orient)\n\n\tax = fig.add_subplot(222, projection='3d')\n\tax.set_xlabel(XLABEL, fontsize=AXESFONTSIZE)\n\tax.set_ylabel(YLABEL, fontsize=AXESFONTSIZE)\n\tax.set_zlabel(\"\\n\" + r\"$\\log\\left(\\Vert \\phi\\Vert_{L^{2}(\\partial D_{a})} \\right)$\", fontsize=AXESFONTSIZE, linespacing=2)\n\tax.set_xlim3d(D.min(), D.max())\n\tax.set_ylim3d(E.min(), E.max())\n\tsurf2 = ax.plot_trisurf(D, E, LogP, cmap=cm.jet, vmin=LogP.min(), vmax=LogP.max(), edgecolor='none')\n\tfig.colorbar(surf2, shrink=0.5, aspect=5, pad=0.05, orientation = orient)\n\n\tax = fig.add_subplot(223, projection='3d')\n\tax.set_xlabel(XLABEL, fontsize=AXESFONTSIZE)\n\tax.set_ylabel(YLABEL, fontsize=AXESFONTSIZE)\n\tax.set_zlabel(\"\\n\" + r\"$\\frac{\\Vert \\phi^{\\epsilon} - \\phi_{0}\\Vert}{\\Vert \\phi^{0}\\Vert}$\", fontsize=AXESFONTSIZE, linespacing=2)\n\tax.set_xlim3d(D.min(), D.max())\n\tax.set_ylim3d(E.min(), E.max())\n\tsurf3 = ax.plot_trisurf(D, E, Sr, cmap=cm.jet, vmin=Sr.min(), vmax=Sr.max(), edgecolor='none')\n\tfig.colorbar(surf3, shrink=0.5, aspect=5, pad=0.05, orientation = orient)\n\n\tax = fig.add_subplot(224, projection='3d')\n\tax.set_xlabel(XLABEL, fontsize=AXESFONTSIZE)\n\tax.set_ylabel(YLABEL, fontsize=AXESFONTSIZE)\n\tax.set_zlabel(\"\\n\" + r\"$\\frac{\\sqrt{|F(\\alpha)|}}{\\delta}$\", fontsize=AXESFONTSIZE, linespacing=2)\n\tax.set_xlim3d(D.min(), D.max())\n\tax.set_ylim3d(E.min(), E.max())\n\tscatter4 = ax.scatter(D, E, G, c=G, s=5)\n\tfig.colorbar(scatter4, shrink=0.5, aspect=5, pad=0.05, orientation = orient)\n\tax.view_init(azim=-129, elev=20)\n\n\tfig.subplots_adjust(left = 0.02, right = 0.98, wspace=0.05, hspace=0.05, top=0.95, bottom=0.05)\n\tplt.show()", "def test_time_series_from_file():\r\n\r\n TR = 1.35\r\n ts_ff = io.time_series_from_file\r\n\r\n #File names:\r\n fmri_file1 = os.path.join(data_path,'fmri1.nii.gz')\r\n fmri_file2 = os.path.join(data_path,'fmri2.nii.gz')\r\n\r\n #Spatial coordinates into the volumes:\r\n coords1 = np.array([[5,5,5,5],[5,5,5,5],[1,2,3,4]])\r\n coords2 = np.array([[6,6,6,6],[6,6,6,6],[3,4,5,6]])\r\n\r\n #No averaging, no normalization:\r\n t1 = 
ts_ff([fmri_file1,fmri_file2],[coords1,coords2],TR)\r\n\r\n npt.assert_equal(t1[0].shape,(4,80)) # 4 coordinates, 80 time-points\r\n\r\n t2 = ts_ff([fmri_file1,fmri_file2],[coords1,coords2],TR,average=True)\r\n\r\n npt.assert_equal(t2[0].shape,(80,)) # collapse coordinates,80 time-points\r\n\r\n t3 = ts_ff(fmri_file1,coords1,TR,normalize='zscore')\r\n\r\n #The mean of each channel should be almost equal to 0:\r\n npt.assert_almost_equal(t3.data[0].mean(),0)\r\n #And the standard deviation should be almost equal to 1:\r\n npt.assert_almost_equal(t3.data[0].std(),1)\r\n\r\n t4 = ts_ff(fmri_file1,coords1,TR,normalize='percent')\r\n\r\n #In this case, the average is almost equal to 0, but no constraint on the\r\n #std:\r\n npt.assert_almost_equal(t4.data[0].mean(),0)\r\n\r\n #Make sure that we didn't mess up the sampling interval:\r\n npt.assert_equal(t4.sampling_interval,nitime.TimeArray(1.35))\r\n\r\n # Test the default behavior:\r\n data = io.load(fmri_file1).get_data()\r\n t5 = ts_ff(fmri_file1)\r\n npt.assert_equal(t5.shape, data.shape)\r\n npt.assert_equal(t5.sampling_interval, ts.TimeArray(1, time_unit='s'))\r\n\r\n # Test initializing TR with a TimeArray:\r\n t6= ts_ff(fmri_file1, TR=ts.TimeArray(1350, time_unit='ms'))\r\n npt.assert_equal(t4.sampling_interval, t6.sampling_interval)\r\n\r\n # Check the concatenation dimensions:\r\n t7 = ts_ff([fmri_file1, fmri_file2])\r\n npt.assert_equal([t7.shape[:3], t7.shape[-1]], [data.shape[:3], data.shape[-1]*2])\r\n\r\n t8 = ts_ff([fmri_file1, fmri_file2], average=True)\r\n npt.assert_equal(t8.shape[0], data.shape[-1]*2)\r\n\r\n t9 = ts_ff([fmri_file1, fmri_file2], average=True, normalize='zscore')\r\n npt.assert_almost_equal(t9.data.mean(), 0)", "def load_pt3(filename, ovcfunc=None):\n assert os.path.isfile(filename), \"File '%s' not found.\" % filename\n\n t3records, timestamps_unit, nanotimes_unit, meta = pt3_reader(filename)\n detectors, timestamps, nanotimes = process_t3records(\n t3records, time_bit=16, dtime_bit=12, ch_bit=4, special_bit=False,\n ovcfunc=ovcfunc)\n acquisition_duration = meta['header']['AcquisitionTime'][0] * 1e-3\n ctime_t = time.strptime(meta['header']['FileTime'][0].decode(),\n \"%d/%m/%y %H:%M:%S\")\n creation_time = time.strftime(\"%Y-%m-%d %H:%M:%S\", ctime_t)\n meta.update({'timestamps_unit': timestamps_unit,\n 'nanotimes_unit': nanotimes_unit,\n 'acquisition_duration': acquisition_duration,\n 'laser_repetition_rate': meta['ttmode']['InpRate0'],\n 'software': meta['header']['CreatorName'][0].decode(),\n 'software_version': meta['header']['CreatorVersion'][0].decode(),\n 'creation_time': creation_time,\n 'hardware_name': meta['header']['Ident'][0].decode(),\n })\n return timestamps, detectors, nanotimes, meta", "def plot(axes, axis, values, c='chartreuse'):\n a = axes[axis]\n a.set_xlabel('time (s)')\n x = np.array(range(len(values))) / 1000\n dim = 'x' if axis == 0 else 'y' if axis == 1 else 'z'\n a.set_title('-'.join([dim, 'acceleration']))\n a.plot(x, values / 1000, c=c)", "def makePlot(timeStamp):\n\n #-------------------------------------------------------------------------\n # Create figure and axes\n #-------------------------------------------------------------------------\n\n width = 12 # inches\n height = 8 # inches\n fig = plt.figure(figsize=(width, height))\n\n # We'll use gridspec to create axes in rectangular 6-by-5 lattice\n import matplotlib.gridspec as gridspec\n nrows = 6\n ncols = 5\n Grid = gridspec.GridSpec(nrows, ncols)\n\n # axis for elevation time series\n axElev = fig.add_subplot(Grid[:2, 
:2]) # first 2 rows, first 2 columns\n # axis for slab\n axSlab = fig.add_subplot(Grid[:2, 2:]) # first 2 rows, columns > 2\n # and the transects\n axTran1 = fig.add_subplot(Grid[2:4, :]) # rows 2,3,4, all columns\n # rows 5,6,7, all columns, share x/y axis with previous (sets same ticks\n # etc)\n axTran2 = fig.add_subplot(Grid[4:6, :], sharex=axTran1, sharey=axTran1)\n\n # gridspec allows to tune the spacing between plots (unit is fraction of\n # font size)\n boundary_pad = 3.5\n horizontal_pad = 0.2\n vertical_pad = 1.0\n # figure area left,bottom,right,top in normalized coordinates [0,1]\n bounds = [0, 0, 1, 1]\n Grid.tight_layout(\n fig,\n pad=boundary_pad,\n w_pad=horizontal_pad,\n h_pad=vertical_pad,\n rect=bounds)\n\n #-------------------------------------------------------------------------\n # Create plots\n #-------------------------------------------------------------------------\n\n # for all avaiable colormaps see ( '_r' reverses the colormap )\n # http://matplotlib.org/examples/color/colormaps_reference.html\n colormap = plt.get_cmap('Spectral_r')\n colormap_kine = plt.get_cmap('gist_heat')\n\n # slab\n salt_clim = [0, 32]\n ncontours = 16\n # bouding box for slab [xmin,xmax,ymin,ymax] in model x,y coordinates\n estuarybbox = [330000, 360000, 284500, 297500]\n dia = slabSnapshotDC(\n clabel='Salinity',\n unit='psu',\n clim=salt_clim,\n cmap=colormap)\n dia.setAxes(axSlab)\n dia.addSample(slabDC, timeStamp=timeStamp, plotType='contourf',\n bbox=estuarybbox, N=ncontours)\n # overrides default format for colorbar floats\n dia.showColorBar(format='%.2g')\n #dia.addTitle('in case you want a custom title')\n # get transect (x,y) coordinates from the transectDC\n transectXYCoords = generateTransectFromDataContainer(transectDC_salt, 0)[4]\n # plot transect on the map (thin black on thick white)\n dia.addTransectMarker(transectXYCoords[:, 0], transectXYCoords[:, 1],\n color='w', linewidth=2.0)\n dia.addTransectMarker(transectXYCoords[:, 0], transectXYCoords[:, 1],\n color='k', linewidth=1.0)\n # plot station markers\n for station in stationsToPlot:\n staX = staFileObj.getX(station)\n staY = staFileObj.getY(station)\n dia.addStationMarker(\n staX,\n staY,\n label=station,\n printLabel=True,\n marker='*')\n # add text to plot. 
x,y are in normalized axis coordinates [0,1]\n dia.ax.text(0.05, 0.98, 'custom text', fontsize=fontsize,\n verticalalignment='top', horizontalalignment='left',\n transform=dia.ax.transAxes)\n\n # elevation time series\n # define the time range to plot\n elevStartTime = datetime.datetime(2012, 5, 4, 0, 0)\n elevEndTime = datetime.datetime(2012, 5, 5, 0, 15)\n elevMeanTime = elevStartTime + (elevEndTime - elevStartTime) / 2\n elevLim = [-1.5, 2.5]\n dia = timeSeriesPlotDC2(\n xlabel=elevMeanTime.strftime('%Y %b %d'),\n ylim=elevLim)\n dia.setAxes(axElev)\n #dia.addShadedRange( timeStamp, timeStamp+datetime.timedelta(seconds=30), facecolor='IndianRed')\n dia.addShadedRange(\n timeStamp,\n timeStamp,\n edgecolor='IndianRed',\n facecolor='none',\n linewidth=2)\n tag = elevDC.getMetaData('tag')\n dia.addSample(\n elevDC.timeWindow(\n elevStartTime,\n elevEndTime),\n label=tag,\n color='k')\n dia.addTitle('Elevation ({0:s}) [m]'.format(\n elevDC.getMetaData('location').upper()))\n # adjust the number of ticks in x/y axis\n dia.updateXAxis(maxticks=5)\n dia.updateYAxis(maxticks=3, prune='lower')\n\n # transects\n dia = transectSnapshotDC(\n clabel='Salinity',\n unit='psu',\n cmap=colormap,\n clim=salt_clim)\n dia.setAxes(axTran1)\n #transectDC_salt.data *= 1e-3\n dia.addSample(transectDC_salt, timeStamp, N=ncontours)\n dia.addTitle('')\n dia.showColorBar()\n # plot station markers\n for station in stationsToPlot:\n staX = staFileObj.getX(station)\n staY = staFileObj.getY(station)\n dia.addStationMarker(staX, staY, label=station, color='k',\n linewidth=1.5, linestyle='dashed')\n # do not show x axis ticks and label for this plot\n dia.hideXTicks()\n\n dia = transectSnapshotDC(clabel='TKE', unit='m2s-1', logScale=True,\n clim=[-7, -2], climIsLog=True, cmap=colormap_kine)\n dia.setAxes(axTran2)\n dia.addSample(transectDC_kine, timeStamp, N=ncontours)\n # plot station markers\n for station in stationsToPlot:\n staX = staFileObj.getX(station)\n staY = staFileObj.getY(station)\n dia.addStationMarker(staX, staY, label=station, color='k',\n linewidth=1.5, linestyle='dashed')\n dia.addTitle('')\n dia.showColorBar()\n dia.updateXAxis(maxticks=15)\n dia.updateYAxis(maxticks=6)\n\n #-------------------------------------------------------------------------\n # Save to disk\n #-------------------------------------------------------------------------\n dateStr = timeStamp.strftime('%Y-%m-%d_%H-%M')\n filename = '_'.join([imgPrefix, dateStr])\n saveFigure(\n imgDir,\n filename,\n imgFiletype,\n verbose=True,\n dpi=200,\n bbox_tight=True)\n plt.close()", "def plotDFT(x):\n \n X = DFTdirect(x)\n plt.plot([c.re for c in x], [c.im for c in x], 'ro')\n plt.plot([c.re for c in X], [c.im for c in X], 'bo')\n plt.show()", "def test_2d_plot(self):\n db = pd.HDFStore('test.h5')\n df_iv = db['iv']\n dates = df_iv[df_iv['dte'] == 30]['date']\n impl_vols = df_iv[df_iv['dte'] == 30]['impl_vol']\n db.close()\n\n print df_iv.sort_values('impl_vol').head()\n\n plt.plot(dates, impl_vols)\n plt.xlabel('date')\n plt.ylabel('impl_vols')\n plt.show()", "def scatter(filename, data, lines=[]):\n import matplotlib.pyplot as plot\n plot.figure(random.randint(0, 10000000))\n plot.scatter(data[0], data[1], 20, 'b', 'o')\n plot.title(filename.split('.')[0])\n for line in lines:\n plot.plot([line[0], line[2]], [line[1], line[3]], '-')\n plot.savefig(filename)", "def plotData(BX,BY,xi,yi,expArr,t,savepath_dir):\r\n \r\n #Find the current channel data\r\n Jz=newCurrent(BX,BY,xi,yi,expArr,t)\r\n\r\n #Find the dipole vector components\r\n 
BxTime=np.real(BX*expArr[t])\r\n ByTime=np.real(BY*expArr[t])\r\n\r\n #Plot the current density contour and dipole vector grid\r\n #Create the figure\r\n p1=plt.figure(figsize=(9,8))\r\n \r\n #Plot the data\r\n p1=plt.contourf(xi,yi,Jz,levels=100,vmin=-0.1,vmax=0.1)\r\n qv1=plt.quiver(xi,yi,BxTime,ByTime,width=0.004,scale=3)\r\n \r\n #Add axes labels and title\r\n p1=plt.xlabel('X [cm]',fontsize=20)\r\n p1=plt.ylabel('Y [cm]',fontsize=20)\r\n # p1=plt.title('Alfven Wave Dipole; Frequency='+str(freq)+r'KHz; $\\nu_{ei}$='+str(col)+'KHz',fontsize=19,y=1.02)\r\n p1=plt.title('E Field; Frequency='+str(freq)+r'KHz; $\\nu_{ei}$='+str(col)+'KHz',fontsize=19,y=1.02)\r\n \r\n #Set axes parameters\r\n p1=plt.xticks(np.arange(-50,51,5))\r\n p1=plt.yticks(np.arange(-50,51,5))\r\n p1=plt.xlim(-xAxisLim,xAxisLim)\r\n p1=plt.ylim(-yAxisLim,yAxisLim)\r\n \r\n #Add colorbar\r\n cbar=plt.colorbar()\r\n cbar.set_label('Normalized Current Density',rotation=270,labelpad=15)\r\n cbar=plt.clim(-1,1)\r\n \r\n #Add vector label\r\n plt.quiverkey(qv1,-0.1,-0.1,0.2,label=r'$(B_x,B_y)$')\r\n \r\n #Miscellaneous\r\n p1=plt.tick_params(axis='both', which='major', labelsize=18)\r\n p1=plt.grid(True)\r\n p1=plt.gcf().subplots_adjust(left=0.15)\r\n\r\n #Save the plot\r\n savepath_frame=savepath_dir+'frame'+str(t+1)+'.png'\r\n p1=plt.savefig(savepath_frame,dpi=100,bbox_to_anchor='tight')\r\n p1=plt.close()\r\n\r\n #Let me know which frame we just saved\r\n print('Saved frame '+str(t+1)+' of '+str(len(expArr)))\r\n \r\n return", "def plot_tseries(time_series, fig=None, axis=0,\r\n xticks=None, xunits=None, yticks=None, yunits=None,\r\n xlabel=None, ylabel=None, yerror=None, error_alpha=0.1,\r\n time_unit=None, **kwargs):\r\n\r\n if fig is None:\r\n fig = plt.figure()\r\n\r\n if not fig.get_axes():\r\n ax = fig.add_subplot(1, 1, 1)\r\n else:\r\n ax = fig.get_axes()[axis]\r\n\r\n #Make sure that time displays on the x axis with the units you want:\r\n #If you want to change the time-unit on the visualization from that used to\r\n #represent the time-series:\r\n if time_unit is not None:\r\n tu = time_unit\r\n conv_fac = ts.time_unit_conversion[time_unit]\r\n #Otherwise, get the information from your input:\r\n else:\r\n tu = time_series.time_unit\r\n conv_fac = time_series.time._conversion_factor\r\n\r\n this_time = time_series.time / float(conv_fac)\r\n ax.plot(this_time, time_series.data.T, **kwargs)\r\n\r\n if xlabel is None:\r\n ax.set_xlabel('Time (%s)' % tu)\r\n else:\r\n ax.set_xlabel(xlabel)\r\n\r\n if ylabel is not None:\r\n ax.set_ylabel(ylabel)\r\n\r\n if yerror is not None:\r\n if len(yerror.data.shape) == 1:\r\n this_e = yerror.data[np.newaxis, :]\r\n else:\r\n this_e = yerror.data\r\n delta = this_e\r\n e_u = time_series.data + delta\r\n e_d = time_series.data - delta\r\n for i in range(e_u.shape[0]):\r\n ax.fill_between(this_time, e_d[i], e_u[i], alpha=error_alpha)\r\n\r\n return fig", "def Read_PT3(Path):\n f = open(Path, \"rb\")\n f.read(584) # Reads all the shit above\n Resolution = struct.unpack('f',f.read(4))[0] # in ns\n\n f.read(116)\n CntRate0 = struct.unpack('i',f.read(4))[0]\n f.read(12)\n Records = struct.unpack('i',f.read(4))[0]\n Hdrsize = struct.unpack('i',f.read(4))[0] #Size of special header\n if (Hdrsize != 0): #depending of point or image mode, header is there or not... 
\n ImgHdr = struct.unpack('36i',f.read(Hdrsize*4)) \n else :\n pass\n #f.read(148)\n ofltime = 0\n cnt_Ofl = 0\n cnt_lstart = 0\n cnt_lstop = 0\n cnt_Err = 0\n cnt_f = 0 #added Simon, frame counter\n WRAPAROUND = 65536\n syncperiod = 1e9/np.float(CntRate0) # in ns, CntRate0 is in Hz\n marker_frame,marker_linestart, marker_linestop = [],[],[] # Record marker events corresponding to line start and line stop\n pt = np.zeros(np.int(Records))\n sync = np.zeros(np.int(Records))\n for ii in range(np.int(Records)):\n T3Record = struct.unpack('I',f.read(4))\n A = T3Record[0]\n chan = (A&((2**4-1)<<(32-4)))>>(32-4) # Read highest 4 bits\n nsync = A&(2**16-1) #Read lowest 16 bits\n dtime = 0\n if ((chan ==1)|(chan ==2)|(chan ==3)|(chan ==4)):\n dtime = (A&(2**12-1)<<16)>>16\n elif chan == 15:\n markers = (A&(2**4-1)<<16)>>16\n # Then depending on the marker value there are different possibilties\n if markers == 0 :#This is a time overflow\n ofltime = ofltime + WRAPAROUND\n cnt_Ofl = cnt_Ofl+1\n elif markers == 1: # it is a true marker, 1 is for Frame start/stop\n #print (markers)\n cnt_f = cnt_f+1\n marker_frame.append((ofltime + nsync)*syncperiod)\n elif markers == 2 :# Here I take all the other markers without any differentiation (2 in linestart, 4 is linestop)\n #print (markers)\n cnt_lstart = cnt_lstart+1\n marker_linestart.append((ofltime + nsync)*syncperiod)\n elif markers == 4:\n #print (markers)\n cnt_lstop = cnt_lstop+1\n marker_linestop.append((ofltime + nsync)*syncperiod)\n else : # There is an error, should not happen in T3 Mode (says the PicoQuant Matlab code)... Still got a lot...\n cnt_Err = cnt_Err+1\n \n truensync = ofltime+nsync\n truetime = truensync*syncperiod + dtime*Resolution\n pt[ii] = truetime\n sync[ii] = truensync*syncperiod\n\n marker_frame = np.array(marker_frame)\n marker_linestart = np.array(marker_linestart)\n marker_linestop = np.array(marker_linestop)\n f.close()\n return sync,pt,marker_frame,marker_linestart,marker_linestop,Resolution,CntRate0,cnt_Err", "def plotdFvsTime(f_ts, r_ts, F_df, R_df, F_ddf, R_ddf):\n fig = pl.figure(figsize=(8,6))\n ax = fig.add_subplot(111)\n pl.setp(ax.spines['bottom'], color='#D2B9D3', lw=3, zorder=-2)\n pl.setp(ax.spines['left'], color='#D2B9D3', lw=3, zorder=-2)\n for dire in ['top', 'right']:\n ax.spines[dire].set_color('none')\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n\n max_fts = max(f_ts)\n rr_ts = [aa/max_fts for aa in f_ts[::-1]]\n f_ts = [aa/max_fts for aa in f_ts]\n r_ts = [aa/max_fts for aa in r_ts]\n\n line0 = pl.fill_between([r_ts[0], f_ts[-1]], R_df[0]-R_ddf[0], R_df[0]+R_ddf[0], color='#D2B9D3', zorder=-5)\n for i in range(len(f_ts)):\n line1 = pl.plot([f_ts[i]]*2, [F_df[i]-F_ddf[i], F_df[i]+F_ddf[i]], color='#736AFF', ls='-', lw=3, solid_capstyle='round', zorder=1)\n line11 = pl.plot(f_ts, F_df, color='#736AFF', ls='-', lw=3, marker='o', mfc='w', mew=2.5, mec='#736AFF', ms=12, zorder=2)\n\n for i in range(len(rr_ts)):\n line2 = pl.plot([rr_ts[i]]*2, [R_df[i]-R_ddf[i], R_df[i]+R_ddf[i]], color='#C11B17', ls='-', lw=3, solid_capstyle='round', zorder=3)\n line22 = pl.plot(rr_ts, R_df, color='#C11B17', ls='-', lw=3, marker='o', mfc='w', mew=2.5, mec='#C11B17', ms=12, zorder=4)\n\n pl.xlim(r_ts[0], f_ts[-1])\n\n pl.xticks(r_ts[::2] + f_ts[-1:], fontsize=10)\n pl.yticks(fontsize=10)\n\n leg = pl.legend((line1[0], line2[0]), (r'$Forward$', r'$Reverse$'), loc=1, prop=FP(size=18), frameon=False)\n pl.xlabel(r'$\\mathrm{Fraction\\/of\\/the\\/simulation\\/step}$', fontsize=16, 
color='#151B54')\n pl.ylabel(r'$\\mathrm{\\Delta G\\/%s}$' % P.units, fontsize=16, color='#151B54')\n pl.xticks(f_ts, ['%.2f' % i for i in f_ts])\n pl.tick_params(axis='x', color='#D2B9D3')\n pl.tick_params(axis='y', color='#D2B9D3')\n pl.savefig(os.path.join(P.output_directory, 'dF_t.pdf'))\n pl.close(fig)\n return", "def write_trajectory_plot_file( filename, times, frames ):\n\n plot = open( filename, \"w\")\n plot.write(\"\"\"# frame plot file \n# Each line represents four Cartesian points for plotting the origin and axis\n# vectors of a moving coordinate frame. A <> in the format represents an (x,y,z)\n# triple. Each axis vector has magnitude 20mm for visibility.\n# format: timestamp <origin> <end of X axis> <end of Y axis> <end of Z axis>\n# units: seconds, millimeters\n\"\"\")\n\n for i,tool in enumerate( frames ):\n origin = tool[0:3,3] # origin vector, expressed in ground frame\n xaxis = tool[0:3,0]\n yaxis = tool[0:3,1]\n zaxis = tool[0:3,2]\n plot.write( \"%f \" % times[i] )\n plot.write( \"%f %f %f \" % tuple(origin) )\n plot.write( \"%f %f %f \" % tuple(origin+20*xaxis) )\n plot.write( \"%f %f %f \" % tuple(origin+20*yaxis) )\n plot.write( \"%f %f %f\\n\" % tuple( origin+20*zaxis) )\n plot.close()\n return", "def test_3d_time():\n dic,data = ng.pipe.read(\"common_data/3d_pipe/data/test%03d.fid\")\n sdic,sdata = ng.pipe.read(\"common_data/3d_pipe/data/test001.fid\")\n assert data.shape == (128, 88, 1250)\n assert data.dtype == 'complex64'\n assert round(data[0,1,2].real,2) == -7.98\n assert round(data[0,1,2].imag,2) == 33.82\n assert round(data[10,22,5].real,2) == 15.71\n assert round(data[10,22,5].imag,2) == 15.1\n\n # and the first slice\n assert sdata.shape == (88, 1250)\n assert sdata.dtype == 'complex64'\n assert round(sdata[1,2].real,2) == -7.98\n assert round(sdata[1,2].imag,2) == 33.82\n assert round(sdata[22,5].real,2) == 22.65\n assert round(sdata[22,5].imag,2) == 13.65\n\n # slice/data matching\n assert_array_equal(data[0],sdata)\n\n write_readback_3D(dic,data)", "def pf_plot(pf, t):\n xx = pf.XS[t, :, 0]\n yy = pf.XS[t, :, 1]\n ww = pf.WS[t, :]\n plt.scatter(xx, yy, s=ww * 5000)", "def loadAndPlotDecomp1DMassData(dataFile='movingPointMassData/testPointMassDataDecmp000.pkl'):\n\n # Load in modules to handle the 3D plot (which I still do not well understand)\n from matplotlib.collections import PolyCollection as pc\n from mpl_toolkits.mplot3d import Axes3D\n\n # Load the data back (this is the decomposed version of the 1D moving mass data)\n inputDataFile = open(dataFile, \"rb\")\n dataOut = pickle.load(inputDataFile)\n inputDataFile.close()\n gCenters = dataOut[1] # The centers of the Gaussaians\n\n # Load in the original data (the filename is included in the loaded bit) with is the original 1D analog signal\n inputDataFile = open(dataOut[3], \"rb\")\n dataOrig = pickle.load(inputDataFile) # The original 1D mass movement data\n inputDataFile.close()\n\n # Now I need to plot these things out, iterate over the original 1D mass data.\n for i in range(len(dataOrig[0])):\n\n # Plot out the original data\n plt.figure(1)\n plt.plot(dataOrig[0][i][1], dataOrig[0][i][0])\n\n # Now plot out the decoped bits\n segmentedValues = dataOut[0][i]\n fig = plt.figure(2)\n ax = Axes3D(fig) # Because I am using older version\n verts = []\n for j in range(dataOut[1].size):\n segmentedValues[0, j] = 0\n segmentedValues[-1, j] = 0\n # print(list(zip(segmentedValues[:,i],dArray)))\n verts.append(list(zip(segmentedValues[:, j], dataOrig[0][i][1])))\n poly = pc(verts)\n 
ax.add_collection3d(poly, gCenters, zdir='y')\n ax.set_xlim3d(0, 1.2)\n ax.set_zlim3d(0, 5)\n ax.set_ylim3d(0, 6)\n plt.show()" ]
[ "0.65714025", "0.64165264", "0.64107704", "0.63405186", "0.631422", "0.6297773", "0.6248544", "0.615319", "0.60817546", "0.607438", "0.60634214", "0.60446906", "0.60240567", "0.601342", "0.6000441", "0.5957452", "0.5925133", "0.59175897", "0.5905955", "0.5905455", "0.5882972", "0.587616", "0.58669406", "0.5836873", "0.5815084", "0.58084685", "0.580844", "0.58037347", "0.57826895", "0.5773784", "0.57304704", "0.5717594", "0.571203", "0.57117045", "0.57087666", "0.57009304", "0.56905717", "0.5687393", "0.5668149", "0.5666843", "0.56639147", "0.56517917", "0.5635804", "0.56031716", "0.55938506", "0.5587133", "0.5586617", "0.55798066", "0.5560192", "0.5553023", "0.55523586", "0.5532624", "0.5528472", "0.55162627", "0.55137324", "0.5509489", "0.55086553", "0.55083746", "0.5507028", "0.5504911", "0.55029714", "0.55018216", "0.55000865", "0.54910856", "0.54899377", "0.54811126", "0.5479522", "0.5476463", "0.54742044", "0.54724365", "0.54704356", "0.54669183", "0.5466799", "0.5465278", "0.5458615", "0.54522175", "0.5442312", "0.54403615", "0.5440007", "0.5433357", "0.5432434", "0.54284996", "0.5423157", "0.5422832", "0.54221845", "0.5421056", "0.54138714", "0.54130405", "0.5411566", "0.541099", "0.5407325", "0.5406228", "0.5403066", "0.53988177", "0.5395981", "0.5392671", "0.53839", "0.538094", "0.5378343", "0.5376595" ]
0.6902801
0
Generate new data if directory is given, otherwise only try to plot existing data
def main(): data_file = 'shrec_timer.json' if len(sys.argv) == 2: generate_data(data_file, sys.argv[1]) plot_data(data_file)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_dir(main):\n try:\n wd = str(main.lineEdit_8.text())\n if wd == '':\n main.msg(\"Error \"+errorPath+\"plot_dir: Must choose directory first\")\n return\n for fi in os.listdir(wd):\n dataPath = os.path.join(wd, fi)\n main.msg(\"Plotting \"+str(fi))\n img = mpimg.imread(str(dataPath))\n imgObj = Img.Img(img, title=str(fi), filePath=str(dataPath))\n main.imgObjList.append(imgObj)\n func.update(main)\n slider.slider_update(main)\n except:\n main.msg(\"Error \"+errorPath+\"plot_dir: Make sure all files are images (tiff, jpeg, etc.)\")", "def make_all_plots(dirname='plots'):\n for worker_type in ['ordinary', 'normal', 'master', None]:\n name = 'rajpal'\n if worker_type is not None:\n name += '-' + worker_type\n data = Data.from_rajpal_icml15(worker_type=worker_type)\n data.make_plots(name)\n data.make_data('{}.csv'.format(name))\n\n data = Data.from_bragg_hcomp13(positive_only=False)\n data.make_plots(os.path.join(dirname, 'bragg'))\n data.make_data(os.path.join(dirname, 'bragg.csv'))\n\n data = Data.from_bragg_hcomp13(positive_only=True)\n data.make_plots(os.path.join(dirname, 'bragg-pos'))\n data.make_data(os.path.join(dirname, 'bragg-pos.csv'))\n\n data = Data.from_lin_aaai12(workflow='tag')\n data.make_plots(os.path.join(dirname, 'lin-tag'))\n data.make_data(os.path.join(dirname, 'lin-tag.csv'))\n\n data = Data.from_lin_aaai12(workflow='wiki')\n data.make_plots(os.path.join('lin-wiki'))\n data.make_data(os.path.join('lin-wiki.csv'))\n\n make_bragg_teach_plots(dirname=dirname)", "def create_plots() -> str:\r\n return _find_or_create_dir(PLOTS_FOLDER)", "def create_plot_dir(base_dir: str) -> str:\n time_str = datetime.now().strftime('%Y%b%d-%H%M%S') \n plot_dir = os.path.join(res_dir, 'fig_'+time_str)\n# plot_dir = os.path.join(res_dir, 'plot')\n if not os.path.exists(plot_dir):\n os.makedirs(plot_dir)\n\n return plot_dir", "def plot_folder(path):\r\n plt.figure(figsize=(20, 10))\r\n for filename in glob.glob(path + '/*.pspec'):\r\n x, y= np.loadtxt(fname=filename, delimiter='\\t',dtype=int, usecols = (1,2),\r\n skiprows=100, unpack = True)\r\n plt.plot(x, y)\r\n return plt.show()", "def generateDataset(self):\n if self.outdir[-1] != \"/\": \n self.outdir += \"/\"\n self.outdir += \"dataset_trackml\"\n i = 1\n while os.path.exists(self.outdir):\n self.outdir.replace(\"_\"+str(i-1), \"\")\n self.outdir += (\"_\"+str(i))\n i += 1\n cmd = \"mkdir -p \"+ self.outdir\n os.system(cmd)\n\n cont = pc.particleController()\n cont.generateEvents(self.numevents, self.hpe, self.detectors)\n\n self.generateHits(cont)\n self.generateTruths(cont)\n self.generateSolution(cont)", "def test_make_dataset_happy_path(self):\n # User story: user runs src.make_dataset() on the current directory\n # and gets a fully functional dataset\n pass", "def plot_directory_profiles(path, outname=None, show=True, xscale=1, yscale=1,\n xval='x', adjustRadial=True):\n outdirs = np.sort(os.listdir(path))\n plt.figure()\n\n #labels=['homogeneous','1D layering', '3D tomography'] #xscale=1e-3, yscale=1e2\n for i,outdir in enumerate(outdirs):\n pointsFile = os.path.join(path, outdir, 'points.h5')\n #print(pointsFile)\n #x_fem, ur_fem, uz_fem = pu.extract_points(pointsFile, output='cyl',adjustRadial=adjustRadial)\n #x_fem, ur_fem, uz_fem = pu.extract_points(pointsFile)\n #Load data\n\n x,y,z,ux,uy,uz = pu.extract_points(pointsFile)\n #Y = uz / yscale\n if xval == 'x':\n X = x / xscale\n Y1 = ux / yscale\n elif xval == 'r':\n X = np.hypot(x,y) / xscale\n ur_fem = np.hypot(ux,uy)\n Y1 = ur_fem / yscale\n if adjustRadial: #fix 
sign from hypot square root\n ur_fem = pu.radial2negative(Y1)\n\n x_fem = X #/ xscale #double scaling!\n ur_fem = Y1 #/ yscale\n uz_fem = uz / yscale\n\n #print(pointsFile)\n print(ur_fem.min(), ur_fem.max(), uz_fem.min(), uz_fem.max())\n\n l, = plt.plot(x_fem,uz_fem,'.-',lw=3,label=outdir)\n #l, = plt.plot(x_fem,uz_fem,'.-',lw=2,label=labels[i]) #for 3d heterogeneity example\n plt.plot(x_fem,ur_fem,'.--',lw=3, mfc='w',color=l.get_color()) #mfc='none' transparent\n\n # Annotate\n plt.axhline(color='k',lw=0.5)\n plt.xlabel('Distance [{}]'.format(get_unit(xscale)))\n plt.ylabel('Displacement [{}]'.format(get_unit(yscale)))\n plt.legend()\n\n #NOTE: custom annotations for 3d heterogeneity\n #plt.title('Elastic Heterogeneity Effects')\n #plt.legend([l1,l2,l3],['homogeneous','1D layering', '3D tomography'])\n\n if outname: plt.savefig(outname)\n if show: plt.show()", "def load_data_from_dir(self,\n dir_list=[],\n exclude=[]):\n dir_list_ = dir_list[:]\n\n if len(dir_list) == 0:\n eprint(\"CANNOT load data generator with an empty list of directories: {}\".format(dir_list))\n return\n\n for directory in dir_list_:\n if not os.path.isdir(directory):\n eprint(\"\\t\\t {}: {} is not a directory\".format(self.load_data_from_dir.__name__, directory))\n return\n\n # Read Data from current directory\n while dir_list_:\n # Pop first directory name and create dataloader if its a valid folder\n current_dir = dir_list_.pop(0)\n valid_dir = True\n for name in exclude:\n if name in current_dir and valid_dir:\n valid_dir = False\n data_file = current_dir + \"/data.mat\"\n if os.path.isfile(data_file) and \"takktile_\" in current_dir and valid_dir:\n self.dataloaders.append(takktile_dataloader(data_dir=current_dir,\n config=self.config,\n augment=self.augment))\n\n # Find all child directories of current directory and recursively load them\n data_dirs = [os.path.join(current_dir, o) for o in os.listdir(current_dir)\n if os.path.isdir(os.path.join(current_dir, o))]\n for d in data_dirs:\n dir_list_.append(d)\n\n self.num_dl = len(self.dataloaders)\n\n if self.transform_type:\n self.__calculate_data_transforms()\n\n # Create Eval Data\n if self.create_eval_data:\n self.eval_len = (self.__len__())//10\n self.create_eval_data = False\n\n # Calculate class number and ratios\n # Also calculate class diffs\n if not self.config['label_type'] == 'value':\n self.__class_nums = self.dataloaders[0].get_data_class_numbers(self.__get_data_idx(0))\n for i, dl in enumerate(self.dataloaders[1:]):\n self.__class_nums += dl.get_data_class_numbers(self.__get_data_idx(i+1))\n self.__class_ratios = self.__class_nums / float(np.mean(self.__class_nums))\n self.__class_diff = np.max(self.__class_nums) - self.__class_nums\n self.__class_diff = [d if n > 0 else 0 for n,d in zip(self.__class_nums, self.__class_diff)]\n\n # Reset and prepare data\n self.on_epoch_end()", "def plot_and_save_2d(file_name, path_name, raw_data_file, show=False):\n print '-'*23+'PLOT (2d)'+'-'*24\n \n print 'Loading data...',\n data = load_file(path_name+file_name)\n t = data['t']\n \n pic_path = path_name+'pics/'\n if not os.path.exists(pic_path):\n os.makedirs(pic_path)\n print 'done'\n print 'Creating and saving plots...', \n\n # Moment.\n plt.figure(1)\n plt.plot(t, data['dyn']['M'], t, data['static']['M'])\n plt.legend([\"Dynamic\", \"Static\"])\n plt.xlabel('t')\n plt.ylabel('M')\n plt.title('Moment')\n plt.grid()\n plt.savefig('%sM.png' %pic_path)\n\n # Axial force.\n plt.figure(2)\n plt.plot(t, data['dyn']['FY'], t, data['static']['FY'])\n 
plt.legend([\"Dynamic\", \"Static\"])\n plt.xlabel('t')\n plt.ylabel('Fa')\n plt.title('Fa')\n plt.grid()\n plt.savefig('%sFa.png' %pic_path)\n\n # Transverse force.\n plt.figure(3)\n plt.plot(t, data['dyn']['FZ'], t, data['static']['FZ'])\n plt.legend([\"Dynamic\", \"Static\"])\n plt.xlabel('t')\n plt.ylabel('Ft')\n plt.title('Ft')\n plt.grid()\n plt.savefig('%sFt.png' %pic_path)\n\n # Resultant force.\n plt.figure(4)\n plt.plot(t, np.sqrt(data['dyn']['FY']**2+data['dyn']['FZ']**2),\n t, np.sqrt(data['static']['FY']**2+data['static']['FZ']**2))\n plt.legend([\"Dynamic\", \"Static\"])\n plt.xlabel('t')\n plt.ylabel('Fr')\n plt.title('Fr')\n plt.grid()\n plt.savefig('%sFr.png' %pic_path)\n print 'done'\n\n if show:\n plt.show()", "def graph_dir(\n directory: str,\n filename: str = '',\n orientation: str = 'LR',\n data: bool = False,\n show_files: bool = True,\n show_hidden: bool = False,\n max_depth: int = -1,\n ranksep: Union[float, None] = None,\n file_type: str = 'svg',\n render: bool = True\n) -> None:\n assert directory in os.listdir(), \\\n f'Invalid argument for \"directory\". {directory} is not in the current directory'\n options = ['LR', 'RL', 'TB', 'BT']\n assert orientation.upper() in options, \\\n f'Invalid argument for \"orientation\". Must be one of {\", \".join(options)}'\n assert file_type in ['svg', 'png'], \\\n 'Invalid argument for \"file_type\". Must be either \"png\" or \"svg\"'\n\n options = {'rankdir': orientation.upper(), 'overlap': 'scale', 'splines': 'polyline'}\n if ranksep is not None:\n options['ranksep'] = str(ranksep)\n\n tree = Digraph(graph_attr = options)\n index = 0\n multiple = lambda l: '' if l == 1 else 's'\n\n # Get data for size of each folder\n if data:\n dir_sizes = size(directory)\n\n walkdir = os.path.normpath(f'./{directory}/')\n # directory_data is the string used to build up the text in the nodes.\n directory_data = []\n # file_node is the string used to build file information up the text in the nodes.\n file_node = []\n for root, dirs, files in os.walk(walkdir):\n if max_depth > 0 and root.count(os.sep) >= max_depth:\n continue\n if not show_hidden:\n dirs[:] = [dir_ for dir_ in dirs if not dir_.startswith(('__', '.'))]\n tree.attr('node', shape='folder', fillcolor='lemonchiffon', style='filled,bold')\n\n parent_directory = directory if root == '.' 
else root\n directory_data.clear()\n directory_data.extend(os.path.basename(parent_directory))\n \n file_memory = convert(sum([os.path.getsize(os.path.join(root, f)) for f in files]))\n # Display directory data if parameters permit\n if data:\n directory_data.extend(f' ({dir_sizes[root]})')\n # \\l left aligns items in their container\n directory_data.append('\\l')\n if data and dirs:\n directory_data.extend(f'{len(dirs)} Folder{multiple(len(dirs))}\\l')\n if data and files:\n directory_data.extend(f'{len(files)} File{multiple(len(files))}')\n if not show_files and dirs:\n directory_data.extend(f' ({file_memory})')\n directory_data.append('\\l')\n\n root = root.replace(os.sep, '')\n tree.node(root, label=''.join(directory_data))\n for dir_ in dirs:\n path = os.path.join(root, dir_).replace(os.sep, '')\n tree.node(path, label=dir_)\n tree.edge(root, path)\n\n if files and show_files:\n index += 1\n tree.attr('node', shape='box', style='')\n # Display files in a box on the graph as well as memory information\n # if parameters permit\n if data:\n file_node.extend(f'{len(files)} File{multiple(len(files))} ({file_memory})\\l')\n file_node.extend(('\\l'.join(files), '\\l'))\n file_node_str = ''.join(file_node)\n file_node.clear()\n id_ = f'{index}{file_node_str}'.replace(os.sep, '')\n tree.node(id_, label=file_node_str)\n tree.edge(root, id_)\n\n filename = filename.rsplit('.', 1)[0] if filename else f'{directory}_Graph'\n if not render:\n tree.render(filename, format=file_type)\n os.remove(filename)\n else:\n if file_type == 'png':\n url = f'https://quickchart.io/graphviz?format={file_type}&graph={tree.source}'\n with open(f'{filename}.{file_type}', mode='wb') as f:\n f.write(requests.get(url).content)\n else:\n url = f'https://quickchart.io/graphviz?graph={tree.source}'\n src = requests.get(url).text\n # If request failed no svg is sent.\n if '<svg' not in src and '</svg>' not in src:\n print('Error rendering graph with quickchart.io.')\n else:\n with open(f'{filename}.svg', mode='w') as f:\n f.write(src)", "def plot(data, filenames, game, destination, num_interplt):\n if isinstance(filenames, str):\n plt.plot(data[:,0], data[:,1], label=filenames)\n ax = plt.gca()\n ax.xaxis.get_major_formatter().set_powerlimits((0,1))\n plt.xlabel('# of steps')\n plt.ylabel('Cumulative reward')\n plt.title('{}'.format(game))\n plt.legend()\n figname = '{}_{}.png'.format(game, datetime_random_str())\n plt.savefig('{}/{}'.format(destination, figname))\n print('Images saved as {}/{}'.format(destination, figname))\n plt.close()\n else:\n print('{} results have been found.'.format(len(filenames)))\n agents = []\n for i in range(len(filenames)):\n # :-24 is to remove the previous datetime string\n # :-30 is to remove the train label\n agents.append(filenames[i][:-30])\n uni_agents = np.unique(np.array(agents))\n print('Game result from agent: {}'.format(uni_agents))\n fig = plt.figure()\n for agent in uni_agents:\n i_data = []\n for i in range(len(agents)):\n if agent == agents[i]:\n i_data.append(data[i])\n i_data = calculate_interpolation(np.array(i_data), num_interplt)\n plot_mean_standard_error(i_data, agent)\n plt.legend()\n ax = plt.gca()\n ax.xaxis.get_major_formatter().set_powerlimits((0,1))\n plt.xlabel('# of steps')\n plt.ylabel('Cumulative reward')\n plt.title('{}'.format(game))\n figname = '{}_{}.png'.format(game, datetime_random_str())\n plt.savefig('{}/{}'.format(destination, figname))\n plt.close()\n print('Images saved as {}/{}'.format(destination, figname))", "def create_plots(self):\n if not 
os.path.exists(self.output_folder):\n os.makedirs(self.output_folder)\n self.sse_plot()\n self.avg_sse_plot()", "def main():\n df_data = import_clean_process()\n plot_data_matplotlib(df_data)\n return", "def real_time_plot(files):\n global len_data, first_iter, colors\n\n for i,F in enumerate(files):\n\n # Load data\n data = pylab.loadtxt(F, delimiter=',', skiprows=1, usecols=(5,6,7))\n\n # Check if new data\n if (len_data!= len(data[:,0])):\n\n # Plot\n label = ntpath.basename(F)\n label = label[0:-4]\n ax.plot(data[:,0], data[:,1], data[:,2], colors[i], label=label)\n\n pyplot.draw()\n\n # Update globals\n len_data = len(data[:,0])\n\n if (first_iter == True):\n ax.legend()\n first_iter = False", "def plot_waveforms(data, name, title, directory_name):\n plt.figure(figsize=(20, 10))\n plt.plot(data)\n plt.title(title)\n plt.savefig('./' + directory_name + '/' + name)\n pass", "def autogen_dataset_dir():\n return TabularDataset.autogen('tests/data/dummy_tabular',\n seed=42,\n sep=',')", "def generate_data(out_fname, data_directory):\n def store_result(duration, loci_number):\n \"\"\" Store result of current timing run\n \"\"\"\n print(' %ds for %d loci' % (duration, loci_number))\n\n if os.path.isfile(out_fname):\n with open(out_fname, 'r') as fd:\n cur = json.load(fd)\n else:\n cur = []\n\n with open(out_fname, 'w') as fd:\n cur.append((loci_number, duration))\n json.dump(cur, fd)\n\n for fn in os.listdir(data_directory):\n fname = os.path.join(data_directory, fn)\n\n print('Loading \"%s\"...' % fname, end=' ', flush=True)\n contacts = np.loadtxt(fname)\n print('Done')\n\n start = time.time()\n try:\n apply_shrec3d(contacts)\n except:\n print('>>> Some error occured')\n traceback.print_exc()\n end = time.time()\n\n store_result(end-start, contacts.shape[0])", "def updateDataStorage(infoToPlot, directory):\n \n \n dataStored=pickle.load(open(os.path.join(directory,'data.pkl'), 'rb'))\n \n for key, value in dataStored.items():\n value += infoToPlot[key]\n infoToPlot[key]=[] \n \n \n with open(os.path.join(directory,'data.pkl'), 'wb') as f:\n pickle.dump(dataStored, f, pickle.HIGHEST_PROTOCOL)", "def set_data_directory(path):\n if not os.path.exists(path):\n return False\n\n set(\"data_dir\", path)\n return True", "def do_data_plots(cat, subdir):\n dla_data.noterdaeme_12_data()\n (l_N, cddf, cddf68, cddf95) = cat.plot_cddf(zmax=5,color=\"blue\")\n np.savetxt(path.join(subdir,\"cddf_all.txt\"), (l_N, cddf, cddf68[:,0], cddf68[:,1], cddf95[:,0],cddf95[:,1]))\n plt.xlim(1e20, 1e23)\n plt.ylim(1e-28, 5e-21)\n plt.legend(loc=0)\n save_figure(path.join(subdir, \"cddf_gp\"))\n plt.clf()\n\n (l_N, cddf, cddf68, cddf95) = cat.plot_cddf(zmax=5,color=\"blue\", moment=True)\n plt.xlim(1e20, 1e23)\n plt.legend(loc=0)\n save_figure(path.join(subdir, \"cddf_moment_gp\"))\n plt.clf()\n\n #Evolution with redshift\n (l_N, cddf, cddf68, cddf95) = cat.plot_cddf(4,5, label=\"4-5\", color=\"brown\")\n np.savetxt(path.join(subdir,\"cddf_z45.txt\"), (l_N, cddf, cddf68[:,0], cddf68[:,1], cddf95[:,0],cddf95[:,1]))\n (l_N, cddf, cddf68, cddf95) = cat.plot_cddf(3,4, label=\"3-4\", color=\"black\")\n np.savetxt(path.join(subdir,\"cddf_z34.txt\"), (l_N, cddf, cddf68[:,0], cddf68[:,1], cddf95[:,0],cddf95[:,1]))\n (l_N, cddf, cddf68, cddf95) = cat.plot_cddf(2.5,3, label=\"2.5-3\", color=\"green\")\n np.savetxt(path.join(subdir,\"cddf_z253.txt\"), (l_N, cddf, cddf68[:,0], cddf68[:,1], cddf95[:,0],cddf95[:,1]))\n (l_N, cddf, cddf68, cddf95) = cat.plot_cddf(2,2.5, label=\"2-2.5\", color=\"blue\")\n 
np.savetxt(path.join(subdir,\"cddf_z225.txt\"), (l_N, cddf, cddf68[:,0], cddf68[:,1], cddf95[:,0],cddf95[:,1]))\n plt.xlim(1e20, 1e23)\n plt.ylim(1e-28, 5e-21)\n plt.legend(loc=0)\n save_figure(path.join(subdir,\"cddf_zz_gp\"))\n plt.clf()\n\n #dNdX\n dla_data.dndx_not()\n dla_data.dndx_pro()\n (z_cent, dNdX, dndx68, dndx95) = cat.plot_line_density(zmax=5)\n np.savetxt(path.join(subdir,\"dndx_all.txt\"), (z_cent, dNdX, dndx68[:,0],dndx68[:,1], dndx95[:,0],dndx95[:,1]) )\n plt.legend(loc=0)\n plt.ylim(0,0.16)\n save_figure(path.join(subdir,\"dndx_gp\"))\n plt.clf()\n\n #Omega_DLA\n dla_data.omegahi_not()\n dla_data.omegahi_pro()\n dla_data.crighton_omega()\n (z_cent, omega_dla, omega_dla_68, omega_dla_95) = cat.plot_omega_dla(zmax=5)\n# cat.tophat_prior = True\n# cat.plot_omega_dla(zmax=5, label=\"Tophat Prior\", twosigma=False)\n# cat.tophat_prior = False\n np.savetxt(path.join(subdir,\"omega_dla_all.txt\"), (z_cent, omega_dla, omega_dla_68[:,0],omega_dla_68[:,1], omega_dla_95[:,0], omega_dla_95[:,1]))\n plt.legend(loc=0)\n plt.xlim(2,5)\n plt.ylim(0,2.5)\n save_figure(path.join(subdir,\"omega_gp\"))\n plt.clf()", "def matplotlib_plot(self, output_dir: Union[str, Path]):\n keys2 = set.union(*[set(self.get_keys2(k)) for k in self.get_keys()])\n for key2 in keys2:\n keys = [k for k in self.get_keys() if key2 in self.get_keys2(k)]\n plt = self._plot_stats(keys, key2)\n p = Path(output_dir) / f\"{key2}.png\"\n p.parent.mkdir(parents=True, exist_ok=True)\n plt.savefig(p)", "def gen_data(data_root, val_ratio = 0.1):\n session_list = os.listdir(data_root)", "def create_dataset_folder_structure():\n\n path = Path(f'{DATASETS}/{FEATURES_DATASET}')\n if not os.path.exists(path):\n print(f'\\nWARNING: The path does not exist. Creating new directory...\\n{path}\\n')\n os.mkdir(path)\n\n try:\n for path in new_sensor_paths:\n if not os.path.exists(path):\n print(f'\\nWARNING: The path does not exist. Creating new directory...\\n{path}\\n')\n os.mkdir(path)\n else:\n print(\"\\nPath already exists!\")\n except:\n return False\n else:\n return True", "def dump_data(self):\n while not path.isdir(self.directory):\n print(\n \"# The directory {} does not exist. Do you want to create it (1, default) or specify another? (2) [1/2]\".format(\n self.directory))\n select = input()\n if select == \"2\":\n self.directory = input(\"Enter new directory: \\n\")\n else:\n mkdir(self.directory)\n print(\"# Directory \" + self.directory + \" created\")\n\n self.fullpath = self.directory + \"/\" + self.fName\n\n self.data_instance.dump_data(self.fullpath)", "def run(**kwargs):\n del kwargs # Unused args\n if os.path.exists(DATASET_PATH):\n LOGGER.info('... Dataset already exists. 
Skipping.')\n else:\n build()", "def loadPredictions(self):\n\n dir_path = os.path.dirname(os.path.realpath(__file__))\n message = 'Select folder'\n folderDialog = QtWidgets.QFileDialog(self, message, dir_path)\n folderDialog.setFileMode(QtWidgets.QFileDialog.Directory)\n folderDialog.setOption(QtWidgets.QFileDialog.DontUseNativeDialog, True)\n fileName = [] # Returns a list of the directory\n\n # Plot the window to select the csv file\n if folderDialog.exec_():\n fileName = folderDialog.selectedFiles()\n # Debug\n #fileName = ['/media/dimitris/TOSHIBA EXT/Image_Document_Classification/PMC-Dataset']\n print(fileName)\n if os.path.isdir(str(fileName[0])):\n self.loadFolder(str(fileName[0]))\n else:\n message = 'Only csv files'\n self.messageBox(message)\n return\n\n self.selectFigures()", "def autogen_dataset_dir_with_test():\n return TabularDataset.autogen('tests/data/dummy_tabular',\n test_path='tests/data/dummy_tabular_test',\n seed=42,\n sep=',')", "def plot_one_directory(args, dirnames, figname):\n logdir = args.logdir\n num = len(ATTRIBUTES)\n fig, axes = subplots(num, figsize=(13,4*num))\n\n for (dd, cc) in zip(dirnames, COLORS):\n A = np.genfromtxt(join(logdir, dd, 'log.txt'), \n delimiter='\\t', \n dtype=None, \n names=True)\n x = A['Iterations']\n\n for (i,attr) in enumerate(ATTRIBUTES):\n axes[i].plot(x, A[attr], '-', lw=lw, color=cc, label=dd)\n axes[i].set_ylabel(attr, fontsize=ysize)\n axes[i].tick_params(axis='x', labelsize=tick_size)\n axes[i].tick_params(axis='y', labelsize=tick_size)\n axes[i].legend(loc='best', ncol=2, prop={'size':legend_size})\n\n axes[0].set_ylim([-10,10])\n axes[1].set_ylim([-10,10])\n axes[2].set_ylim([0,10])\n axes[3].set_ylim([0,10])\n\n plt.tight_layout()\n plt.savefig(figname)", "def plot_dat_file(dat_paths: [str]):\n import pandas as pd\n import matplotlib.pyplot as plt\n\n fig, ax = plt.subplots(1, 3, sharey=\"all\", sharex=\"col\", figsize=(8, 6))\n for i, dat_path in enumerate(dat_paths):\n if i == i:\n skipfoot = 11 + 9\n else:\n skipfoot = 11\n dat_file = pd.read_csv(\n dat_path,\n skiprows=3,\n skipfooter=skipfoot,\n header=None,\n delim_whitespace=True,\n engine=\"python\",\n )\n depth = dat_file.values[:, 0]\n vp = dat_file.values[:, 1]\n vs = dat_file.values[:, 3]\n dens = dat_file.values[:, 5]\n\n ax[0].plot(vp, depth, label=f\"nr {i}\")\n\n ax[1].plot(vs, depth)\n ax[2].plot(dens, depth)\n ax[0].set_ylim(ax[0].get_ylim()[::-1])\n ax[0].legend()\n plt.show()", "def plot_chosen_data(main, dataPath):\n error = \"Error \"+errorPath+\"plot_chosen_data: Must choose data of proper format (tiff, jpeg, etc.)\"\n try:\n if dataPath == '':\n main.msg('thinks it has nothing')\n main.msg(error)\n return\n data = mpimg.imread(dataPath)\n imgObj = Img.Img(data, title = os.path.basename(dataPath), filePath = dataPath)\n main.imgObjList.append(imgObj)\n main.horizontalSlider.setMaximum(len(main.imgObjList)-1)\n main.horizontalSlider.setValue(main.horizontalSlider.maximum())\n func.plot_img_obj(main, imgObj)\n except:\n main.msg(error)", "def generate_data():\n for subdir, dirs, files in os.walk(legend_images_dir):\n for _file in files:\n getTables(_file)\n\n file_list = []\n for subdir, dirs, files in os.walk(pdf_output_dir):\n for _file in files:\n if _file.endswith('.pdf'):\n file_list.append(_file)\n\n print (\"Writing merged output in Output.pdf...\")\n current_dir = os.getcwd()\n mergeOutput(file_list, current_dir + \"/Output.pdf\")\n\n clean()", "def _create_data_directory(self):\n self.src_data_dir.mkdir(exist_ok=True, parents=True)", "def 
dir_dataset():\n return TabularDataset.from_path('tests/data/dummy_tabular', sep=',')", "def dirGenerator(datadirectory):\n\n subdirectories = [row for row in os.listdir(datadirectory) if '$' not in row]\n\n #iterate through subdirectories\n for day in subdirectories:\n\n #collect raw data set file names in sub directories\n fileNames = [row for row in os.listdir(datadirectory + day + '\\\\RawDataFiles\\\\')]\n\n #iterate over the raw datasets\n print 'There are ' + str(len(fileNames)) + ' datasets in ' + day\n for index, datafile in enumerate(fileNames):\n yield datadirectory + day + '\\\\RawDataFiles\\\\' + datafile, day, datafile, index", "def plot_data_subject_dirs(data_dirs=None, file_list=None,\n labelby=None, limitby=None, plots=None, figsize=None,\n transparency=1., yscale='linear', xrange=None,\n yrange=None, force_axes_same_scale=True,\n process_data=pass_through, limit_mult_files=np.inf, debug=1):\n df_type = 'wide'\n if plots is None:\n plots = dict(grid=True)\n senlistorder = None\n\n all_data_overlaid = ('all_data_traces' in plots and\n (plots['all_data_traces'] is not None))\n printed_entry_info = False\n if ((data_dirs is None) and (file_list is None)):\n if isinstance(limit_mult_files, tuple):\n limit_mult, bal_list = limit_mult_files\n else:\n bal_list = None\n limit_mult = limit_mult_files\n if np.isinf(limit_mult):\n limit_mult = None\n file_list = sample_file_list(\n limitby=limitby,\n limit_mult_files=limit_mult,\n balance_types=bal_list, df_type=df_type,\n seed=42, debug=max(0, debug - 1))\n\n if file_list is None:\n file_list, unique_entries, total_files = accumulate_subject_file_list(\n data_dirs, limitby=limitby, limit_mult_files=limit_mult_files,\n df_type=df_type, debug=debug)\n if debug:\n print('unique entries in metadata from file accumulation')\n for k in unique_entries:\n print(f' {k}: {unique_entries[k]}')\n printed_entry_info = True\n else:\n total_files = len(file_list)\n plot_sensor = None\n if all_data_overlaid:\n if transparency == 1.:\n transparency = 0.5\n plot_to_make = sum([bool(plots[k]) for k in plots if 'all_data' not in k])\n if isinstance(plots['all_data_traces'], str):\n plot_sensor = plots['all_data_traces']\n if plot_to_make == 0:\n if isinstance(plots['all_data_traces'], str):\n plots['overlap'] = True\n else:\n plots['grid'] = True\n assert sum([bool(plots[k]) for k in plots if 'all_data' not in k]) == 1, (\n \"cannot display multiple plot types\")\n assert isinstance(plots['all_data_traces'], str) or (\n 'overlap' not in plots or not(plots['overlap'])), (\n \"cannot plot single overlapping plot if sensor is not specified\")\n if figsize is None and 'grid' in plots and isinstance(plots['grid'], str):\n if plots['grid'].startswith('square'):\n figsize = (16, 18)\n else:\n figsize = (15, 64 * 8)\n\n pyplot.figure(figsize=figsize)\n legd = []\n running_min_max = (np.inf, -np.inf)\n if debug == 1:\n progress_bar = tqdm.tqdm(total=total_files, miniters=1)\n else:\n legd = None\n if figsize is None:\n figsize = (12, 14)\n if isinstance(limit_mult_files, tuple):\n limit_mult_files = limit_mult_files[0]\n\n file_count = 0\n color_dict = dict()\n unique_entries = defaultdict(set)\n for file in file_list:\n orig_data_dir = int(('test' in str.lower(file)))\n if file in files_skip_processing:\n continue\n if all_data_overlaid:\n if debug == 1:\n progress_bar.n = file_count\n progress_bar.set_description('files processed')\n if file_count >= limit_mult_files:\n break\n else:\n clear_output()\n\n full_file_url = file\n if debug > 1:\n print(f'read 
file: {full_file_url}')\n\n df, info = my_read_eeg_generic(full_file_url, df_type=df_type,\n orig_tt_indic=orig_data_dir)\n\n if all_data_overlaid:\n if labelby and labelby in info:\n id = labelby + ':' + str(info[labelby])\n else:\n id = info['subject']\n else:\n id = None\n if debug > 1:\n print(' | '.join([f'{n:>8s}:{str(v):4s}' for n, v in info.items()]))\n\n sen_index = df.columns.names.index('sensor')\n senlist = df.columns.levels[sen_index]\n if senlistorder is None:\n senlistorder = senlist\n elif all_data_overlaid:\n assert all([sl == chkl\n for sl, chkl, in zip(senlist, senlistorder)]), (\n 'different data set has list of sensors in a '\n 'different order')\n Z = df.values\n nsamp, nsen = Z.shape\n time = np.arange(nsamp) / SAMP_FREQ\n x_data, Z, xlabel, ylabel = process_data(time, Z, 'time (s)',\n 'voltage (uV)', fs=SAMP_FREQ)\n if all_data_overlaid and force_axes_same_scale:\n running_min_max = (min(Z.min(), running_min_max[0]),\n max(Z.max(), running_min_max[1]))\n minv, maxv = running_min_max\n else:\n minv = maxv = None\n if ('overlap' in plots and plots['overlap']):\n plot_all_overlaid(x_data, Z, xlabel, ylabel, senlist, figsize,\n id=id, yscale=yscale, yrange=yrange, xrange=xrange,\n multi_trace_plot_labels=(file_count == 0),\n color_dict=color_dict, transparency=transparency,\n plot_sensor=plot_sensor, legend=legd)\n\n if ('grid' in plots and plots['grid']):\n grid_square = (not(isinstance(plots['grid'], str)) or\n plots['grid'].startswith('square'))\n plot_grid(x_data, Z, xlabel, ylabel, senlist, minv, maxv,\n id=id, grid_square=grid_square, figsize=figsize,\n multi_trace_plot_labels=(file_count == 0),\n yscale=yscale, yrange=yrange, xrange=xrange,\n color_dict=color_dict, transparency=transparency,\n legend=legd)\n\n if ('threed' in plots and plots['threed']) and not(\n all_data_overlaid):\n y_data = df.columns.labels[sen_index].values()\n plot_3d(x_data, y_data, Z, df, xlabel, ylabel, figsize=figsize)\n\n if not(all_data_overlaid):\n input('press enter to cont...')\n file_count += 1\n for k in info:\n unique_entries[k].add(info[k])\n\n if file_count >= limit_mult_files:\n break\n\n if all_data_overlaid:\n if 'overlap' in plots and plots['overlap']:\n pyplot.xlabel(xlabel, fontsize=14)\n pyplot.ylabel(ylabel, fontsize=15)\n # if minmax[1]/(minmax[0] if minmax[0] > 0 else 1.) 
> 1e1:\n # pyplot.axes().set_xscale('log', basex=2)\n pyplot.title(f'Sensor: {plots[\"all_data_traces\"]}', fontsize=15)\n pyplot.legend(handles=legd, fontsize=15)\n pyplot.show()\n if debug and not(printed_entry_info):\n print('unique entries in metadata from file accumulation')\n for k in unique_entries:\n print(f' {k}: {unique_entries[k]}')\n return file_list", "def updateplot(self):\n plotfiles = []\n try:\n self.plotter.reset()\n self.plotter.set_xrange(self.xrangemin.value(), self.xrangemax.value())\n self.plotter.set_yrange(self.yrangemin.value(), self.yrangemax.value())\n self.plotter.set_bgirange(self.bgintmin.value(), self.bgintmax.value())\n self.plotter.set_pkrange(self.halphamin.value(), self.halphamax.value())\n for n,pf in enumerate(self.selecteddata):\n tf = os.path.join(self.tempdir, \"tf%d\" % n)\n self.dfparser.writefile(tf, pf)\n plotfiles.append(tf)\n self.plotter.set_plot(plotfiles)\n except datafile.Datafile_error as e:\n self.warningmsg.setText(e.args[0])\n self.plotter.clear()\n except plotter.Plotter_error as e:\n self.warningmsg.setText(e.args[0])\n self.plotter.clear()", "def runPathing(userInputs, gridfile, n, totalCount, current_x, current_y, count, SS, data_new, gauss_data=None, data, textfile, loopCount=1):\n\n for i in range(totalCount):\n filename = gridfile #'my_file.dat'\n with open(filename) as f: # with/open/as syntax useful for files\n array1 = [[float(x) for x in line.split()] for line in f]\n nparray = np.array(array1) # convert array to numpy array\n f.close()\n \n x = nparray[:,0] # x is first collumn, y is second\n x = x.reshape(n+1,n+1)\n y = nparray[:,1] \n y = y.reshape(n+1,n+1)\n \n distance = np.zeros((n+1, n+1))\n \n current = 1\n\n ##############################################\n # If you want to plot something, do that here:\n ##############################################\n\n # Ready plot for inside if statement\n fig = plt.figure()\n #plt.subplot(1,2,1)\n plt.imshow(data, cmap='coolwarm',vmin=0,vmax=1.5)\n plt.colorbar(ticks=[0.0,0.25,0.5,0.75,1.0,1.25,1.5,1.75,2.0],extend='max')\n #plot = plt.scatter([], [])\n \n #plot point of max variance\n #maxi = np.argmax(data)\n #maxi_tuple = np.unravel_index(maxi, (n+1, n+1))\n #ymax, xmax = maxi_tuple\n #plot1 = plt.scatter(xmax,ymax)\n #plot.set_offsets(SS)\n plt.title('100 Realizations')\n \n plt.axis([0, n, 0, n])\n ###############################################\n # End of first part of plotting\n ###############################################\n \n # For statement looping through all points\n #for i in range(current+1, totalCount):\n distance = createDistance(current_x, current_y, distance, n)\n weight = totalWeighting(distance, count, data, n)\n current_x, current_y = newPoint(current_x, current_y, weight, n)\n \n ##############################################\n # Continue plotting if you want to add points\n ##############################################\n # plot new point and save image\n point = (current_y, current_x)\n SS = np.append(SS, point)\n #plot.set_offsets(SS)\n\n # subplot 2\n #data_new[current_x,current_y] = gauss_data[current_x,current_y]\n #plt.subplot(1,2,2)\n #plt.title('Data Reveal')\n #plt.imshow(data_new, cmap='coolwarm',vmin=0,vmax=1, origin='lower')\n #plt.colorbar(ticks=[0.0,0.25,0.5,0.75,1.0],extend='max')\n path = 'images/img%i%i.png' % (loopCount,i)\n plt.savefig(path, format='png')\n\n plt.close()\n ##############################################\n # Continue plotting if you want to add points\n ##############################################\n \n # mark point as 
visited\n count[current_x][current_y] = 0\n \n # Add point to file object\n file_object = open(textfile, 'a')\n if userInputs is False:\n file_object.write('%s %s %s \\n' % (x[current_x][current_y], y[current_x][current_y], gauss_data[current_x][current_y]))\n elif userInputs if True:\n collectedData = input('Data collected for point (%s,%s): ' % (current_x, current_y))\n file_object.write('%s %s %s \\n' % (x[current_x][current_y], y[current_x][current_y], collectedData))\n file_object.close()\n\n # Return last point measured\n return current_x, current_y, count, SS, weight,data_new", "def generate_all_cost_plots(suffix):\n directory_name = \"inputs/\"\n directory = os.fsencode(directory_name)\n outfolder = \"plots/\" + suffix.strip(\".in\") + \"/\"\n try:\n os.makedirs(outfolder)\n except FileExistsError:\n pass\n for file in os.listdir(directory):\n filename = os.fsdecode(file)\n if filename.endswith(suffix):\n print(\"Solving : \", filename)\n inputfile = directory_name + filename\n num_clusters, cost = cost_vs_clusters(inputfile)\n outfile = outfolder + filename.strip(\".in\") + \".png\"\n plot_cost_vs_clusters(cost,num_clusters,outfile)", "def save_plot(directory, name = None):\n global global_figure_count \n if directory[-1]!=\"/\":\n directory = directory + \"/\"\n directory_paths = directory.split(\"/\")\n prefix = directory_paths[0]\n for i in directory_paths[1:]:\n if not os.path.exists(prefix):\n os.mkdir(prefix)\n prefix = prefix + \"/\" + i\n\n if len(plt.get_fignums()) == 1:\n if name is None:\n plt.savefig(directory + 'figure%d.png' % global_figure_count, bbox_inches='tight', dpi = 600)\n global_figure_count+=1\n else:\n plt.savefig(directory + '%s' % name, bbox_inches='tight', dpi = 600)\n else:\n for i in plt.get_fignums():\n plt.figure(i)\n plt.savefig(directory + 'figure%d.png' % global_figure_count, bbox_inches='tight', dpi = 600)\n global_figure_count+=1\n plt.close('all')", "def prepare_dataset(fpath):\n raise NotImplementedError", "def make_line_plot(\r\n dir_path, data_file_link, background_color, label_color, xy_coords,\r\n props, x_len=8, y_len=4, draw_axes=False, generate_eps=True):\r\n rc('font', size='8')\r\n rc('axes', linewidth=.5, edgecolor=label_color)\r\n rc('axes', labelsize=8)\r\n rc('xtick', labelsize=8)\r\n rc('ytick', labelsize=8)\r\n fig = figure(figsize=(x_len, y_len))\r\n mtitle = props.get(\"title\", \"Groups\")\r\n x_label = props.get(\"xlabel\", \"X\")\r\n y_label = props.get(\"ylabel\", \"Y\")\r\n\r\n title('%s' % mtitle, fontsize='10', color=label_color)\r\n xlabel(x_label, fontsize='8', color=label_color)\r\n ylabel(y_label, fontsize='8', color=label_color)\r\n\r\n sorted_keys = sorted(xy_coords.keys())\r\n labels = []\r\n for s_label in sorted_keys:\r\n s_data = xy_coords[s_label]\r\n c = s_data[3]\r\n m = s_data[2]\r\n plot(s_data[0], s_data[1], c=c, marker=m, label=s_label,\r\n linewidth=.1, ms=5, alpha=1.0)\r\n\r\n fp = FontProperties()\r\n fp.set_size('8')\r\n legend(prop=fp, loc=0)\r\n\r\n show()\r\n\r\n img_name = 'scree_plot.png'\r\n savefig(\r\n os.path.join(dir_path,\r\n img_name),\r\n dpi=80,\r\n facecolor=background_color)\r\n\r\n # Create zipped eps files\r\n eps_link = \"\"\r\n if generate_eps:\r\n eps_img_name = str('scree_plot.eps')\r\n savefig(os.path.join(dir_path, eps_img_name), format='eps')\r\n out = getoutput(\"gzip -f \" + os.path.join(dir_path, eps_img_name))\r\n eps_link = DOWNLOAD_LINK % ((os.path.join(data_file_link, eps_img_name)\r\n + \".gz\"), \"Download Figure\")\r\n\r\n return os.path.join(data_file_link, 
img_name), eps_link", "def _make_files(self):\n if not self.path.is_dir():\n raise FileNotFoundError(f\"Path {self.path} does not exist.\")\n\n # Make the filepaths\n self.file_points = self.path / \"point.dat\"\n self.file_lines = self.path / \"line.dat\"\n self.file_cadastre = self.path / \"cadastre.dat\"\n self.file_portals = self.path / \"portals.dat\"\n\n with open(self.file_points, \"w\") as f:\n # 2 lines ignored\n header = datetime.datetime.now().strftime(\"Generated: %d/%m/%Y %H:%M\\n\")\n f.write(header)\n self.points_dfs = []\n with open(self.file_lines, \"w\") as f:\n # 5 lines ignored\n header = (\n datetime.datetime.now().strftime(\"Generated: %d/%m/%Y %H:%M\\n\")\n + 3 * \"Generated: \\n\"\n + \"Name,Section,source_group,x1,y1,z1,x2,y2,z2,width,vert. ext.,-,-,\"\n \"emission_rate[kg/h/km],-,-,-,-\\n\"\n )\n f.write(header)\n with open(self.file_cadastre, \"w\") as f:\n # 1 line ignored\n header = \"x,y,z,dx,dy,dz,emission_rate[kg/h],-,-,-,source_group\\n\"\n f.write(header)\n with open(self.file_portals, \"w\") as f:\n # 2 lines ignored\n header = (\n datetime.datetime.now().strftime(\"Generated: %d/%m/%Y %H:%M\\n\")\n + \"x1,y1,x2,y2,z0,z1,emission_rate[kg/h],-,-,-,source_group\\n\"\n )\n f.write(header)\n\n\n # File to save the source groups values\n self.file_source_groups = self.path / \"source_groups.json\"\n with open(self.file_source_groups, \"w\") as f:\n # reverse the dict (items become keys and vice versa)\n reversed_source_groups = {v: k for k, v in self.source_groups.items()}\n json.dump(reversed_source_groups, f, indent=2)", "def autogen_dataset_dir_ratios():\n return TabularDataset.autogen('tests/data/dummy_tabular',\n seed=42,\n sep=',',\n test_ratio=0.5,\n val_ratio=0.5)", "def plot_and_save_3d(file_name, path_name, raw_data_file, show=False):\n print '-'*23+'PLOT (3d)'+'-'*24\n \n print 'Loading force data...', \n data = load_file(path_name+file_name)\n t = data['t']\n dyn = 1.0\n \n pic_path = path_name+'pics/'\n if not os.path.exists(pic_path):\n os.makedirs(pic_path)\n print 'done'\n print 'Creating and saving plots...', \n\n # x-moment\n plt.figure(1)\n plt.plot(t, dyn*data['dyn']['MX'], t, data['static']['MX'])\n plt.legend([\"Dynamic\", \"Static\"])\n plt.xlabel('t')\n plt.ylabel('Mx')\n plt.title('Moment (x)')\n plt.grid()\n plt.savefig('%sMx.png' %pic_path)\n\n # y-moment\n plt.figure(2)\n plt.plot(t, dyn*data['dyn']['MY'], t, data['static']['MY'])\n plt.legend([\"Dynamic\", \"Static\"])\n plt.xlabel('t')\n plt.ylabel('M')\n plt.title('Moment (y)')\n plt.grid()\n plt.savefig('%sMy.png' %pic_path)\n\n # z-moment\n plt.figure(3)\n plt.plot(t, dyn*data['dyn']['MZ'], t, data['static']['MZ'])\n plt.legend([\"Dynamic\", \"Static\"])\n plt.xlabel('t')\n plt.ylabel('Mz')\n plt.title('Moment (z)')\n plt.grid()\n plt.savefig('%sMz.png' %pic_path)\n \n # x-force\n plt.figure(4)\n plt.plot(t, dyn*data['dyn']['FX'], t, data['static']['FX'])\n plt.legend([\"Dynamic\", \"Static\"])\n plt.xlabel('t')\n plt.ylabel('Fx')\n plt.title('Fx')\n plt.grid()\n plt.savefig('%sFx.png' %pic_path)\n\n # y-force\n plt.figure(5)\n plt.plot(t, dyn*data['dyn']['FY'], t, data['static']['FY'])\n plt.legend([\"Dynamic\", \"Static\"])\n plt.xlabel('t')\n plt.ylabel('Fy')\n plt.title('Fy')\n plt.grid()\n plt.savefig('%sFy.png' %pic_path)\n\n # z-force\n plt.figure(6)\n plt.plot(t, dyn*data['dyn']['FZ'], t, data['static']['FZ'])\n plt.legend([\"Dynamic\", \"Static\"])\n plt.xlabel('t')\n plt.ylabel('Fz')\n plt.title('Fz')\n plt.grid()\n plt.savefig('%sFz.png' %pic_path)\n print 'done'\n\n 
#nice_looking_plots(t, data['dyn'], data['static'])\n\n if show:\n plt.show()", "def gc_data(dataset, dirname, train_ratio=0.8):\n \n # Define path where dataset should be saved\n data_path = \"data/{}.pth\".format(dataset)\n\n # If already created, do not recreate\n if os.path.exists(data_path):\n data = torch.load(data_path)\n else:\n if dataset == 'syn6':\n #G = gengraph.gen_syn6()\n data = SimpleNamespace()\n with open('data/BA-2motif.pkl', 'rb') as fin:\n data.edge_index, data.x, data.y = pkl.load(fin)\n data.x = np.ones_like(data.x)\n else:\n # MUTAG\n data = SimpleNamespace()\n with open('data/Mutagenicity.pkl', 'rb') as fin:\n data.edge_index, data.x, data.y = pkl.load(fin)\n\n # Define NumSpace dataset\n data.x = torch.FloatTensor(data.x)\n data.edge_index = torch.FloatTensor(data.edge_index)\n data.y = torch.LongTensor(data.y)\n _, data.y = data.y.max(dim=1)\n data.num_classes = 2\n data.num_features = data.x.shape[-1]\n data.num_nodes = data.edge_index.shape[1]\n data.num_graphs = data.x.shape[0]\n data.name = dataset\n\n # Shuffle graphs \n p = torch.randperm(data.num_graphs)\n data.x = data.x[p]\n data.y = data.y[p]\n data.edge_index = data.edge_index[p]\n \n # Train / Val / Test split\n data.train_mask, data.val_mask, data.test_mask = split_function(\n data.y, train_ratio)\n # Save data\n torch.save(data, data_path)\n return data", "def generate_plots(type_, data, name, plots_location):\n plt.cla()\n plt.clf()\n plot_type = getattr(sns, type_)\n plot_ = plot_type(data)\n fig = plot_.get_figure()\n fig.savefig('{}/{}_{}.png'.format(plots_location, name, type_))", "def rewrite_data_files():\n return False", "def setDataDir(self, directory):\n if os.path.exists(directory):\n self.__datadir = directory\n print(\"Datadir setted to '%s'\" % directory)\n else:\n raise ValueError(\"Incorrect file path %s\" % directory)", "def load_data(f='', use_cols=[], xlabel='', ylabel='',scatter=False,contour=False, connect=False,\n errorbar=False, std_col=None, data_domain=None, labo=[], title='',\n return_data=False, graph=False, multiple_files=None, file_stem='', folder=None,\n file_list = None, color_list=None, combine=False):\n\n def combine_data(data_files_dict):\n \"\"\"\n This combines data from different files so that the first column of file 1 is concatenated with first column\n oof file 2 and so on\n :param data_files_dict:\n :return:\n \"\"\"\n key_list = list(data_files_dict.keys())\n no_col = len(data_files_dict[key_list[0]])\n combined = []\n for n in range(0, no_col):\n d = np.empty(shape=[0, 1])\n for k in data_files_dict:\n d = np.append(d, data_files_dict[k][n])\n combined.append(d)\n return combined\n\n def data_graph(graph=False):\n \"\"\"\n Gets data from folder or list of files or file and graphs it in\n some manner\n :param graph:\n :return:\n \"\"\"\n def axes_data(use_cols1, data1, domain=None):\n if domain is not None:\n axis = [0] * (2 * len(use_cols1))\n for k in range(len(use_cols1)):\n axis[2 * k] = domain\n axis[2 * k + 1] = data1[k]\n return axis\n else:\n axis = [data1[k] for k in use_cols1]\n return axis\n if graph:\n axes = axes_data(use_cols, data, domain=data_domain)\n if scatter:\n for i in range(int(len(axes)/2)):\n if i % 2 == 0:\n plt.scatter(axes[2*i], axes[2*i+1], color_list[i],\n label=labo[i])\n else:\n plt.scatter(axes[2 * i], axes[2 * i + 1], color_list[i]\n ,label=labo[i])\n\n if connect:\n for i in range(int(len(axes)/2)):\n if i % 2 == 0:\n plt.plot(axes[2*i], axes[2*i+1], color_list[i],\n label=labo[i])\n else:\n plt.plot(axes[2 * i], axes[2 * i 
+ 1], color_list[i]\n , label=labo[i])\n\n if errorbar:\n plt.errorbar(*axes, yerr=data[std_col], fmt='o', label=labo)\n if contour:\n from mpl_toolkits.mplot3d import Axes3D\n from matplotlib import cm\n from matplotlib.ticker import LinearLocator, FormatStrFormatter\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n z = axes[2][:100]\n len_z = len(z)\n n = np.sqrt(len_z)\n x = np.arange(0, n)\n y = x\n x, y = np.meshgrid(x,y)\n two_dz = z.reshape((int(n), int(n)))\n surf = ax.plot_surface(x,y,two_dz,cmap=cm.coolwarm,\n linewidth=0, antialiased=False)\n # Customize the z axis.\n ax.set_zlim(0, 0.18)\n ax.zaxis.set_major_locator(LinearLocator(10))\n ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))\n\n # Add a color bar which maps values to colors.\n fig.colorbar(surf, shrink=0.5, aspect=5)\n # heat_map = plt.pcolor(two_dz)\n # heat_color = plt.colorbar(heat_map, orientation = 'horizontal')\n # heat_color.set_label('Average Fidelity')\n\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.title(title)\n plt.legend()\n plt.show()\n\n if multiple_files and folder is not None:\n os.chdir(folder)\n files = sorted(glob.glob(file_stem))\n for g in files:\n data = genfromtxt(g, dtype=float, unpack=True)\n data_graph(graph)\n elif multiple_files and file_list:\n if combine:\n file_dictionary = {g: genfromtxt(g, dtype=float, unpack=True) for g in file_list}\n data = combine_data(file_dictionary)\n data_graph(graph)\n else:\n for g in file_list:\n data = genfromtxt(g, dtype=float, unpack=True)\n data_graph(graph)\n\n else:\n data = genfromtxt(f, dtype=float, unpack=True)\n data_graph(graph)\n\n if return_data:\n retrieved = [data[k] for k in use_cols]\n return retrieved", "def plot_input_data(data_path,file_name,plots_folder,format_type,lower_matrix,upper_matrix,normalized=False,output_name=None):\n if output_name is None:\n aux = ['input_data']\n else:\n aux = [output_name,'input_data']\n if normalized is True:\n aux.append('normalized')\n plot_file_name = '-'.join(aux)+'.png'\n plot_file_name = os.path.join(plots_folder,plot_file_name)\n\n M_max, M = check_format_input(data_path,file_name,lower_matrix,upper_matrix,format_type,return_M = True,create_input_file=False)\n plt.figure()\n plt.subplot(111,aspect='equal')\n if(normalized):\n plt.imshow(M/float(M_max), cmap=plt.cm.Oranges, interpolation=\"nearest\")\n plt.colorbar()\n plt.title('Input data (normalized)')\n\n plt.savefig(plot_file_name)\n print 'Saved input data plot in %s' % plot_file_name\n else:\n plt.imshow(M, cmap=plt.cm.Oranges, interpolation=\"nearest\")\n plt.colorbar()\n plt.title('Input data')\n plt.savefig(plot_file_name)\n print 'Saved input data plot in %s' % plot_file_name\n return()", "def plot_directory_numex(path, vals, param='density', outname=None, show=True,\n xscale=1e-3,yscale=1e2):\n #vals = arange(2300.0, 2800.0, 50.0)\n outdirs = np.sort(os.listdir(path))\n plt.figure()\n\n # Plot surface profiles for each parameter\n for val,outdir in zip(vals,outdirs):\n pointsFile = os.path.join(path, outdir, 'points.h5')\n print(pointsFile)\n x_fem, ur_fem, uz_fem = pu.extract_points(pointsFile, output=True, adjustRadial=True)\n x_fem = x_fem / xscale\n ur_fem = ur_fem / yscale\n uz_fem = uz_fem / yscale\n l, = plt.plot(x_fem,uz_fem,'.-',label=str(val))\n plt.plot(x_fem,ur_fem,'.-',color=l.get_color())\n\n # Annotate\n plt.axhline(color='k') #zero displacement line\n plt.title(param)\n plt.xlabel('Distance [{}]'.format(get_unit(xscale)))\n plt.ylabel('Displacement [{}]'.format(get_unit(yscale)))\n plt.legend()\n\n if 
outname: plt.savefig(outname)\n if show: plt.show()", "def generate_visualization(\n datasets: Optional[List[str]] = None,\n *,\n dst_dir: str,\n) -> None:\n full_names = _get_full_names(datasets)\n generate_fn = functools.partial(\n _generate_single_visualization, dst_dir=dst_dir)\n logging.info(f'Generate figures for {len(full_names)} builders')\n with multiprocessing.Pool(WORKER_COUNT_DATASETS) as tpool:\n tpool.map(generate_fn, full_names)", "def create_dir(dir_path,plot_type):\n\n alphabet = \"ABCDEFGHIJKLMNOPQRSTUZWXYZ\"\n alphabet += alphabet.lower()\n alphabet += \"01234567890\"\n\n\n if dir_path==None or dir_path=='':\n dir_path=''\n random_dir_name=''.join([choice(alphabet) for i in range(10)])\n dir_path ='./'+plot_type+strftime(\"%Y_%m_%d_%H_%M_%S\")+random_dir_name+'/'\n\n if not os.path.exists(dir_path):\n os.mkdir(dir_path)\n\n return dir_path", "def generate_figures():\r\n # create results directory if necessary\r\n try:\r\n makedirs(\"results\")\r\n except OSError as e:\r\n if e.errno != errno.EEXIST:\r\n raise\r\n \r\n for b in benchmarks:\r\n generate_figure(model[b], b)", "def plotfile(self):\r\n filename = self.locatefile()\r\n if filename == \"\":\r\n print \"\\nNo file was chosen, exiting ...\\n\"\r\n return\r\n else:\r\n print \"\\nXYZ Data file:\\n\" + filename\r\n \r\n print \"\\nReading XYZ data file....\"\r\n xyz = XYZImporter(filename)\r\n geodata = xyz.genericdata\r\n print \"FINISHED reading XYZ data file\"\r\n\r\n # Note PNG is only 8 bit, and so PDF has greater colour\r\n # depth \r\n print \"\\nAbout to render plot ...\"\r\n gp = GridPlotterCustom()\r\n gp.shownulls = False\r\n title = \"Plot of XYZ data file: \" + filename\r\n outfname = (filename.replace('.', '_') +\r\n '_PLOT_custom.pdf')\r\n gp.plotgeodata(geodata, title, outfname)\r\n print \"FINISHED rendering plot to:\\n\" + outfname\r\n print \"\\n\\n\"", "def _place_dataset(self, origin_file_path, out_dir):\n from distutils.dir_util import copy_tree\n\n # copy subdirectory example\n copy_tree(origin_file_path, out_dir)\n\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n else:\n print_info(\"Found previous version at {}\".format(out_dir))\n return\n\n print_info(\"Writing data to {}...\".format(out_dir))", "def start_data_file(subject_id):\n \n #Check the data_file:\n \n list_data_dir = os.listdir('./data')\n\n i=1\n this_data_file = '%s_%s_%s_texture.csv'%(subject_id,\n time.strftime('%m%d%Y'),i)\n\n #This makes sure that you don't over-write previous data:\n while this_data_file in list_data_dir:\n i += 1\n this_data_file='%s_%s_%s_texture.csv'%(subject_id,\n time.strftime('%m%d%Y'),i)\n \n #Open the file for writing into:\n f = file('./data/%s'%this_data_file,'w')\n\n #Write some header information\n f.write('# Time : %s#\\n'%(time.asctime()))\n\n return f", "def __init__(self, dir_path, window_size,\n user_map_path, computer_map_path, auth_type_map_path, logon_type_map_path):\n logging.info(f\"Initiating Dataset instance for directory {dir_path}\")\n self.directory = dir_path\n self.filenames = [filename for filename in os.listdir(dir_path) if os.path.isfile(os.path.join(dir_path, filename))]\n assert len(self.filenames) > 0\n random.shuffle(self.filenames)\n self.window_size = window_size\n self.len = self.count_len()\n self.user_map, self.user_count = util.load_mapping(user_map_path)\n self.computer_map, self.computer_count = util.load_mapping(computer_map_path)\n self.auth_type_map, self.auth_type_count = util.load_mapping(auth_type_map_path)\n self.logon_type_map, 
self.logon_type_count = util.load_mapping(logon_type_map_path)", "def visualize_from_file(\n ts_dir: str,\n filled_dir: str,\n figure_dir: str\n) -> None:\n df = pd.read_csv(\n ts_dir,\n index_col=0,\n header=0,\n parse_dates=[\"DATE\"],\n date_parser=lambda d: datetime.strptime(d, \"%Y-%m-%d\")\n ).replace(\".\", np.NaN).astype(np.float32)\n\n df_filled = pd.read_csv(\n filled_dir,\n index_col=0,\n header=0,\n parse_dates=[\"DATE\"],\n date_parser=lambda d: datetime.strptime(d, \"%Y-%m-%d\")\n ).replace(\".\", np.NaN).astype(np.float32)\n\n utils.visualize_interpolation(df, df_filled, figure_dir)", "def fromdirectory(directory):\n files = glob.glob(f\"{directory}/*.fits\")\n ret = LightCurve()\n ret.add_files(*(files)) \n return ret", "def plot(self, csvDataset = None):\n for item in self.data_array:\n item.plot()\n # If csvDataset is not None, plots also the file\n csvDataset.plot(sampleName=item.file_name)", "def setup_data_directory(dir_path):\n if exists(\"{}.db\".format(dir_path)):\n raise Exception(\"Simulation data directory {}.db already exists!\".format(dir_path))\n else:\n pass", "def main():\n style.use(\"ggplot\")\n start = datetime.datetime(2020, 1, 1)\n end = datetime.datetime(2020, 4, 17)\n\n create_csv(start, end)\n data_frame = read_csv()\n plot_data(data_frame)", "def synthetic_data(dataset, dirname, train_ratio=0.8, input_dim=10):\n # Define path where dataset should be saved\n data_path = \"data/{}.pth\".format(dataset)\n\n # If already created, do not recreate\n if os.path.exists(data_path):\n data = torch.load(data_path)\n\n else:\n # Construct graph\n if dataset == 'syn1':\n G, labels, name = gengraph.gen_syn1(\n feature_generator=featgen.ConstFeatureGen(np.ones(input_dim)))\n elif dataset == 'syn4':\n G, labels, name = gengraph.gen_syn4(\n feature_generator=featgen.ConstFeatureGen(np.ones(input_dim, dtype=float)))\n elif dataset == 'syn5':\n G, labels, name = gengraph.gen_syn5(\n feature_generator=featgen.ConstFeatureGen(np.ones(input_dim, dtype=float)))\n elif dataset == 'syn2':\n G, labels, name = gengraph.gen_syn2()\n input_dim = len(G.nodes[0][\"feat\"])\n\n # Create dataset\n data = SimpleNamespace()\n data.x, data.edge_index, data.y = gengraph.preprocess_input_graph(\n G, labels)\n data.x = data.x.type(torch.FloatTensor)\n data.num_classes = max(labels) + 1\n data.num_features = input_dim\n data.num_nodes = G.number_of_nodes()\n data.name = dataset\n\n # Train/test split only for nodes\n data.train_mask, data.val_mask, data.test_mask = split_function(\n data.y.numpy(), train_ratio)\n\n # Save data\n torch.save(data, data_path)\n\n return data", "def editdirectory(self):\n\n ## Have user select existing directory\n new_directory = str(QtGui.QFileDialog.getExistingDirectory(self, \"Select Directory\",\n '/home/lsst/Data/'))\n\n ## If return is not NULL, set the DATA_DIRECTORY and update filename\n if new_directory:\n\n try:\n os.makedirs(new_directory)\n except OSError:\n if not os.path.isdir(new_directory):\n self.logger.exception(\"An error occurred while creating a new directory.\")\n\n global DATA_DIRECTORY\n DATA_DIRECTORY = new_directory\n self.displaydirectory()\n self.logger.info(\"Data directory changed to {0}.\".format(new_directory))", "def test_get_denoiser_data_dir(self):\r\n\r\n obs = get_denoiser_data_dir()\r\n\r\n self.assertTrue(exists(obs))\r\n self.assertTrue(exists(obs + 'FLX_error_profile.dat'))", "def loop_dir(dir_name: str, graph_ext: str) -> None:\n directory = fsencode(dir_name)\n for file in listdir(directory):\n filename = 
fsdecode(file)\n if filename.endswith(graph_ext):\n draw_graph(filename)", "def from_directory(cls, dirpath, verbose=True):\n if dirpath[-1] == '/':\n dirpath = dirpath[:-1]\n mesh_hash = os.path.splitext(os.path.basename(dirpath))[0]\n prepath = dirpath[:dirpath.rfind(mesh_hash)]\n assert prepath[-1] == '/'\n prepath = prepath[:-1]\n split, synset = prepath.split('/')[-2:]\n ex = cls(split=split, synset_or_cat=synset,\n mesh_hash=mesh_hash, dynamic=True, verbose=verbose)\n # pylint: disable=protected-access\n ex._tx_path = f'{dirpath}/orig_to_gaps.txt'\n ex._dodeca_depth_and_normal_path = f'{dirpath}/depth_and_normals.npz'\n ex._gt_path = f'{dirpath}/mesh_orig.ply'\n ex._directory_root = dirpath\n ex._grid_path = f'{dirpath}/coarse_grid.grd'\n # pylint: enable=protected-access\n ex.precomputed_surface_samples_from_dodeca_path = (\n f'{dirpath}/surface_samples_from_dodeca.pts'\n )\n ex.is_from_directory = True\n return ex", "def gen_data_dir(img_dir, id_label_dict, num_class, shuffle=True):\n img_file_path = gen_img_files(img_dir, shuffle)\n return gen_data_file(img_file_path, id_label_dict, num_class)", "def main():\n parser = argparse.ArgumentParser(description=\n ('Plot and save data from DS9 projections. If only labels are '\n 'given, it will just plot existing dat files.'\n 'Other regions/objects in file are ignored.'))\n\n parser.add_argument('infile', help='Input DS9 region file (fk5 coords)')\n parser.add_argument('datroot', help='Stem for output profile data')\n\n parser.add_argument('-p', '--pltroot', help='Stem for output plots')\n parser.add_argument('-f', '--files', help=('Files (.fits) to be processed,'\n ' one for each energy band'),\n nargs='*')\n\n parser.add_argument('-l', '--labels', help='Band labels for plots/files',\n nargs='+')\n parser.add_argument('-v', '--verbose', action='store_true',\n help='verbose mode')\n parser.add_argument('-s', '--subplot', action='store_true',\n help='Generate subplot plots')\n\n # Parse arguments\n args = parser.parse_args()\n regions_file = args.infile\n pltroot, datroot = args.pltroot, args.datroot\n bands, labels = args.files, args.labels # NOTE bands = args.files...\n verbose, subplot = args.verbose, args.subplot\n\n # Check arguments for sanity\n # If -p is supplied, output plots\n # If -b is supplied, output data\n if bands is None:\n if not pltroot:\n parser.error('Nothing to be done! 
Need bands or pltroot.')\n else:\n print 'No bands supplied, generating plots only'\n else:\n if len(bands) != len(labels):\n raise ValueError(('Non-zero number of bands '\n 'does not match number of labels!'))\n elif not pltroot:\n print 'No pltroot supplied, generating data only'\n\n # Parse projections\n rspecs = get_projection_params(regions_file)\n if verbose:\n print 'Projections parsed from region file'\n\n # Generate data files, if desired\n if bands is not None:\n regparse.check_dir(datroot, verbose) # only create if saving data\n generate_dat_files(rspecs, datroot, bands, labels)\n\n # Generate plots, if desired\n if pltroot:\n regparse.check_dir(pltroot, verbose)\n generate_plots(rspecs, datroot, pltroot, labels, verbose, subplot)\n\n if verbose:\n print 'Done!'", "def publish_data(username):\n x1 = []\n x2 = []\n y1 = []\n y2 = []\n\n for point_set in __data:\n x1.append(point_set[0][0])\n y1.append(point_set[0][1])\n\n x2.append(point_set[1][0])\n y2.append(point_set[1][1])\n\n figure = plt.figure()\n plt.plot(x1, y1, label='Atrium')\n plt.plot(x2, y2, label='Ventrical')\n plt.xlabel('Time (ms)')\n plt.ylabel('Voltage (V)')\n plt.title(\"'{0}' Live Egram Data\".format(username))\n plt.legend()\n\n timestamp = datetime.datetime.now().strftime(Config.getInstance().get('Database', 'db.timestamp')).replace(' ', '_').replace('/', '-').replace(':', '-')\n graph_doc_name = \"{0}_Live_Egram_Data_From_{1}.pdf\".format(username, timestamp)\n pp = PdfPages(os.path.join(parentfolder, 'downloads', graph_doc_name))\n pp.savefig(figure)\n pp.close()\n\n csv_output = list(zip(x1, y1, x2, y2))\n\n csv_doc_name = \"{0}_Live_Egram_Data_From_{1}.csv\".format(username, timestamp)\n with open(os.path.join(parentfolder, 'downloads', csv_doc_name), 'w') as file:\n writer = csv.writer(file)\n writer.writerow(['Atrium Timestamp', 'Atrium Value', 'Ventrical Timestamp', 'Ventrical Value'])\n for line in csv_output:\n writer.writerow(line)", "def plot(self, job):\n # fill PlotJob with needed data if it doesn't exist\n # Plotter will look for the files it needs relative to the work directory\n # If this fails it will fall back to a baseline location if one was \n # Provided to cmake at the time this file was generated\n if job.dataPath == None :\n job.dataPath = \"Scenarios/\" + job.verificationDirectory + \"/baselines/\"\n \n if job.dataFile == None:\n job.dataFile = job.name + \"Results.zip\"\n \n if job.outputFilename==None:\n job.outputFilename=job.titleOverride+\".jpg\"\n \n if len(job.outputFilename.split(\".\"))==1:\n job.outputFilename+=\".jpg\"\n \n if job.imageWidth==None and job.imageHeight==None:\n job.imageWidth=1600\n job.imageHeight=800\n \n if not os.path.exists(job.dataPath):\n job.dataPath = os.path.join(job.basedir,job.dataPath)\n \n if not os.path.isfile(os.path.join(job.dataPath,job.dataFile)):\n job.dataPath = os.path.join(job.basedir,job.dataPath)\n \n if not job.fontSize:\n job.fontSize=22\n \n if not os.path.exists(os.path.dirname(job.outputDir)):\n os.mkdir(os.path.dirname(job.outputDir))\n \n self.drawgraph(job,os.path.join(job.dataPath,job.dataFile),os.path.join(job.outputDir,job.outputFilename))", "def ensure_data_folder_existence() -> None:\n folder_name = params.DATA_FOLDER_NAME\n if not folder_name in os.listdir('.'):\n os.mkdir(folder_name)", "def generate_dat_files(rspecs, datroot, bands, labels):\n d = ds9.ds9()\n d.set('rgb')\n d.set('rgb red')\n\n # Save plaintext projection data\n # Idea: minimize file (band) loading operations\n for fname, flab in zip(bands, 
labels):\n d.set('file ' + fname) # Load a band\n for i in xrange(len(rspecs)):\n d.set('regions', rspecs[i]) # Load a region\n d.set('rgb red') # Plot projection data\n dat_fname = '{0}_{1:02d}_band_{2}.dat'.format(datroot, i+1, flab)\n d.set('plot {0} save {1}'.format(d.get('plot'), dat_fname))\n d.set('regions delete all')\n d.set('exit')", "def generate_plots(path):\n videos = glob(path + '/*.mkv')\n print(path, len(videos), videos)\n\n if len(videos) == 0:\n return\n else:\n videos = videos[0]\n\n metadata_list = glob(path + '/metadata.txt')\n #print(path, len(metadata_list), metadata_list)\n\n if len(metadata_list) == 0:\n return \n\n P = Preprocessor()\n P.import_video(str(videos))\n P.read_metadata(path)\n P.preprocess()\n Im = P.frames_processed\n if len(Im) == 0:\n print(len(Im))\n return\n\n z_start = P.z_start\n z_end = P.z_end\n\n mean, cov = analyze_image(Im)\n\n window_size = 10\n mean_smoothed = smoothing.mean_moving_average(mean, window_size)\n cov_smoothed = smoothing.cov_moving_average(cov, window_size)\n\n c = CubicFitRotated()\n c.fit(mean=mean_smoothed, cov=cov_smoothed, z_start=z_start, z_end=z_end)\n\n try:\n os.mkdir(path + '/analysis')\n path += '/analysis'\n except OSError:\n pass\n\n\n plots.plot_mean(mean, z_start, z_end).savefig(path + '/beam_center.png')\n plots.plot_beta(cov, z_start, z_end).savefig(path + '/sigma_squared.png')\n\n export.export_mean(mean = mean, filename = path + '/center.csv', z_start = z_start, z_end = z_end)\n export.export_cov(cov = cov, filename = path + '/cov.csv', z_start = z_start, z_end = z_end)\n\n plt.close('all')", "def plotOceanParcelsAccumulatedResults(input_data_folder, output_folder, start_year, end_year, dt=1):\n # Only for\n tot_days = (end_year-start_year)*365\n start_date = datetime.strptime(str(start_year),'%Y')\n\n open_files = []\n for c_day in np.arange(0, tot_days, dt):\n print(F\"------- {c_day}---------\")\n # Released months\n c_date = start_date + timedelta(days=int(c_day))\n months = (c_date.year - start_date.year)*12 + c_date.month - start_date.month\n\n # Iterate over all the files that should contribute to the image\n fig = plt.figure(figsize=(20,10))\n ax = plt.subplot(1, 1, 1, projection=ccrs.PlateCarree())\n for c_month in range(0, months + 1):\n c_file_year = (start_date + relativedelta(months=int(c_month))).year\n c_file_month = (start_date + relativedelta(months=int(c_month))).month\n skip_days = c_day - (c_date - datetime.strptime(F\"{c_file_year}-{c_file_month}\",'%Y-%m')).days\n\n if len(open_files) <= c_month:\n file_name = F\"TenYears_YesWinds_YesDiffusion_NoUnbeaching_{c_file_year}_{(c_file_month):02d}.nc\"\n print(F\"Reading new file: {file_name}\")\n open_files.append(Dataset(join(input_data_folder, file_name), \"r\", format=\"NETCDF4\"))\n\n c_time_step = c_day - skip_days\n # lats = open_files[c_month].variables['lat'][:,c_time_step]\n # lons = open_files[c_month].variables['lon'][:,c_time_step]\n ax.scatter(open_files[c_month].variables['lon'][:,c_time_step], open_files[c_month].variables['lat'][:,c_time_step], color='c', s=1)\n\n title = F\"{start_date.strftime('%Y-%m-%d')} - {c_date.strftime('%Y-%m-%d')}\"\n ax.coastlines()\n ax.set_title(title, fontsize=30)\n\n # plt.show()\n plt.savefig(F\"{output_folder}/{start_date.strftime('%Y_%m')}_{c_day:04d}.png\")\n plt.close()", "def collect_data(self, src_directory=None,src_filename_format=None,\n date_selection=None,units=None,exposure_schedule=None,bin_width=None) :\n\n # TODO: There must be a better way to do this\n if not (src_directory is 
None) :\n self.src_directory = src_directory\n if not (src_filename_format is None) :\n self.src_filename_format = src_filename_format\n if not (date_selection is None) :\n self.date_selection = date_selection\n if not (units is None) :\n self.units = units\n if not (exposure_schedule is None) :\n self.exposure_schedule = exposure_schedule\n if not (bin_width is None) :\n self.bin_width = bin_width\n\n # first we read the src_directory to check the total number of unique years available\n data_dir_contents = os.listdir(self.src_directory)\n # TODO: improve jankiness of this format-matching search for filenames\n char_year = self.src_filename_format.find('yyyy')\n dataset_years = [ x for x in data_dir_contents if re.findall(self.src_filename_format.replace(\"yyyy\",\"[0-9]{4}\"),x)]\n dataset_years = [ int(x[char_year:char_year+4]) for x in dataset_years ]\n\n # Now we can handle default options like \"all\"\n if type(self.date_selection) == str and self.date_selection == \"all\" :\n date_selection = pd.date_range(start=str(dataset_years[0])+\"-01-01\",\n end=str(dataset_years[-1])+\"-12-31\")\n else :\n date_selection = self.date_selection # TODO: much more interpretation options here\n\n #now we find unique years \n list_of_years = sorted(set(date_selection.year))\n\n for i in range(len(list_of_years)) :\n year = list_of_years[i]\n print(\"Processing year \"+str(year)) #should use logging, don't yet know how\n dataset=nc.Dataset(self.src_directory+self.src_filename_format.replace('yyyy',str(year))) \n dataset.set_auto_mask(False) #to get normal arrays (faster than default masked arrays)\n\n if dataset.dimensions['time'].size == 24 :\n # needed if just a single day\n time_subset = [True for i in range(dataset.dimensions['time'].size)]\n else :\n # Next we pull a subset from the netCDF file\n # declare false array with same length of time dimension from netCDF\n time_subset = [False for i in range(dataset.dimensions['time'].size)] \n # reshape false array to have first dimension 24 (hours in day)\n time_subset = assert_data_shape_24(time_subset) \n # set the appropriate days as true\n time_subset[:,date_selection[date_selection.year == year].dayofyear-1] = True \n # flatten time_subset array back to one dimension\n time_subset = time_subset.flatten(order='F')\n\n # load subset of data\n print(\" Slicing netcdf data with time subset\")\n data = dataset['UV_AS'][time_subset,:,:] #work in UVI by default because it's easy to read\n # TODO: check units of dataset files, CF conventions for UVI or W/m2\n\n # now to calculate doses if requested\n if self.units in [\"SED\",\"J m-2\",\"UVIh\"] :\n # if calculating doses\n print(' Calculating doses')\n data = assert_data_shape_24(data)\n data = np.sum(np.reshape(self.exposure_schedule,[24,1,1,1]) * data,axis=0)\n\n elif (self.exposure_schedule != np.ones(24)).any() :\n # assume elsewise calculating intensity (i.e. 
UV-index) then limit data selection according\n # to schedule (remembering that default schedule is just ones)\n print(' Slicing data with exposure schedule')\n # reshape so first dimension is 24 hours\n data = assert_data_shape_24(data)\n # select only those hours with nonzero entry in exposure schedule\n data = data[self.exposure_schedule != 0,:,:,:]\n # select nonzero values from exposure schedule\n exposure_schedule_nonzero = self.exposure_schedule[self.exposure_schedule != 0]\n\n # if any nonzero entries aren't 1, multiply data accordingly\n if (exposure_schedule_nonzero != 1).any() :\n data *= np.reshape(exposure_schedule_nonzero,[len(exposure_schedule_nonzero),1,1,1])\n\n # recombine first two dimensions (hour and day) back into time ready for histogram\n data = assert_data_shape_24(data,reverse=True) \n\n # now multiply data by conversion factor according to desired untis\n # TODO: Should expand upon this in reference files\n data *= {\"SED\":0.9, \"J m-2\":90, \"UVIh\":1, \"UVI\":1, \"W m-2\":0.025, \"mW m-2\":25}[self.units]\n\n # if this is the first iteration, declare a hist\n if i == 0 :\n # seems like useful metadata to know bin n and edges\n # TODO: reconsider where this belongs in the code (__init__?)\n self.num_bins = int(np.nanmax(data) // self.bin_width ) + 2\n self.bin_edges = (np.array(range(self.num_bins+1)) - 0.5) * self.bin_width \n # this form allows for weird custom bin edges, but probably will never use that\n self.bin_centers = self.bin_edges[:-1] + 0.5 * np.diff(self.bin_edges)\n\n # TODO: think about possible cases where dimensions could differ\n self.pix_hist=np.zeros([self.num_bins,\n np.shape(data)[-2],np.shape(data)[-1]], dtype=np.int16)\n\n # TODO: this should also be done by some initial dataset analysis, but that's a drastic\n # design overhaul\n self.lat = dataset['lat'][:]\n self.lon = dataset['lon'][:]\n\n else :\n new_num_bins = int(np.nanmax(data) // self.bin_width) + 2 - self.num_bins\n # check if new data requires extra bins in pix_hist\n if new_num_bins > 0 :\n # append zeros to pix hist to make room for larger values\n self.pix_hist = np.concatenate((self.pix_hist,np.zeros(\n [new_num_bins,np.shape(self.pix_hist)[-2],np.shape(self.pix_hist)[-1]],\n dtype=np.int16)),axis=0)\n # update bin information\n self.num_bins = self.num_bins + new_num_bins\n self.bin_edges = (np.array(range(self.num_bins+1)) - 0.5) * self.bin_width \n self.bin_centers = self.bin_edges[:-1] + 0.5 * np.diff(self.bin_edges)\n\n # TODO: Add check in case bins get \"full\" (i.e. approach int16 max value)\n # now put data into hist using apply_along_axis to perform histogram for each pixel\n print(\" Calculating and adding to pixel histograms\")\n self.pix_hist[:,:,:] += np.apply_along_axis(lambda x: \n np.histogram(x,bins=self.bin_edges)[0],0,data)\n\n return self", "def prepare_data_dir(create_valid=DEFAULT_CREATE_VALID, valid_size=DEFAULT_VALID_SIZE) -> None:\n if create_valid and not os.path.isdir(VALID_PATH):\n print('Validation set not found, creating.')\n\n os.mkdir(VALID_PATH)\n\n for d in os.listdir(TRAIN_PATH):\n from_path = TRAIN_PATH + '/' + d\n to_path = VALID_PATH + '/' + d\n\n os.mkdir(to_path)\n\n files = os.listdir(TRAIN_PATH + '/' + d)\n random.shuffle(files)\n\n for f in files[:valid_size]:\n os.rename(from_path + '/' + f, to_path + '/' + f)\n\n elif not create_valid and os.path.isdir(VALID_PATH):\n print('Running without validation set, but ' + VALID_PATH + 'dir found. 
Recreate data dir before running.')\n exit(1)", "def _onLoad1DData(self, event):\r\n path = None\r\n dlg = wx.FileDialog(self, \"Choose a file\", os.getcwd(), \"\", \"*.txt\", wx.OPEN)\r\n if dlg.ShowModal() == wx.ID_OK:\r\n path = dlg.GetPath()\r\n mypath = os.path.basename(path)\r\n print mypath\r\n dlg.Destroy()\r\n\r\n file_x = []\r\n file_y = []\r\n file_dy = []\r\n file_dx = []\r\n if not path == None:\r\n self.path =path\r\n input_f = open(path,'r')\r\n buff = input_f.read()\r\n lines = buff.split('\\n')\r\n for line in lines:\r\n try:\r\n toks = line.split()\r\n x = float(toks[0])\r\n y = float(toks[1])\r\n #dx = math.sqrt(x)\r\n dx=1/x\r\n if dx >= x:\r\n dx = 0.9*x\r\n #dy = math.sqrt(y)\r\n dy=1/y\r\n if dy >= y:\r\n dy = 0.9*y\r\n file_x.append(x)\r\n file_y.append(y)\r\n file_dy.append(dy)\r\n file_dx.append(dx)\r\n\r\n except:\r\n print \"READ ERROR\", line\r\n\r\n # Sanity check\r\n if not len(file_x) == len(file_dx):\r\n raise ValueError, \"X and dX have different length\"\r\n if not len(file_y) == len(file_dy):\r\n raise ValueError, \"y and dy have different length\"\r\n # reset the graph before loading\r\n self.graph.reset()\r\n self.file_data.x = file_x\r\n self.file_data.y = file_y\r\n self.file_data.dy = file_dy\r\n #self.file_data.dy = None\r\n\r\n #self.file_data.dx = file_dx\r\n self.file_data.dx = None\r\n\r\n self.file_data.reset_view()\r\n\r\n self.file_data.name = \"Loaded 1D data\"\r\n self.graph.xaxis('\\\\rm{q} ', 'A^{-1}')\r\n self.graph.yaxis(\"\\\\rm{Intensity} \",\"cm^{-1}\")\r\n\r\n # Set the scale\r\n self.set_yscale('log')\r\n self.set_xscale('linear')\r\n #Add the default transformation of x and y into Property Dialog\r\n if self.get_xscale()=='log':\r\n xtrans=\"Log(x)\"\r\n if self.get_xscale()=='linear':\r\n xtrans=\"x\"\r\n if self.get_yscale()=='log':\r\n ytrans=\"Log(y)\"\r\n if self.get_yscale()=='linear':\r\n ytrans=\"y\"\r\n self.setTrans(xtrans,ytrans)\r\n\r\n #Plot the data\r\n self.graph.add(self.file_data)\r\n self. 
_onEVT_FUNC_PROPERTY()\r\n\r\n #self.graph.render(self)\r\n #self.subplot.figure.canvas.draw_idle()\r", "def generate_data(self):\n self.remove_hdf5_file()\n hdf5_handler = self.create_hdf5_file()\n self.populate_hdf5_file(hdf5_handler, self.dataset)", "def show_data_files(self):\n for idx in self.plot_data:\n self.plot_data[idx].show()", "def loadAndPlot1DMassData(dataFile='movingPointMassData/testPointMassData000.pkl'):\n # Load the data back\n inputDataFile = open(dataFile, \"rb\")\n dataOut = pickle.load(inputDataFile)\n inputDataFile.close()\n # Iterate over the different saved trajectores and plot out the results.\n for i in range(len(dataOut[0])):\n plt.figure(i)\n plt.plot(dataOut[0][i][1],dataOut[0][i][0])\n plt.show()", "def make_all_charts(data, dir_path, filename, num_categories, colorby, args,\r\n color_data, prefs, background_color, label_color,\r\n chart_type, generate_image_type, plot_width, plot_height,\r\n bar_width, dpi, resize_nth_label, label_type,\r\n include_html_legend, include_html_counts):\r\n\r\n # iterate over the preferences and assign colors according to taxonomy\r\n img_data = []\r\n for label, f_name in data:\r\n raw_fpath = os.path.join(\r\n dir_path,\r\n 'raw_data',\r\n os.path.split(f_name)[-1])\r\n # move raw file to output directory\r\n shutil.copyfile(f_name, raw_fpath)\r\n\r\n f = color_data['counts'][f_name]\r\n level = max([len(t.split(';')) - 1 for t in f[1]])\r\n\r\n for key in prefs.keys():\r\n if prefs[key]['column'] != str(level):\r\n continue\r\n col_name = 'Taxon'\r\n mapping = [['Taxon']]\r\n mapping.extend([[m] for m in f[1]])\r\n if 'colors' in prefs[key]:\r\n if isinstance(prefs[key]['colors'], dict):\r\n pref_colors = prefs[key]['colors'].copy()\r\n # copy so we can mutate\r\n else:\r\n pref_colors = prefs[key]['colors'][:]\r\n else:\r\n pref_colors = {}\r\n labelname = prefs[key]['column']\r\n\r\n # Define groups and associate appropriate colors to each group\r\n groups = group_by_field(mapping, col_name)\r\n pref_colors, data_colors, data_color_order = \\\r\n get_group_colors(groups, pref_colors)\r\n\r\n updated_pref_colors = {}\r\n\r\n if chart_type == 'area' and len(f[0]) == 1:\r\n raise ValueError(\r\n 'When generating area charts, the number of samples (or category values) must be greater than 1. 
However, you can still produce a pie chart or bar chart with only 1 sample (or category value), but you must remove the area chart value from the input arguments.')\r\n\r\n for key in pref_colors:\r\n updated_pref_colors[key.replace('\"', '')] = pref_colors[key]\r\n\r\n for i, val in enumerate(f[1]):\r\n f[1][i] = val.replace('\"', '')\r\n\r\n # parse the counts and continue processing\r\n img_data.extend(get_counts(label.strip(), colorby, num_categories,\r\n dir_path, level, f, prefs, updated_pref_colors,\r\n background_color,\r\n label_color, chart_type, generate_image_type,\r\n plot_width, plot_height, bar_width, dpi, raw_fpath,\r\n resize_nth_label, label_type, include_html_legend,\r\n include_html_counts))\r\n\r\n # generate html filepath\r\n outpath = os.path.join(dir_path, '%s_charts.html' % chart_type)\r\n out_table = ''.join(img_data)\r\n # write out html file\r\n write_html_file(out_table, outpath)", "def set_data_directory(path):\n gdc19.DATA_DIRECTORY = path\n return gdc19.DATA_DIRECTORY", "def __init__(self, directory):\n self._path = os.path.join(\"../../datasets\", directory)\n self.airlines = pd.read_csv(os.path.join(self._path, 'airlines.csv'))\n self.airports = pd.read_csv(os.path.join(self._path, 'airports.csv'))\n self.planes = pd.read_csv(os.path.join(self._path, 'planes.csv'))\n self.countries = pd.read_csv(os.path.join(self._path, 'countries.csv'))\n self.routes = pd.read_csv(os.path.join(self._path, 'routes.csv'))\n self._CreateGraph()", "def plot_data(data, par, par_names, par_fixed, output_dir='./'):\n\n datasets = dict()\n\n for data_point in data:\n experiment_name = data_point.par['experiment_name']\n datasets.setdefault(experiment_name, list()).append(data_point)\n\n for experiment_name, dataset in datasets.items():\n\n # ##### Matplotlib ######\n\n name_pdf = ''.join([experiment_name, '.pdf'])\n name_pdf = os.path.join(output_dir, name_pdf)\n\n name_txt = ''.join([experiment_name, '.fit'])\n name_txt = os.path.join(output_dir, name_txt)\n\n print(\" * {} [.fit]\".format(name_pdf))\n\n # #######################\n\n data_grouped = group_data(dataset)\n profiles, r2_min, r2_max = compute_profiles(data_grouped)\n ymin, ymax = set_lim([r2_min, r2_max], 0.10)\n\n with PdfPages(name_pdf) as file_pdf, open(name_txt, 'w') as file_txt:\n\n for (_index, id_), profile in sorted(profiles.items()):\n write_profile(id_, profile, file_txt)\n\n ###### Matplotlib ######\n\n fig = plt.figure(1, frameon=True)\n ax = fig.add_subplot(111)\n\n ax.axhline(0, color='black', alpha=0.87)\n\n ########################\n\n frq, r2_cal, r2_exp, r2_erd, r2_eru = profile[0]\n\n ax.plot(\n frq,\n r2_cal,\n linestyle='-',\n color=red200,\n zorder=2,\n )\n\n ax.errorbar(\n frq,\n r2_exp,\n yerr=[r2_erd, r2_eru],\n fmt='o',\n color=red500,\n zorder=3,\n )\n\n xmin, xmax = set_lim(frq, 0.10)\n\n ax.set_xlim(xmin, xmax)\n ax.set_ylim(ymin, ymax)\n\n ax.xaxis.set_major_locator(MaxNLocator(6))\n ax.yaxis.set_major_locator(MaxNLocator(6))\n\n ax.set_xlabel(r'$\\mathregular{\\nu_{CPMG} \\ (Hz)}$')\n ax.set_ylabel(\n r'$\\mathregular{R_{2,eff} \\ (s^{-1})}$')\n\n ax.set_title('{:s}'.format(id_.upper()))\n\n fig.tight_layout()\n\n ########################\n\n file_pdf.savefig()\n plt.close()\n\n ########################\n\n return", "def __init__(self, output_directory: str, step: int = 100, **kwargs) -> None:\n self.directory = output_directory\n self.plot_front = Plot(title=\"Pareto front approximation\", **kwargs)\n self.last_front = []\n self.fronts = []\n self.counter = 0\n self.step = step\n\n if 
Path(self.directory).is_dir():\n LOGGER.warning(\"Directory {} exists. Removing contents.\".format(self.directory))\n for file in os.listdir(self.directory):\n os.remove(\"{0}/{1}\".format(self.directory, file))\n else:\n LOGGER.warning(\"Directory {} does not exist. Creating it.\".format(self.directory))\n Path(self.directory).mkdir(parents=True)", "def generate_plots():\n\n hmp = homemonitor_plot()\n hmp.load_data()\n hmp.plot_day()\n hmp.plot_hist()", "def new_dataset(args):\n if not args.args:\n raise ParserError('you must specify an existing directory')\n outdir = Path(args.args.pop(0))\n if not outdir.exists():\n raise ParserError('you must specify an existing directory')\n\n id_pattern = re.compile('[a-z_0-9]+$')\n md = {}\n if args.args:\n md['id'] = args.args.pop(0)\n else:\n md['id'] = input('Dataset ID: ')\n\n while not id_pattern.match(md['id']):\n print('dataset id must only consist of lowercase ascii letters, digits and _ (underscore)!')\n md['id'] = input('Dataset ID: ')\n\n outdir = outdir / md['id']\n if not outdir.exists():\n outdir.mkdir()\n\n for key in ['title', 'url', 'license', 'conceptlist', 'citation']:\n md[key] = input('Dataset {0}: '.format(key))\n\n # check license!\n # check conceptlist!\n\n for path in Path(pylexibank.__file__).parent.joinpath('dataset_template').iterdir():\n if path.is_file():\n if path.suffix in ['.pyc']:\n continue # pragma: no cover\n target = path.name\n content = read_text(path)\n if '+' in path.name:\n target = re.sub(\n '\\+([a-z]+)\\+',\n lambda m: '{' + m.groups()[0] + '}',\n path.name\n ).format(**md)\n if target.endswith('_tmpl'):\n target = target[:-5]\n content = content.format(**md)\n write_text(outdir / target, content)\n else:\n target = outdir / path.name\n if target.exists():\n shutil.rmtree(str(target))\n shutil.copytree(str(path), str(target))\n del md['id']\n jsonlib.dump(md, outdir / 'metadata.json', indent=4)", "def generate_plots(self, input_data, input_labels=None):\n pass", "def source_data_files(self, data_dir, tmp_dir, dataset_split):\n raise NotImplementedError()", "def load_or_generate_data(self) -> None:\n x = np.linspace(0, 10, self.n_samples).reshape(-1, 1)\n y_sin = np.sin(x * 1.5)\n noise = np.random.randn(*x.shape)\n y = (y_sin + noise).reshape(x.shape[0], 1)\n self.x, self.y = x, y", "def generatePlot(data):\n addendum = \"\"\n destination = \"D:\\\\Research\\\\scripts\\\\Results\\\\FullSet1\\\\$FilteredPlots\\\\take 4\\\\\"\n if len(data.detections.smallIncrease) != 0:\n addendum = \"small increases\\\\\"\n if len(data.detections.smallDecrease) != 0:\n addendum = \"small decreases\\\\\"\n if len(data.detections.largeIncrease) != 0:\n addendum = \"large increases\\\\\"\n if len(data.detections.largeDecrease) != 0:\n addendum = \"large decreases\\\\\"\n if addendum == \"\":\n addendum = \"no decreases\\\\\"\n \n plt.figure(1)\n plt.subplot(211)\n #print np.min(data.magdata), np.max(data.magdata)\n axes = plt.gca()\n axes.set_title(\"Year: '{year}, Day: {day}\".format(year=data.calendarDay[:2], day=data.calendarDay[3:] ))\n axes.set_ylim([np.min(data.magdata)-1.2,np.max(data.magdata)+0.25])\n axes.set_ylabel(r'$\\mathbf{B}$ (nT)' )\n\n #plot formatting\n formats = dates.DateFormatter('%H:%M:%S')\n axes.xaxis.set_major_locator(dates.MinuteLocator())\n axes.xaxis.set_major_formatter(formats)\n \n br, = pp.plot(dates.date2num(data.timestamps),[row[0] for row in data.magdata],label='$B_r$')\n bt, = pp.plot(dates.date2num(data.timestamps),[row[1] for row in data.magdata],label='$B_t$')\n bn, = 
pp.plot(dates.date2num(data.timestamps),[row[2] for row in data.magdata],label='$B_n$')\n b0, = pp.plot(dates.date2num(data.timestamps),[row[3] for row in data.magdata],label='$B_0$')\n print len(data.detections.rotationBoundary)\n if len(data.detections.rotationBoundary) == 1:\n rotation, = pp.plot([dates.date2num(data.detections.rotationBoundary), dates.date2num(data.detections.rotationBoundary)], [np.min(data.magdata)-1,np.max(data.magdata)+0.25], linestyle='--', color = 'm', alpha = 0.4, label='$RB$')\n else:\n for index, value in enumerate(data.detections.rotationBoundary):\n rotation, = pp.plot([dates.date2num(value), dates.date2num(value)], [np.min(data.magdata)-1,np.max(data.magdata)+0.25], linestyle='--', color = 'm', alpha = 0.4, label='$RB$')\n if len(data.detections.rotationBoundary) != 0:\n pp.legend(handles=[br,bt,bn,b0,rotation], loc='lower left', ncol=4, fancybox=True, framealpha=0.5)\n else:\n pp.legend(handles=[br,bt,bn,b0], loc='lower left', ncol=4, fancybox=True, framealpha=0.5)\n\n start, end = axes.get_xlim()\n axes.xaxis.set_ticks(np.arange(start, end, (end-start)/5))\n \n \n\n plt.subplot(212)\n axes2 = plt.gca()\n #plot formatting\n formats = dates.DateFormatter('%H:%M:%S')\n axes2.xaxis.set_major_locator(dates.MinuteLocator())\n axes2.xaxis.set_major_formatter(formats)\n axes2.set_ylabel(r'$\\theta$ (deg)' )\n rotations, = pp.plot(dates.date2num(data.detections.rotationTimeTags),data.detections.rotations)\n #pp.legend(handles=[rotations], loc='lower left', ncol=4, fancybox=True, framealpha=0.5)\n \n\n outplotname = 'Plot ' + str(len(os.listdir(destination+addendum)) + 1)+ ' ' + data.timestamps[0].strftime('%y-%j-%H%M%S') + '.pdf'\n completename = os.path.join(destination+addendum,outplotname)\n plt.savefig(completename, bboxinches='tight')\n plt.clf()\n\n outplotname = 'Plot ' + str(len(os.listdir(destination+'rawdata\\\\'+addendum)) + 1)+ ' ' + data.timestamps[0].strftime('%y-%j-%H%M%S') + ' rawdata.csv'\n completename1 = os.path.join(destination+'rawdata\\\\'+addendum,outplotname)\n generateDataFile(data.rawdata,completename1)\n\n print \"Done generating plot...\"", "def make_directory_dataset(directory_path: str) -> None:\n dataset_path = Path(directory_path)\n test_path = dataset_path / 'test'\n train_path = dataset_path / 'train'\n\n if not dataset_path.is_dir():\n dataset_path.mkdir()\n\n if not test_path.is_dir():\n test_path.mkdir()\n\n if not train_path.is_dir():\n train_path.mkdir()", "def __init__(self, datafiles, plotter):\n self.datafiles = datafiles\n self.datasets = dict()\n self.plotter = plotter", "def plot_data(true_data, fake_data, out_name, out_path, plot_3d=False): \n with torch.no_grad():\n\n plt.clf()\n \n if not plot_3d:\n fig, ax = plt.subplots(1, 1, figsize=(6, 4))\n plt.scatter(true_data[:, 0], true_data[:, 1], c='orange', label='Real data')\n ax.legend()\n plt.savefig(out_path + 'real' + out_name + '.jpg')\n fig, ax = plt.subplots(1, 1, figsize=(6, 4))\n plt.scatter(fake_data[:, 0], fake_data[:, 1], c='green', label='Generated data')\n ax.legend()\n plt.savefig(out_path + 'fake' + out_name + '.jpg')\n else:\n fig = plt.figure(figsize=(4, 4))\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(true_data[:, 0], true_data[:, 1], true_data[:, 2], c='orange', label='Real data')\n ax.legend()\n plt.savefig(out_path + 'real' + out_name + '.jpg')\n fig = plt.figure(figsize=(4, 4))\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(fake_data[:, 0], fake_data[:, 1], fake_data[:, 2], c='green', label='Generated data')\n ax.legend()\n 
plt.savefig(out_path + 'fake' + out_name + '.jpg')", "def scatter(filename, data, lines=[]):\n import matplotlib.pyplot as plot\n plot.figure(random.randint(0, 10000000))\n plot.scatter(data[0], data[1], 20, 'b', 'o')\n plot.title(filename.split('.')[0])\n for line in lines:\n plot.plot([line[0], line[2]], [line[1], line[3]], '-')\n plot.savefig(filename)", "def create_output_folder(self):\n if not os.path.exists(self.current_path):\n os.mkdir(self.current_path)\n data_dir_by_date = datetime.datetime.now().strftime(\n \"data-%d-%b_%H-%M-%S\")\n self.date_path = os.path.join(self.current_path, data_dir_by_date)\n if not os.path.exists(self.date_path):\n os.mkdir(self.date_path)", "def plot_and_save(data, prefix, name):\n plt.figure()\n plt.hist(data)\n plt.title(name)\n plt.xlabel(\"Value\")\n plt.ylabel(\"Frequency\")\n plt.savefig(prefix + name + '.png')\n plt.close()" ]
[ "0.61566585", "0.61496145", "0.59893596", "0.5787162", "0.57870215", "0.5718453", "0.57132596", "0.56776357", "0.56348133", "0.56186616", "0.56014526", "0.5600899", "0.55958813", "0.55899835", "0.5586797", "0.555859", "0.5540925", "0.55300957", "0.552718", "0.55126595", "0.5480727", "0.54774237", "0.54577386", "0.54325444", "0.5432521", "0.54154235", "0.541319", "0.5406593", "0.5388614", "0.53846616", "0.53685826", "0.5366022", "0.53562105", "0.53542906", "0.5341142", "0.5340969", "0.53405404", "0.53283614", "0.5324644", "0.5309101", "0.5308617", "0.5302224", "0.5293651", "0.52922386", "0.5292167", "0.52895755", "0.5280521", "0.52788967", "0.5270485", "0.52702767", "0.5259383", "0.525833", "0.5254746", "0.5251819", "0.52510935", "0.5248963", "0.5241925", "0.52344584", "0.52343774", "0.52224994", "0.5219697", "0.5217367", "0.52115816", "0.52108365", "0.5209917", "0.52091", "0.51982355", "0.51966745", "0.5187102", "0.5185259", "0.51778907", "0.5163785", "0.5157514", "0.51531416", "0.5150872", "0.5149904", "0.51487356", "0.5147992", "0.5134493", "0.51315355", "0.5131146", "0.5130919", "0.5130868", "0.5129968", "0.5124819", "0.51213443", "0.51207745", "0.51197404", "0.51168954", "0.5113792", "0.5112305", "0.51106346", "0.5107451", "0.5106833", "0.51036036", "0.5099796", "0.50952506", "0.509136", "0.5084876", "0.5083914" ]
0.5090048
98
Push the item to the front of the deque
def enqueue_front(self, item): self._items.insert(0, item)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def push(self, item):\n super().add_item_to_front(item)", "def push_front(self, e):\n if(self.size_ >= self.capacity_):#If our Deque is full we need to resize it first\n self.resize_front()\n self.data_[self.front_]= e#New Front\n self.size_+=1\n # print(\"Case 1\")\n elif(self.front_ == -1 and self.size_ ==0) :#If the Deque is intially empty then when we add the first item that will be both the front and the back \n self.front_= 0\n self.back_ = 0\n self.data_[self.front_]= e #Inserting First element in deque either front end or rear end they both lead to the same result.\n self.size_+=1\n # print(\"Case 2\")\n elif (self.front_ ==0):#If the front is at the beginning of the Deque.This may happen after the first insertion.\n self.front_-=1\n self.data_[self.front_] = e\n self.size_+=1\n # print(\"Case 3\")\n else:\n self.front_ -=1 #We add normally \n self.data_[self.front_] = e\n self.size_+=1\n #print(\"Case 4\")", "def add_front(self, item):\n\n self.items.insert(0, item)", "def push_front(self, e):\n # initialize new Node with data e\n newNode = Node(e)\n # if the deque is empty\n if self.size == 0:\n # set the front and back to the new node\n self.front = self.back = newNode\n # if deque is not empty\n else:\n # previous front node is the prior to the new front Node\n newNode.prior = self.front\n # previous front node's next node is new node\n self.front.next = newNode\n # front node is the new node\n self.front = newNode\n # increment deque size\n self.size += 1", "def push(self, item):\n self.list.prepend(item)", "def insertFront(self, item):\n self.sentinel.insertAfter(item)\n self.N += 1", "def push(self, item):\n self.linked_list.prepend(item)", "def append_front(self, item):\n\n self.front = Node(item, self.front)", "def push(self, val):\r\n return self.deque.append(val)", "def enqueue(self, item):\n\t\tself.items.insert(0, item)", "def enqueue(self, item):\n self.items.insert(0, item)", "def enqueue(self, item):\n self.items.insert(0, item)", "def push(self, item):\n\t\tself.top+=1;\n\t\tself.arr.insert(self.top, item);", "def add_first(self, data):\n self.deque.insert(0, data)", "def enqueue(self, item):\n if self.rear == None:\n self.front = Node(item)\n self.rear = self.front\n else:\n self.rear.next = Node(item)\n self.rear = self.rear.next", "def push_front(self, val):\n new_node = Node(val, self.head)\n if self.is_empty():\n self.tail = new_node\n self.head = new_node\n self.size += 1", "def push_front(self, item):\n new_node = Node(item)\n # if the list is empty, make it head\n if self.head is None:\n self.head = new_node\n # else, \n else:\n new_node.next = self.head # new node points to current head\n self.head = new_node # current head points to new_node\n self.n += 1", "def enqueue(self, item):\n self.__queue.insert(0, item)", "def push_back(self, e):\n if(self.size_ >= self.capacity_):#If our Deque is full we need to resize it first\n self.resize_back()\n self.back_+=1\n self.data_[self.back_]= e\n self.size_+=1\n #print(\"case 1\")\n elif (self.front_ == -1 and self.size_==0):#If the Deque is intially empty then when we add the first item that will be both the front and the back \n self.front_= 0\n self.back_=0\n self.data_[self.back_]= e\n self.size_+=1\n else:#The Back is not at the first index(possibly somewhere in between) and if we push back it we have to go up by one to move to the new back\n self.back_+=1\n self.data_[self.back_] =e \n self.size_+=1", "def push(self, item):\r\n self.stack.insert(0, item)", "def push_front(self, val: Generic[T]) -> None:\n 
first_node = self.node.next\n\n self.node.next = Node(val)\n latest_first = self.node.next\n\n latest_first.prev = self.node #pushes the node to the front\n latest_first.next = first_node\n first_node.prev = latest_first #rearranges the list", "def left_enqueue(self, item):\n item_to_add = Node(item)\n item_to_add.set_next(self.head)\n\n # if the deque is empty, the new item is the tail\n if not self.tail:\n self.tail = item_to_add\n else:\n # connect the old head to the new head\n self.head.set_prev(item_to_add)\n\n # set the new node as the head\n self.head = item_to_add\n self.size += 1", "def push(self, x):\n self.values.append(x)\n if len(self.values) == 1:\n self.front = x", "def enqueue(self, item):\n # double size of array if necessary and recopy to front of array\n if self._N == len(self._q):\n self._resize(2*len(self._q)) # double size of array if necessary\n self._q[self._last] = item # add item\n self._last += 1\n if self._last == len(self._q):\n self._last = 0 # wrap-around\n self._N += 1", "def push_front(self, value):\n new_node = self.Node(value)\n\n # Edge Case : List is empty\n if self._size == 0:\n self._tail = new_node\n self._head = new_node\n self._size += 1\n return\n\n new_node.next = self._head\n self._head.prev = new_node\n self._head = new_node\n self._size += 1", "def addFront(self, item, clock):\n temp = Node2Way(item, clock)\n temp.setPrevious(self._front)\n \n if self._size == 0:\n self._rear = temp\n else:\n self._front.setNext(temp)\n \n self._front = temp\n self._size += 1", "def enqueue(self, item):\n while len(self._stack1) > 0:\n self._stack2.push(self._stack1.pop())\n self._stack2.push(item)", "def enqueue(self, item):\n old_last = self.last\n self.last = self.Node(item)\n\n if self.is_empty():\n self.first = self.last\n else:\n old_last.next_node = self.last\n\n self.N += 1", "def push_front(self, param):\n if self.size == self.capacity:\n self.resize(2 * self.size)\n for _ in range(self.arr):\n pass", "def push(self, item):\n if len(self._data) == self.MAX_SIZE:\n # full we have to pop the oldest item (head)\n self._data.pop(0)\n self._data.append(item)", "def push(self, x): # time O(n)\n self.queue.append(x)\n for _ in range(len(self.queue)-1):\n self.queue.append(self.queue.popleft())", "def enqueue(self, item):\n self.list.append(item)", "def enqueue(self, item):\n self.queue.append(item)", "def enqueue(self, item):\n self.queue.append(item)", "def push_front(self, data):\n n = Node(data)\n if self.empty():\n self.head = n\n return\n\n l = self.head\n self.head = n\n n.next = l\n return", "def push(self, item): # 05:27 Lecture Week 2 \"Stacks\" (16:24)\n oldfirst = self.first # Save a link to the list\n self.first = self._Node(item, oldfirst) # first points to most recent Node\n self.N += 1", "def push(self, item):\n self._tail_iters.append(iter([item]))", "def enqueue(self, val):\n\n if self.front is None:\n self.back = self.front = Node(val)\n self._size += 1\n else:\n self.back._next = self.back = Node(val)\n self._size += 1\n return self.back", "def push(self, item):\n # Reassign the link to top of the stack and update the\n self.top = [item] + self.top", "def right_enqueue(self, item):\n item_to_add = Node(item)\n item_to_add.set_prev(self.tail)\n\n # if the deque is empty, the new item is the head\n if not self.head:\n self.head = item_to_add\n else:\n # connect the old tail to the new tail\n self.tail.set_next(item_to_add)\n\n self.tail = item_to_add\n self.size += 1", "def push(self, node):\n self.prepend(node)", "def enqueue(queue, 
item):\n new_node = Node(item, None)\n if empty_queue(queue):\n queue.front = new_node\n queue.back = new_node\n else:\n queue.back.next = new_node\n queue.back = new_node\n queue.size = queue.size + 1", "def push(self, x):\n if len(self.stack1) == 0:\n self.front = x\n self.stack1.append(x)", "def push_back(self, e):\n # initialize new node with data e\n newNode = Node(e)\n # if deque is empty\n if self.size == 0:\n # set both front and back to new node\n self.front = self.back = newNode\n # if deque is not empty\n else:\n # set new node's next as the previous back, set previous back's\n # prior to the new node, and set the back of the deque to new node\n newNode.next = self.back\n self.back.prior = newNode\n self.back = newNode\n # increment deque size\n self.size += 1", "def push(self, item):\n heapq.heappush(self.heap, item)", "def push(self, x):\n self.queue.insert(len(self.queue), x)", "def enqueue(Q, x):\n # Q.append(x)\n Q.put_nowait(x)\n if debug: \n print(\"enqueue\", x, \":\", end=\" \")\n show_queue(Q)\n return Q", "def insert_front(self, value: int) -> bool:\r\n if self.size != self.capacity:\r\n self.deque[self.frontIndex] = value\r\n self.size += 1\r\n if self.frontIndex == 0:\r\n self.frontIndex = self.capacity - 1\r\n else:\r\n self.frontIndex -= 1\r\n return True\r\n return False", "def push(self, item: tuple):\n self.__heap.append(item)\n self.__sift_up(self.__len__() - 1)", "def enqueue(self, item):\n self._queue.append(item)", "def enqueue(self, val):\n self.stack1.push(val)", "def push_front(self, value):\n node = DLLNode(value)\n if self.head is None:\n self.tail = node \n else: \n self.head.prev_node = node \n node.next_node = self.head\n self.head = node", "def add_prev(self, item, index):\n if index in self.d_buffer.keys():\n return\n if len(self) == self._size:\n self.pop_last()\n self.add_item(item, index)", "def push(self, item):\n self.stack.append(item)", "def push(self, item):\n self.stack.append(item)", "def test_head_of_deque_when_using_append(val, result, filled_deque):\n filled_deque.append(val)\n assert filled_deque._container.head.val == val", "def enqueue_rear(self, item):\n self._items.append(item)", "def enqueue(self, item):\n\n self.__items__.append(item)", "def push(self,item):\n self.items.append(item)", "def push(self, item):\n self.stack.append(item)\n\n if not self.max or item >= self.max[-1]: # add if empty or if greater\n self.max.append(item)", "def push(self, item):\n pass", "def push(self, item):\n array = self.array\n compare = self.compare\n array.append(item)\n self.pos[item] = len(array) - 1\n high = len(array) - 1\n while high > 0:\n low = (high-1)/2\n if compare(array[low], array[high]) <= 0:\n break\n self.pos[array[high]] = low\n self.pos[array[low]] = high\n array[low], array[high] = array[high], array[low]\n high = low", "def push(self, item):\n\t\tself.items.append(item)", "def enqueue(self, value):\n node = Node(value)\n\n if not self.front:\n\n self.front = node\n self.rear = node\n else:\n self.rear.next = node\n self.rear = self.rear.next", "def push(self, x):\r\n if self.point_to_head.chi == None:\r\n self.point_to_head.chi = MyQueueNode(x)\r\n self.point_to_tail.chi = self.point_to_head.chi\r\n else:\r\n self.point_to_tail.chi.chi = MyQueueNode(x)\r\n self.point_to_tail.chi = self.point_to_tail.chi.chi", "def enqueue(self, x):\r\n self.queue.append(x)\r\n return self.queue", "def add(self, item: T) -> None:\n self._queue.append(item)\n if not self.is_empty():\n self._queue.sort(reverse=True)", "def push(self, x):\n 
self.queue.insert(0, x)", "def push(self, x):\n self.queue.insert(0, x)", "def push(self, item):\n self._cursor_stack.append(item)", "def push(self, item):\n self.items.append(item)", "def push(self, item):\n self.items.append(item)", "def push(self, item):\n self.items.append(item)", "def push(self, item):\n self.items.append(item)", "def push(self, item):\n self.items.append(item)", "def push(self, item):\n if self.top == None:\n self.top = Node(item)\n else:\n new_node = Node(item)\n new_node.next = self.top\n self.top = new_node", "def enqueue(self, value): ################# <-\n self.lst = self.lst +[value]", "def push(self, x):\r\n self.queue.append(x)", "def push(self, item):\n self.heap.append(self.m * item)\n self._sift_up()", "def insertFront(self, value: int) -> bool:\n \n if not self.isFull():\n self._deque[self._front] = value\n self._front = (self._front + 1) % self._k\n self._elems += 1\n return True\n \n return False", "def enqueue(self, val):\r\n self.queue.append(val)", "def bypass_queue(self, name):\n # self.queue = [name] + self.queue\n # self.queue.insert(0, name)\n\n # self.lst = [name] + self.lst # This person is brought to the front of the queue\n self.lst.insert(0, name) #Not constant time as the pointer is moved for all the members of the queue, 0(n)\n print(f\"{name} has bypassed the queue\")", "def push(self, x: int) -> None:\n _deque_len = len(self._deque)\n self._deque.append(x)\n for i in range(_deque_len):\n self._deque.append(self._deque.pop(0))", "def add(self, element):\n\n if self.style == 'FIFO': # If FIFO, append element to end of list\n self.queue.append(element)\n\n elif self.style == 'LIFO': # If LIFO, append element to front of list\n self.queue.insert(0, element)", "def push(self, item):\n self._pushed.append(item)", "def push(self, elt):\n if len(self._queue) == 0: self._queue.append(elt); return\n for i in range(len(self._queue)):\n if self._queue[i].priority < elt.priority:\n self._queue.insert(i, elt)\n return\n #if we get here, elt is lower than all the other procs in the queue, so\n #just append it\n self._queue.append(elt)", "def enqueue(self, value): ################# <-\n self.top = Node(value, next=self.top)", "def enqueue(self, value): ################# <-\n self.top = Node(value, next=self.top)", "def push(self, Item):\n self.data_container.insert(0, Item)", "def Push(self, item):\n self.list.append(item)", "def push(self, item):\n if item not in self._items:\n self._items.append(item)", "def enqueue(self, x):\n self.s1.push(x)", "def enqueue(self, element):\n self.the_queue.append(element)", "def push(self, item: str):\n if self.hit or not self._stack:\n self._stack.append([])\n self._stack[-1].append(item)\n self.hit = False", "def enqueue(self, val):\n self.q1.append(val)", "def enqueue(self, item: Any) -> None:\n node = Node(item, self.rear)\n self.rear = node\n self.num_items += 1", "def push(self, x: int) -> None:\n self.q.append(x)\n for _ in range(len(self.q) - 1):\n self.q.append(self.q.popleft())", "def enqueue(self, item):\n\n self._data.append(item)", "def push(self, item) -> None:\n self.items.append(item)", "def push(self, item):\n node = Node(item)\n node.next = self._top\n self._top = node" ]
[ "0.7934391", "0.79339534", "0.7679568", "0.74891657", "0.74482065", "0.7417094", "0.7404675", "0.7363072", "0.7294415", "0.72747624", "0.7242487", "0.7242487", "0.72093624", "0.71174246", "0.707182", "0.7052566", "0.70389926", "0.70353955", "0.701858", "0.70012", "0.6997899", "0.69728386", "0.6923687", "0.6915132", "0.69096", "0.68918145", "0.6890495", "0.68384916", "0.68355286", "0.6809102", "0.6802931", "0.6801296", "0.67934257", "0.67934257", "0.67820495", "0.67725104", "0.6769395", "0.67689204", "0.6716947", "0.67076266", "0.6695346", "0.66930276", "0.66776425", "0.66676253", "0.6663456", "0.6658203", "0.66400236", "0.66165113", "0.6610592", "0.65907615", "0.6563159", "0.6559549", "0.6550767", "0.65502894", "0.65502894", "0.6545525", "0.65203846", "0.65155077", "0.65110695", "0.6507889", "0.65000916", "0.64930797", "0.6478924", "0.647582", "0.64740425", "0.6459312", "0.64576036", "0.6437319", "0.6437319", "0.64181054", "0.64025664", "0.64025664", "0.64025664", "0.64025664", "0.64025664", "0.6401612", "0.63977736", "0.6381067", "0.63808215", "0.63787806", "0.63761705", "0.63716185", "0.6367465", "0.63648874", "0.63575816", "0.63510644", "0.63510334", "0.63510334", "0.63508576", "0.63382417", "0.63356686", "0.63355684", "0.6332192", "0.6330904", "0.6329441", "0.63184565", "0.6318265", "0.6316561", "0.63143855", "0.6307564" ]
0.8026828
0
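As a point of reference for the record above: its positive document pushes to the front of a list-backed deque by inserting at index 0. The following is a minimal, hedged sketch of such a deque — only enqueue_front and the _items attribute come from the record itself; the ListDeque class name and the driver lines are illustrative assumptions, not part of the dataset.

    class ListDeque:
        """Illustrative list-backed deque; the front of the deque is index 0 of the list."""

        def __init__(self):
            self._items = []

        def enqueue_front(self, item):
            # Same behaviour as the positive document above: insert before the current front.
            # Note that list.insert(0, ...) is O(n), since every existing element shifts right.
            self._items.insert(0, item)


    dq = ListDeque()
    dq.enqueue_front(2)
    dq.enqueue_front(1)
    assert dq._items == [1, 2]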
Push the item to the end of the deque
def enqueue_rear(self, item): self._items.append(item)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def push(self, val):\r\n return self.deque.append(val)", "def push(self, item):\n self._tail_iters.append(iter([item]))", "def push(self, item):\n\t\tself.top+=1;\n\t\tself.arr.insert(self.top, item);", "def push(self, item):\n if len(self._data) == self.MAX_SIZE:\n # full we have to pop the oldest item (head)\n self._data.pop(0)\n self._data.append(item)", "def push_back(self, e):\n if(self.size_ >= self.capacity_):#If our Deque is full we need to resize it first\n self.resize_back()\n self.back_+=1\n self.data_[self.back_]= e\n self.size_+=1\n #print(\"case 1\")\n elif (self.front_ == -1 and self.size_==0):#If the Deque is intially empty then when we add the first item that will be both the front and the back \n self.front_= 0\n self.back_=0\n self.data_[self.back_]= e\n self.size_+=1\n else:#The Back is not at the first index(possibly somewhere in between) and if we push back it we have to go up by one to move to the new back\n self.back_+=1\n self.data_[self.back_] =e \n self.size_+=1", "def push(self, item):\n super().add_item_to_front(item)", "def right_enqueue(self, item):\n item_to_add = Node(item)\n item_to_add.set_prev(self.tail)\n\n # if the deque is empty, the new item is the head\n if not self.head:\n self.head = item_to_add\n else:\n # connect the old tail to the new tail\n self.tail.set_next(item_to_add)\n\n self.tail = item_to_add\n self.size += 1", "def push(self, item):\n self.list.prepend(item)", "def push(self, item):\n self.stack.append(item)\n\n if not self.max or item >= self.max[-1]: # add if empty or if greater\n self.max.append(item)", "def enqueue(self, item):\n # double size of array if necessary and recopy to front of array\n if self._N == len(self._q):\n self._resize(2*len(self._q)) # double size of array if necessary\n self._q[self._last] = item # add item\n self._last += 1\n if self._last == len(self._q):\n self._last = 0 # wrap-around\n self._N += 1", "def enqueue(self, item):\n\t\tself.items.insert(0, item)", "def enqueue(self, item):\n self.items.insert(0, item)", "def enqueue(self, item):\n self.items.insert(0, item)", "def push(self,item):\n self.items.append(item)", "def push(self, item):\n self.linked_list.prepend(item)", "def push_front(self, e):\n if(self.size_ >= self.capacity_):#If our Deque is full we need to resize it first\n self.resize_front()\n self.data_[self.front_]= e#New Front\n self.size_+=1\n # print(\"Case 1\")\n elif(self.front_ == -1 and self.size_ ==0) :#If the Deque is intially empty then when we add the first item that will be both the front and the back \n self.front_= 0\n self.back_ = 0\n self.data_[self.front_]= e #Inserting First element in deque either front end or rear end they both lead to the same result.\n self.size_+=1\n # print(\"Case 2\")\n elif (self.front_ ==0):#If the front is at the beginning of the Deque.This may happen after the first insertion.\n self.front_-=1\n self.data_[self.front_] = e\n self.size_+=1\n # print(\"Case 3\")\n else:\n self.front_ -=1 #We add normally \n self.data_[self.front_] = e\n self.size_+=1\n #print(\"Case 4\")", "def push(self, item):\n pass", "def add_last(self, data):\n self.deque.append(data)", "def push(self, item):\n\t\tself.items.append(item)", "def push(self, x): # time O(n)\n self.queue.append(x)\n for _ in range(len(self.queue)-1):\n self.queue.append(self.queue.popleft())", "def push(self, item):\n self.items.append(item)", "def push(self, item):\n self.items.append(item)", "def push(self, item):\n self.items.append(item)", "def push(self, item):\n 
self.items.append(item)", "def push(self, item):\n self.items.append(item)", "def push(self, item: tuple):\n self.__heap.append(item)\n self.__sift_up(self.__len__() - 1)", "def push(self, item) -> None:\n self.items.append(item)", "def push(self, item):\r\n self.stack.insert(0, item)", "def enqueue(self, item):\n old_last = self.last\n self.last = self.Node(item)\n\n if self.is_empty():\n self.first = self.last\n else:\n old_last.next_node = self.last\n\n self.N += 1", "def push(self, e):\n # if reach the maxlen size\n if self._size == self._maxlen:\n self._front = (self._front + 1) % len(self._data)\n avail = (self._front + self._size - 1) % len(self._data)\n self._data[avail] = e\n return\n # not reach the maxlen size\n if self._size == len(self._data):\n self._resize(2 * self._size)\n avail = (self._front + self._size) % len(self._data)\n self._data[avail] = e\n self._size += 1", "def push(self, item):\n if hasattr(item, \"__iter__\"):\n self.items.extend(item)\n else:\n self.items.append(item)", "def push(self, item):\n if item not in self._items:\n self._items.append(item)", "def push(self, item):\n heapq.heappush(self.heap, item)", "def enqueue(self, item):\n self.__queue.insert(0, item)", "def add(self, item):\r\n if len(self.buff)==self.size: self.buff.pop(0)\r\n self.buff.append(item)", "def push(self, item) -> None:\n self._items.append(item)", "def push(self, item):\n array = self.array\n compare = self.compare\n array.append(item)\n self.pos[item] = len(array) - 1\n high = len(array) - 1\n while high > 0:\n low = (high-1)/2\n if compare(array[low], array[high]) <= 0:\n break\n self.pos[array[high]] = low\n self.pos[array[low]] = high\n array[low], array[high] = array[high], array[low]\n high = low", "def enqueue_front(self, item):\n self._items.insert(0, item)", "def push(self, item):\n self._items.append(item)", "def add(self, item: T) -> None:\n self._queue.append(item)\n if not self.is_empty():\n self._queue.sort(reverse=True)", "def append(self, item):\n\t\theapq.heappush(self.heap, (self.f(item), item))", "def append(self, item):\n if self.full or self.pre_allocated:\n # overwrite\n self.data[self.cur] = item\n else:\n self.data.append(item)\n if not self.full:\n self.full = self.cur == self.max - 1\n self.cur = (self.cur + 1) % self.max", "def enqueue(self, item):\n self.queue.append(item)", "def enqueue(self, item):\n self.queue.append(item)", "def push(self, item):\n self.stack.append(item)", "def push(self, item):\n self.stack.append(item)", "def enqueue(self, item):\n self.list.append(item)", "def push_back(self, e):\n # initialize new node with data e\n newNode = Node(e)\n # if deque is empty\n if self.size == 0:\n # set both front and back to new node\n self.front = self.back = newNode\n # if deque is not empty\n else:\n # set new node's next as the previous back, set previous back's\n # prior to the new node, and set the back of the deque to new node\n newNode.next = self.back\n self.back.prior = newNode\n self.back = newNode\n # increment deque size\n self.size += 1", "def push(self, x):\n self.values.append(x)\n if len(self.values) == 1:\n self.front = x", "def push(self, item): # 05:27 Lecture Week 2 \"Stacks\" (16:24)\n oldfirst = self.first # Save a link to the list\n self.first = self._Node(item, oldfirst) # first points to most recent Node\n self.N += 1", "def push(self, item):\n self._items.append(item)\n # This operation increments the number of items\n # in the stack, we need to update the count variable\n self._update_count()", "def push(self, x):\n 
self.queue.insert(len(self.queue), x)", "def push(self, item):\n self._pushed.append(item)", "def push(self, Item):\n self.data_container.insert(0, Item)", "def push(self, item):\n # Reassign the link to top of the stack and update the\n self.top = [item] + self.top", "def push(self, item):\n\n self._list.append(item)", "def enqueue(self, item):\n while len(self._stack1) > 0:\n self._stack2.push(self._stack1.pop())\n self._stack2.push(item)", "def push(self, value):\n self.last = self.current\n self.current = np.array(value)", "def push(self, item: T) -> None:\n pass", "def Push(self, item):\n self.list.append(item)", "def enqueue(self, item):\n\n self.__items__.append(item)", "def append ( self , item ) :\n self.cond.acquire()\n try:\n if self.closed :\n raise Exception( \"Trying to append to a closed queue\" )\n else :\n self.weight += int( item['size'] )\n self.push( item )\n self.cond.notify()\n finally:\n self.cond.release()", "def push(self, item):\n self._data.append(item)", "def push(self, value):\n self.append(value)\n return len(self) - 1", "def push(self, x: int) -> None:\n self.que.append(x)", "def push(self, item):\n self.heap.append(self.m * item)\n self._sift_up()", "def push(self, item: str):\n if self.hit or not self._stack:\n self._stack.append([])\n self._stack[-1].append(item)\n self.hit = False", "def enqueue(self, item):\n self._queue.append(item)", "def push(self, x: int) -> None:\n self.q.append(x)\n for _ in range(len(self.q) - 1):\n self.q.append(self.q.popleft())", "def push(self, item):\n self._cursor_stack.append(item)", "def push(self, elem):\n pass", "def push(self, x):\r\n self.queue.append(x)", "def test_deque_append_one_node(dq):\n dq.append(4)\n assert dq._dll.head.data == 4", "def push(self, item: Any) -> None:\n if self.hit is True and self._stack:\n logger.warning(\n \"Previous value of argument %r is overwritten with %r.\",\n self.namestr(),\n item,\n )\n self._stack = []\n\n if self.hit or not self._stack:\n self._stack.append([])\n self._stack[-1].append(item)\n self.hit = False", "def push(self, e):\n\t\tself._head = self._Node(e, self._head)\n\t\tself._size += 1", "def push(self, val):\n self.insert(val)", "def add_first(self, data):\n self.deque.insert(0, data)", "def push(self, x: int) -> None:\n _deque_len = len(self._deque)\n self._deque.append(x)\n for i in range(_deque_len):\n self._deque.append(self._deque.pop(0))", "def push(self, item: Any) -> None:\n self._items.append(item)", "def append(self, item):\r\n self.stack.append(item)", "def push(self, value):\n self.items.append(value)", "def push(self, node):\n try:\n self._load(True)\n\n # Stow the new node at our head and increment it\n self.db[self.head] = node\n self.head = self.head + 1\n if self.head >= self.size:\n self.head -= self.size\n self.db['head'] = self.head\n\n # If we haven't just also pushed out an old item,\n # increment the count of items in our db.\n if self.count < self.size:\n self.count += 1\n self.db['count'] = self.count\n except KeyError:\n # HACK\n self.clear()", "def push(self, item):\n\n self._stack.append(item)", "def pop_from_deque(self):", "def add(self, element):\n\n if self.style == 'FIFO': # If FIFO, append element to end of list\n self.queue.append(element)\n\n elif self.style == 'LIFO': # If LIFO, append element to front of list\n self.queue.insert(0, element)", "def push(self, value):\n if self.please_stop and not self.allow_add_after_close:\n Log.error(\"Do not push to closed queue\")\n\n with self.lock:\n self._wait_for_queue_space()\n if not 
self.please_stop:\n self.queue.appendleft(value)\n return self", "def push(self, element):\n if not self.full():\n heapq.heappush(self.queue, element)\n self.size += 1\n return True\n else:\n if element >= self.queue[0]:\n heapq.heapreplace(self.queue, element)\n return True\n else:\n return False", "def push(self, transition):\n # if we reached the capacity, delete oldest item\n if (self.size == self.capacity):\n del self.queue[0]\n self.queue.append(transition)", "def push(self, x: int) -> None:\n self.q.append(x)\n for _ in range(len(self.q) - 1):\n self.q.append(self.q.pop(0))", "def enqueue(self, val):\n if self.size+1 == self.capacity:\n self.grow() # double the array size\n #avail = (self.head + self.size) % len(self.data)\n self.data[self.tail] = val\n self.size += 1\n self.tail = (self.tail + 1) % self.capacity\n return None", "def add(self, data):\n wasquiet = True if (self.tail == self.curr) else False\n\n # Assert the queue is clean\n qtail = self.base + \".\" + str(self.tail)\n print \"creating %s\" % qtail\n assert not os.path.exists(qtail)\n qt = open(qtail, \"w\")\n qt.write(data)\n qt.close()\n\n # Where does the next item go\n self.tail += 1\n self._settail(self.tail)\n\n return wasquiet", "def test_head_of_deque_when_using_append(val, result, filled_deque):\n filled_deque.append(val)\n assert filled_deque._container.head.val == val", "def enqueue(self, item):\n\n self._data.append(item)", "def insertBack(self, item):\n self.sentinel.prev.insertAfter(item)\n self.N += 1", "def push(self, x):", "def left_enqueue(self, item):\n item_to_add = Node(item)\n item_to_add.set_next(self.head)\n\n # if the deque is empty, the new item is the tail\n if not self.tail:\n self.tail = item_to_add\n else:\n # connect the old head to the new head\n self.head.set_prev(item_to_add)\n\n # set the new node as the head\n self.head = item_to_add\n self.size += 1", "def push(self, elt):\n if len(self._queue) == 0: self._queue.append(elt); return\n for i in range(len(self._queue)):\n if self._queue[i].priority < elt.priority:\n self._queue.insert(i, elt)\n return\n #if we get here, elt is lower than all the other procs in the queue, so\n #just append it\n self._queue.append(elt)", "def push(self, new_item):\n self.items.append(new_item)", "def add_next(self, item, index):\n if index in self.d_buffer.keys():\n return\n if len(self) == self.size:\n self.pop_first()\n self.add_item(item, index)", "def insert(self, item):\n self.heaplist.append(item)\n self.currentsize += 1\n self.shift_item_up(self.currentsize)" ]
[ "0.77419525", "0.7716661", "0.75581473", "0.74057865", "0.73719305", "0.7366627", "0.7267218", "0.7221633", "0.7205309", "0.71838725", "0.71715564", "0.716415", "0.716415", "0.71557266", "0.7145821", "0.7130648", "0.7110882", "0.71088755", "0.7106317", "0.7062142", "0.7058204", "0.7058204", "0.7058204", "0.7058204", "0.7058204", "0.70562845", "0.7026512", "0.7023156", "0.70049876", "0.69966793", "0.69885325", "0.69856215", "0.69717085", "0.69608843", "0.69413155", "0.6931694", "0.6926045", "0.69219255", "0.6920903", "0.69196314", "0.691714", "0.69105864", "0.6883894", "0.6883894", "0.6874171", "0.6874171", "0.6864045", "0.68628085", "0.6846585", "0.68445545", "0.684336", "0.6843081", "0.6839465", "0.68296254", "0.68243414", "0.6790193", "0.6785099", "0.67810583", "0.67798686", "0.67717445", "0.676561", "0.6746466", "0.6737541", "0.6726032", "0.6722799", "0.671989", "0.67186314", "0.67026204", "0.67023116", "0.6696492", "0.6688167", "0.66740113", "0.6670849", "0.66689193", "0.66673374", "0.6658925", "0.66557384", "0.6646234", "0.66430753", "0.6638132", "0.6629656", "0.6629561", "0.66148007", "0.6612411", "0.6608504", "0.66057986", "0.659393", "0.65916294", "0.6587762", "0.6554451", "0.6546432", "0.65435004", "0.653475", "0.6534649", "0.6533724", "0.65271926", "0.6524806", "0.65243554", "0.6523379", "0.6517638" ]
0.6802287
55
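For comparison with the record above, whose positive document appends to the rear of a list-backed deque, here is a hedged sketch of that operation next to the standard-library collections.deque, which provides O(1) appends at both ends. Only enqueue_rear and _items come from the record; the ListDeque class name and the driver lines are assumptions added for illustration.

    from collections import deque


    class ListDeque:
        """Illustrative list-backed deque matching the rows above; class name is assumed."""

        def __init__(self):
            self._items = []

        def enqueue_rear(self, item):
            # Same behaviour as the positive document above: append at the back, O(1) amortized.
            self._items.append(item)


    # Standard-library alternative: collections.deque supports O(1) appends at both ends.
    dq = deque()
    dq.append("rear")         # analogous to enqueue_rear in the record above
    dq.appendleft("front")    # analogous to enqueue_front in the earlier record
    assert list(dq) == ["front", "rear"]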
Pop the item at the front of the deque. Raise IndexError if the deque is empty.
def dequeue_front(self): try: return self._items.pop(0) except: raise IndexError('The deque is empty')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pop_front(self):\n # set temp to deque's front for return\n temp = self.front\n # if deque is empty\n if self.size == 0:\n # raise IndexError\n raise IndexError()\n # if deque has one element\n elif self.size == 1:\n # empty the deque completely\n self.back = None\n self.front = None\n self.size -= 1\n # if the deque has more than one element\n else:\n # set front to front's prior node, set that node's next to\n # none, and decrement deque's size by 1\n self.front = self.front.prior\n self.front.next = None\n self.size -= 1\n # return previous front node's data\n return temp.data", "def pop(self):\n if self.isEmpty():\n raise KeyError(\"The queue is empty.\")\n oldItem = self._front.data\n self._front = self._front.next\n if self._front is None:\n self._rear = None\n self._size -= 1\n return oldItem", "def pop(self):\r\n try:\r\n return self.pop_from_deque()\r\n except IndexError:\r\n return None", "def dequeue_rear(self):\n try:\n return self._items.pop()\n except:\n raise IndexError('The deque is empty')", "def pop(self):\n if self._size > 0:\n elem = self.first.data\n self.first = self.first.next\n self._size = self._size - 1\n return elem\n \n raise IndexError('The queue is empty! ')", "def pop_back(self):\n # set temp to back node of deque\n temp = self.back\n # if the deque is empty\n if self.size == 0:\n # raise IndexError\n raise IndexError()\n # if deque has one element\n elif self.size == 1:\n # empty the deque completely\n self.back = None\n self.front = None\n self.size -= 1\n # if deque has more than one element\n else:\n # set deque's back to previous back's next, set the new\n # back's prior to None, and decrement deque size\n self.back = self.back.next\n self.back.prior = None\n self.size -= 1\n # return previous back node's data\n return temp.data", "def pop(self):\n try:\n return self._values.pop()\n except IndexError:\n raise IndexError('Cannot pop from empty deque.')", "def peek_back(self):\n if ((self.is_empty()) or self.data_[self.back_]== None):#If we trip this if block we raise an error since we know the deque should be empty \n raise IndexError\n return self.data_[self.back_]", "def removeFront(self):\n if self._size == 0:\n raise AttributeError(\"Cannot removeFront from an empty Deque\")\n \n temp = self._front\n self._front = self._front.getPrevious()\n if self._size == 1:\n # removing only item which is the rear as well as the front item\n self._rear = None\n else:\n self._front.setNext(None)\n self._size -= 1\n \n return temp.getData()", "def dequeue(self):\n if self.size() < 1:\n raise ValueError('Priority queue is empty and has no front item')\n else:\n # TODO: Remove and return min item from heap, if any\n ...", "def remove(self, index):\n if index < 0 or index >= len(self):\n raise AttributeError(\"i must be >= 0 and < size of queue\")\n if index == 0:\n oldItem = self._front.data\n self._front = self._front.next\n else:\n probe = self._front\n while index > 1:\n probe = probe.next\n index -= 1\n oldItem = probe.next.data\n probe.next = probe.next.next\n self._size -= 1\n if self.isEmpty():\n self._rear = None\n return oldItem", "def dequeue(self):\n if not self.front:\n raise AttributeError(\"Can't dequeue from an empty queue\")\n\n removed = self.front\n self.front = self.front.next\n return removed.value\n # try:\n # removed = self.front\n # self.front = self.front.next\n # return removed.value\n # except AttributeError:\n # return \"Can't dequeue from an empty queue\"", "def dequeue(self):\n if self.is_empty():\n raise ValueError('Queue 
underflow')\n\n item = self.first.item\n self.first = self.first.next_node\n self.N -= 1\n\n if self.is_empty():\n self.last = None # To avoid loitering\n\n return item", "def dequeue(self):\n try:\n temp = self.front\n self.front = self.front.next\n temp.next = None\n return temp.value\n except Exception:\n return \"the queue is empty\"", "def dequeue(self):\n try:\n return self._container.pop()\n except IndexError:\n raise IndexError(\"Cannot dequeue from empty queue.\")", "def pop(self): # 06:30 Lecture Week 2 \"Stacks\" (16:24)\n if self.isEmpty():\n raise Exception(\"Stack underflow\")\n item = self.first.Item # save item to return\n self.first = self.first.Next # delete last Node added\n self.N -= 1\n return item # return the saved item", "def dequeue(self):\n if self.is_empty():\n raise Empty(\"Queue is empty\")\n answer = self._data[self._front]\n self._data[self._front]\n self._data = (self._front+1)%len(self._data)\n self._size-=1\n return answer", "def dequeue(self):\n if self.isEmpty():\n raise Exception(\"Queue underflow\")\n item = self._q[self._first]\n self._q[self._first] = None # to avoid loitering\n self._N -= 1\n self._first += 1\n if self._first == len(self._q):\n self._first = 0 # wrap-around\n # shrink size of array if necessary\n if self._N > 0 and self._N == len(self._q)/4:\n self._resize(len(self._q)/2)\n return item", "def peek_front(self):\n if ((self.is_empty()) or self.data_[self.front_]== None): #If we trip this if block we raise an error since we know the deque should be empty \n raise IndexError\n return self.data_[self.front_]", "def dequeue(self):\r\n if self.size():\r\n self.queue.pop(0)\r\n else:\r\n raise IndexError(\"Queue is empty.\")", "def dequeue(self):\n if self.is_empty():\n raise ValueError('stack is empty')\n else:\n val = self.list.head.data\n self.list.delete(val)\n return val", "def pop_from_deque(self):", "def pop(self):\n return super().remove_item_from_front()", "def dequeue(self) -> object:\r\n if self.is_empty():\r\n raise QueueException\r\n value = self.da.get_at_index(0)\r\n self.da.remove_at_index(0)\r\n return value", "def pop(self):\n try:\n if self.size() > 0:\n top = self.top()\n self.items.pop()\n return top\n else:\n raise IndexError('Cannot pop item, stack is empty.')\n except IndexError as err:\n print(err)\n raise", "def dequeue(self):\n if len(self) == 1:\n self.tail = None\n return self.pop()", "def peek_back(self):\n # if the deque is empty\n if self.is_empty():\n # raise an IndexError\n raise IndexError()\n # if deque is not empty, return back's data\n return self.back.data", "def pop(self, pos=None):\n \n if self.is_empty():\n raise IndexError('pop from empty list')\n \n if pos is None:\n pos = self.length() - 1\n \n elif pos >= self.length():\n raise IndexError('pop index out of range')\n \n previous = None\n current = self.head\n \n for _ in range(pos):\n previous = current\n current = current.get_next()\n \n # If the item to be removed is the first item\n if pos == 0:\n self.head = current.get_next()\n else:\n previous.set_next(current.get_next())\n \n return current.get_data()", "def pop(self):\n\n if not self.empty:\n i = self._begin\n\n self._begin = (self._begin + 1) % self._capacity\n self._size -= 1\n\n return (self[i])\n else:\n raise ValueError", "def pop(self, index: int) -> Any:\n # If empty raise indexerror\n if self.is_empty():\n raise IndexError\n # Pop at the beginning of the list.\n elif index == 0:\n item = self._first\n # modify self._first\n self._first = self._rest._first\n self._rest = 
self._rest._rest\n return item\n # Recursive case\n else:\n if not self._rest:\n raise IndexError\n return self._rest.pop(index - 1)", "def pop_front(self):\n if (self.is_empty()):\n raise IndexError\n elif(self.front_ == self.back_):#Case where there is only one element in Deque so if we pop it we need to reassign front and back\n first_elm =self.data_[self.front_]#Store first element before we remove it \n self.data_.remove(self.data_[self.front_])\n self.front_ =-1\n self.back_ =-1\n self.size_ -=1\n return first_elm\n elif(self.front_ ==-1):#Front is at last index and we need to wrap around the circle to reposition the new front \n first_elm = self.data_[self.front_]\n self.data_.remove(self.data_[self.front_])\n self.front_=0#We reposition front to be at the starting index to account for the wrap around\n self.size_-=1\n return first_elm\n elif(self.front_ ==0 and self.size_!=0):#Front is at last index and we have more than one item sp we need to wrap around the circle to reposition the new front \n first_elm = self.data_[self.front_]\n self.data_.remove(self.data_[self.front_])\n self.front_=0#We reposition front to be at the starting index\n self.size_-=1\n return first_elm\n else:#The front is not at the last index(possibly somewhere in between) and if we pop it we have to go up by one for the new front\n first_elm = self.data_[self.front_]\n self.data_.remove(self.data_[self.front_])\n self.front_+=1\n self.size_-=1\n return first_elm", "def pop(self) -> int:\n return self._deque.pop(0)", "def peek_front(self):\n # if the deque is empty\n if self.is_empty():\n # call an IndexError\n raise IndexError()\n # if deque is not empty, return front's data\n return self.front.data", "def pop(self):\n if len(self) == 0:\n if self.none_for_empty:\n return None\n raise ValueError(\"Buffer is empty\")\n pt = self.buf[self.front]\n if self.rear == self.front:\n self.rear = None\n else:\n self.front = self.length - 1 if self.front == 0 else self.front - 1\n return pt", "def pop(self) -> int:\n \n temp = deque()\n while self.elements:\n temp.append(self.elements.pop())\n \n front_el = temp.pop()\n \n while temp:\n self.elements.append(temp.pop())\n \n return front_el", "def pop(self, index):\r\n if index < 0 or index >= self.size():\r\n raise IndexError(\"Array index out of bounds\")\r\n itemToReturn = self._items[index]\r\n # Shift items up by one position\r\n for i in range(index, self.size() - 1):\r\n self._items[i] = self._items[i + 1]\r\n # Reset empty slot to fill value\r\n self._items[self.size() - 1] = self._fillValue\r\n self._logicalSize -= 1\r\n if self.size() <= len(self) // 4 and len(self) > self._capacity:\r\n self.shrink()\r\n return itemToReturn", "def dequeue(self):\n\n item = self.__items__.pop(0)\n return item", "def remove_front(self):\n\n if self.items:\n return self.items.pop(0)\n return None", "def pop(self):\n self._raise_if_empty()\n item = self._top.data\n self._top = self._top.next\n return item", "def pop(self):\n try:\n item = self._items.pop()\n # This operation decrements the number of items\n # in the stack, we need to update the count variable\n self._update_count()\n return item\n except IndexError:\n raise IndexError(\"Stack is empty\")", "def pop(self) -> int:\n cur = None\n if(not self.empty()):\n cur = self.queue[0] \n self.queue = self.queue[1:] \n return cur", "def pop(self):\r\n\r\n if not self.is_empty():\r\n\r\n half_cap = self._capacity // 2\r\n item = self._data[self._size-1]\r\n self._data[self._size-1] = 0\r\n self._size -= 1\r\n\r\n if self._size <= 
half_cap:\r\n if half_cap != 0:\r\n\r\n self.shrink()\r\n\r\n return item\r\n\r\n else:\r\n pass", "def dequeue(self):\n if self.is_empty():\n raise Empty(\"Queue underflow.\")\n element = self._head._element\n self._head = self._head._next\n self._size -= 1\n if self.is_empty():\n self._tail = None\n return element", "def test_pop_left_on_empty_deque():\n from deque import Deque\n dq = Deque()\n with pytest.raises(IndexError):\n dq.pop_left()", "def pop_item(self, index):\n ix, obj = self.items\n if index < len(ix):\n self.d_buffer.pop(ix[index])\n else:\n raise IndexError('Buffer does not have {0} elements'.format(index))", "def pop(self):\n try:\n return self.array.pop()\n except IndexError as e:\n return None", "def dequeue(self):\n if self.is_empty():\n raise Empty('Queue is empty')\n answer = self._head._element\n self._head = self._head._next\n self._size -= 1\n if self.is_empty(): # special case as queue is empty\n self._tail = None # removed head had been the tail\n return answer", "def dequeue(self):\n if self.is_empty():\n raise Empty(\"Queue underflow.\")\n head = self._tail._next\n element = head._element\n self._size -= 1\n if self.is_empty():\n self._tail = None\n else:\n self._tail._next = head._next\n return element", "def dequeue(self):\n if self.items:\n return self.items.pop()\n return None", "def pop(self):\n if self._size == 0:\n raise Exception('Stack is empty')\n else:\n self._size -= 1\n return self.elements.pop(0)", "def pop_front(self):\n if self.is_empty():\n return None\n val = self.head.value\n # Update head and size\n self.head = self.head.next_node\n self.size -= 1\n # If the only node was removed, also need to update tail\n if self.is_empty():\n self.tail = None\n return val", "def dequeue(self):\n temp = self.front\n self.front = self.front.getPtr()\n return temp.getData()", "def pop(self):\n size = self._list.size()\n if size == 0:\n return None\n data = self._list.tail.data\n self._list.removeIndex(size-1)\n return data", "def pop(self):\n if self.is_empty():\n raise RuntimeError(\"Attempt to pop the empty stack!\")\n item = self.top()\n self._items = self._items[:-1]\n return item", "def _pop_first(self) -> Any:\n if self.is_empty():\n raise IndexError\n return self.pop(0)", "def dequeue(self):\n\t\tif self.is_empty():\n\t\t\traise Empty('Queue is empty')\n\t\tanswer = self._head._element\n\t\tself._head = self._head._next\n\t\tself._size -= 1\n\t\tif self.is_empty():\n\t\t\tself._tail = None\n\t\treturn answer", "def dequeue(self):\n if not self.is_empty():\n return self._queue_items.pop()\n else:\n raise QueueException('dequeue operation not supported on an empty queue')", "def dequeue(self) -> object:\n\n # Check if stack is empty\n if self.size() == 0:\n raise QueueException\n\n output = self.da.get_at_index(0)\n self.da.remove_at_index(0)\n return output", "def popitem(self):\n return self.pop(0)", "def dequeue(queue):\n item = front(queue)\n queue.front = queue.front.next\n if empty_queue(queue):\n queue.back = None\n\n queue.size = queue.size - 1\n\n return item", "def pop(self):\n try:\n return self._items.pop()\n except:\n raise IndexError('The stack is empty.')", "def pop_front(self):\n if self.head is None:\n raise IndexError('pop_front from empty list')\n node = self.head \n if node.next_node is None:\n self.tail = None \n else: \n node.next_node.prev_node = None \n self.head = node.next_node\n return node.value", "def pop(self):\n self.queue.insert(len(self.queue), self.queue[0])\n self.queue.remove(self.queue[0])\n return 
self.queue.pop()", "def pop(self):\n if self.__size == 0:\n return None\n else:\n data = self.__head.get_data()\n self.__head = self.__head.get_next()\n self.__size -= 1\n return data", "def dequeue(self):\r\n if self.is_empty():\r\n raise Empty(\"Queue is empty\")\r\n answer = self._head._element\r\n self._head = self._head._next\r\n self._size -= 1\r\n if self.is_empty():\r\n self._tail = None\r\n return answer", "def pop(self, index=None, last=True):\n if index == None:\n return super().pop(last)\n else:\n ret = self[index]\n self.remove(ret)\n return ret", "def pop(self) -> int:\n if self.empty():\n raise RuntimeError(\"Queue is empty!\")\n result = self.data[self.head]\n self.data[self.head] = None\n self.head = (self.head + 1) % len(self.data)\n self.size -= 1\n if 0 < self.size < len(self.data) // 4 and len(self.data) > 10:\n self.resize(len(self.data) // 2)\n return result", "def pop(self):\n if self.is_empty():\n raise ValueError('stack is empty')\n else:\n val = self.list.head.data\n self.list.delete(val)\n return val", "def pop(self, pos: int = -1):\n idx = pos if pos >= 0 else len(self) + pos\n if not 0 <= idx < len(self): raise KeyError('Index out of bound!')\n\n p = self.head\n while idx > 0: # stop before pos\n p = p.next\n idx -= 1\n\n # be careful when pop the last element we must change the self.tail\n if p.next is self.tail:\n self.tail = p\n\n # be careful that pop from an empty Linkedlist\n if p.next is not None:\n res = p.next.data\n p.next = p.next.next\n self._size -= 1\n else:\n res = None\n\n return res", "def peek(self):\n if self.is_empty():\n raise ValueError('Queue underflow')\n return self.first.item", "def pop(self):\n heap = self.heap\n if len(heap) < 1:\n return None\n\n ret_val = self.front()\n self.__delete(0)\n return ret_val", "def dequeue(self):\n if self.head is None:\n raise Exception(\"nothing to dequeue, queue is empty\")\n _head = self.head.value\n self.head = self.head.next\n self.size -= 1\n if self.is_empty():\n self.tail = None\n return _head", "def pop(self, idx=-1):\n to_ret =self. __getitem__(idx)\n self.__delitem__(idx)\n return to_ret", "def test_pop_empty_list():\n from deque import Deque\n dq = Deque()\n with pytest.raises(IndexError):\n dq.pop()", "def pop(self):\n\t\tif self.heap:\n\t\t\treturn heapq.heappop(self.heap)[1]\n\t\telse:\n\t\t\traise Exception('Trying to pop from empty PriorityQueue.')", "def popleft(self):\n try:\n return self._values.shift()\n except IndexError:\n raise IndexError('Cannot popleft from empty deque.')", "def pop(self):\n\t\tif self.is_empty():\n\t\t\traise Empty('Stack is empty')\n\t\tanswer = self._head._element\n\t\tself._head = self._head._next \n\t\tself._size -= 1\n\t\treturn answer", "def pop(self):\n if self.is_empty():\n raise Exception(\"Stack is empty.\")\n\n self.size -= 1\n return self.arr.pop()", "def pop(self):\n item = self.stack[-1]\n self.stack = self.stack[:-1]\n return item", "def pop(self):\n\n assert self.size > 0, \"Cannot pop item! 
The MaxHeap is empty!\"\n ret = self.items[0]\n self.items[0] = self.items[self.size - 1]\n self.items[self.size - 1] = None\n self.size -= 1\n self._shift_down(0)\n return ret", "def pop(self):\n if self.is_empty():\n raise Empty('Stack is empty')\n result = self._head._element\n self._head = self._head._next # bypass the former top node\n self._size -= 1\n return result", "def test_removing_the_first_val_in_deque(filled_deque):\n assert filled_deque.popleft() == 4", "def pop_back(self):\n if (self._size == 0):\n return None\n\n # Edge case, list has only one element\n # Behave same as pop_front()\n if (self._size == 1):\n return self.pop_front()\n\n output_value = self._tail.value\n\n self._tail = self._tail.prev\n self._tail.next = None\n self._size -= 1\n\n return output_value", "def pop(self):\n if not self.isEmpty():\n self.top -= 1\n return self.stack.pop()\n else:\n raise Exception(\"Stack Underflow\")", "def pop_front(self):\n if (self._size == 0):\n return None\n\n output_value = self._head.value\n\n self._head = self._head.next\n self._head.prev = None\n self._size -= 1\n\n # Edge case, list is now empty\n if (self._size == 0):\n self._tail = None\n\n return output_value", "def test_pop_empty_deque_raises_error(empty_deque):\n with pytest.raises(IndexError):\n empty_deque.pop()", "def dequeue(self):\n\n return self._data.pop(0)", "def pop_front(self):\n if self.n==0:\n print(\"Error; empty list\")\n return\n else:\n temp = self.head # retrieve front node\n self.head = temp.next # assign head to the second node\n self.n -= 1\n return temp.val", "def dequeue(self):\n return self.the_queue.pop(0)", "def dequeue(self) -> Any:\n if self.rear is None and self.front is None:\n raise IndexError\n elif self.front is None:\n while self.rear is not None:\n node = Node(self.rear.value, self.front)\n self.front = node\n self.rear = self.rear.rest\n assert isinstance(self.front, Node)\n temp = self.front.value\n self.front = self.front.rest\n self.num_items -= 1\n return temp\n else:\n temp = self.front.value\n self.front = self.front.rest\n self.num_items -= 1\n return temp", "def pop_at_index(self, index):\n if index==0:\n return self.pop_head()\n count=0\n start=self.head\n while count<index-1:\n start=start.getLink()\n count+=1\n if start.getLink()==None:\n return None\n temp=start.getLink()\n if temp.getLink()==None:\n return self.pop_tail()\n start.setLink(temp.getLink())\n return temp", "def pop_front(self):\n if self.empty():\n return \"Linked List is Empty\"\n\n h = self.head\n if h.next is None:\n self.head = None\n return h.data\n\n self.head = h.next\n return h.data", "def dequeue(self) -> object:\n return self._data.pop(0)", "def dequeue(self) -> object:\n return self._data.pop(0)", "def pop(self, idx=-1):\n if idx < 0: # For negative indexing, convert to positive counterpart\n idx = self._convert_negative_index(idx)\n if not 0 <= idx < self._length: # Ignore indices outside of bounds\n raise IndexError(f'index {idx} out of bounds')\n\n element = self._arr[idx] # Save element so it can be returned\n # Move all elements after index i one forward to \"delete\" element\n for i in range(idx, self._length - 1):\n self._arr[i] = self._arr[i + 1]\n self._length -= 1\n self._check_shrink() # Shrink array if length is too small\n return element", "def pop(self, index=-1):\n if not self.stack:\n raise ReversePolishCalcError, \"Stack is empty\"\n try:\n del self.stack[index]\n except IndexError:\n errmsg = \"Cannot pop element '%s' from stack\" % index\n raise ReversePolishCalcError, 
errmsg\n return self.stack", "def pop(self):\n if self.is_empty():\n raise Empty(\"Stack underflow.\")\n self._size -= 1\n e = self._data[self._size]\n self._data[self._size] = None\n if 0 < self._size == self._capacity // 4:\n self._resize(self._capacity // 2)\n return e", "def dequeue(self):\n\n temp = self.front\n self.front = self.front.next\n return temp.data", "def pop_last(self):\n self.pop_item(-1)", "def dequeue(self):\r\n return self.queue.pop(0)" ]
[ "0.7918459", "0.7688422", "0.7626735", "0.76226133", "0.7617293", "0.7505443", "0.7500941", "0.7439415", "0.740664", "0.7395839", "0.73582757", "0.73489094", "0.7289233", "0.7263663", "0.7241764", "0.724081", "0.7221747", "0.71959555", "0.7189014", "0.7156972", "0.7149371", "0.71443814", "0.708917", "0.7076477", "0.7070038", "0.70555925", "0.7050435", "0.70376104", "0.7025924", "0.70223874", "0.7011616", "0.69991744", "0.6980049", "0.6958906", "0.6933248", "0.6929943", "0.6925804", "0.6918445", "0.69175476", "0.6914113", "0.6910778", "0.6909239", "0.6898647", "0.68918294", "0.6870263", "0.6865405", "0.686347", "0.68631804", "0.68615097", "0.6846419", "0.68364125", "0.68363553", "0.6833046", "0.68274784", "0.68262", "0.68227464", "0.68201584", "0.6818901", "0.6801095", "0.6798019", "0.6796561", "0.6786023", "0.6782712", "0.6782386", "0.67808545", "0.6771639", "0.67700064", "0.6769611", "0.6763054", "0.67514986", "0.67495674", "0.6741051", "0.6734659", "0.6730746", "0.67297864", "0.6719735", "0.67191803", "0.6715657", "0.67067903", "0.6704466", "0.6701177", "0.66979074", "0.66930914", "0.6686689", "0.6686036", "0.66831064", "0.66823286", "0.66816604", "0.6677646", "0.6671789", "0.66715944", "0.6668693", "0.6664595", "0.6664595", "0.66600907", "0.66555846", "0.6651128", "0.6640411", "0.6634728", "0.66205055" ]
0.81932133
0
Pop the item at the end of the deque. Raise IndexError if the deque is empty.
def dequeue_rear(self): try: return self._items.pop() except: raise IndexError('The deque is empty')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pop(self):\r\n try:\r\n return self.pop_from_deque()\r\n except IndexError:\r\n return None", "def pop(self):\n try:\n return self._values.pop()\n except IndexError:\n raise IndexError('Cannot pop from empty deque.')", "def pop(self):\n if self.isEmpty():\n raise KeyError(\"The queue is empty.\")\n oldItem = self._front.data\n self._front = self._front.next\n if self._front is None:\n self._rear = None\n self._size -= 1\n return oldItem", "def pop(self):\n if self._size > 0:\n elem = self.first.data\n self.first = self.first.next\n self._size = self._size - 1\n return elem\n \n raise IndexError('The queue is empty! ')", "def dequeue_front(self):\n try:\n return self._items.pop(0)\n except:\n raise IndexError('The deque is empty')", "def pop(self): # 06:30 Lecture Week 2 \"Stacks\" (16:24)\n if self.isEmpty():\n raise Exception(\"Stack underflow\")\n item = self.first.Item # save item to return\n self.first = self.first.Next # delete last Node added\n self.N -= 1\n return item # return the saved item", "def dequeue(self):\n try:\n return self._container.pop()\n except IndexError:\n raise IndexError(\"Cannot dequeue from empty queue.\")", "def dequeue(self):\r\n if self.size():\r\n self.queue.pop(0)\r\n else:\r\n raise IndexError(\"Queue is empty.\")", "def pop(self):\n\n if not self.empty:\n i = self._begin\n\n self._begin = (self._begin + 1) % self._capacity\n self._size -= 1\n\n return (self[i])\n else:\n raise ValueError", "def dequeue(self):\n if self.is_empty():\n raise ValueError('Queue underflow')\n\n item = self.first.item\n self.first = self.first.next_node\n self.N -= 1\n\n if self.is_empty():\n self.last = None # To avoid loitering\n\n return item", "def pop_item(self, index):\n ix, obj = self.items\n if index < len(ix):\n self.d_buffer.pop(ix[index])\n else:\n raise IndexError('Buffer does not have {0} elements'.format(index))", "def dequeue(self):\n if self.isEmpty():\n raise Exception(\"Queue underflow\")\n item = self._q[self._first]\n self._q[self._first] = None # to avoid loitering\n self._N -= 1\n self._first += 1\n if self._first == len(self._q):\n self._first = 0 # wrap-around\n # shrink size of array if necessary\n if self._N > 0 and self._N == len(self._q)/4:\n self._resize(len(self._q)/2)\n return item", "def pop(self):\n try:\n item = self._items.pop()\n # This operation decrements the number of items\n # in the stack, we need to update the count variable\n self._update_count()\n return item\n except IndexError:\n raise IndexError(\"Stack is empty\")", "def pop_back(self):\n # set temp to back node of deque\n temp = self.back\n # if the deque is empty\n if self.size == 0:\n # raise IndexError\n raise IndexError()\n # if deque has one element\n elif self.size == 1:\n # empty the deque completely\n self.back = None\n self.front = None\n self.size -= 1\n # if deque has more than one element\n else:\n # set deque's back to previous back's next, set the new\n # back's prior to None, and decrement deque size\n self.back = self.back.next\n self.back.prior = None\n self.size -= 1\n # return previous back node's data\n return temp.data", "def pop(self, pos=None):\n \n if self.is_empty():\n raise IndexError('pop from empty list')\n \n if pos is None:\n pos = self.length() - 1\n \n elif pos >= self.length():\n raise IndexError('pop index out of range')\n \n previous = None\n current = self.head\n \n for _ in range(pos):\n previous = current\n current = current.get_next()\n \n # If the item to be removed is the first item\n if pos == 0:\n self.head = current.get_next()\n 
else:\n previous.set_next(current.get_next())\n \n return current.get_data()", "def pop(self) -> int:\n return self._deque.pop(0)", "def pop_from_deque(self):", "def pop(self, index):\r\n if index < 0 or index >= self.size():\r\n raise IndexError(\"Array index out of bounds\")\r\n itemToReturn = self._items[index]\r\n # Shift items up by one position\r\n for i in range(index, self.size() - 1):\r\n self._items[i] = self._items[i + 1]\r\n # Reset empty slot to fill value\r\n self._items[self.size() - 1] = self._fillValue\r\n self._logicalSize -= 1\r\n if self.size() <= len(self) // 4 and len(self) > self._capacity:\r\n self.shrink()\r\n return itemToReturn", "def remove(self, index):\n if index < 0 or index >= len(self):\n raise AttributeError(\"i must be >= 0 and < size of queue\")\n if index == 0:\n oldItem = self._front.data\n self._front = self._front.next\n else:\n probe = self._front\n while index > 1:\n probe = probe.next\n index -= 1\n oldItem = probe.next.data\n probe.next = probe.next.next\n self._size -= 1\n if self.isEmpty():\n self._rear = None\n return oldItem", "def pop(self):\r\n\r\n if not self.is_empty():\r\n\r\n half_cap = self._capacity // 2\r\n item = self._data[self._size-1]\r\n self._data[self._size-1] = 0\r\n self._size -= 1\r\n\r\n if self._size <= half_cap:\r\n if half_cap != 0:\r\n\r\n self.shrink()\r\n\r\n return item\r\n\r\n else:\r\n pass", "def pop(self):\n try:\n if self.size() > 0:\n top = self.top()\n self.items.pop()\n return top\n else:\n raise IndexError('Cannot pop item, stack is empty.')\n except IndexError as err:\n print(err)\n raise", "def pop(self):\n try:\n return self.array.pop()\n except IndexError as e:\n return None", "def dequeue(self):\n if self.is_empty():\n raise Empty(\"Queue is empty\")\n answer = self._data[self._front]\n self._data[self._front]\n self._data = (self._front+1)%len(self._data)\n self._size-=1\n return answer", "def dequeue(self) -> object:\r\n if self.is_empty():\r\n raise QueueException\r\n value = self.da.get_at_index(0)\r\n self.da.remove_at_index(0)\r\n return value", "def pop(self):\n try:\n return self._items.pop()\n except:\n raise IndexError('The stack is empty.')", "def pop_last(self):\n self.pop_item(-1)", "def pop(self, index: int) -> Any:\n # If empty raise indexerror\n if self.is_empty():\n raise IndexError\n # Pop at the beginning of the list.\n elif index == 0:\n item = self._first\n # modify self._first\n self._first = self._rest._first\n self._rest = self._rest._rest\n return item\n # Recursive case\n else:\n if not self._rest:\n raise IndexError\n return self._rest.pop(index - 1)", "def pop(self):\n if self.is_empty():\n raise Exception(\"Stack is empty.\")\n\n self.size -= 1\n return self.arr.pop()", "def dequeue(self):\n if self.is_empty():\n raise ValueError('stack is empty')\n else:\n val = self.list.head.data\n self.list.delete(val)\n return val", "def pop(self):\n size = self._list.size()\n if size == 0:\n return None\n data = self._list.tail.data\n self._list.removeIndex(size-1)\n return data", "def dequeue(self):\n if not self.is_empty():\n return self._queue_items.pop()\n else:\n raise QueueException('dequeue operation not supported on an empty queue')", "def dequeue(self):\n if not self.front:\n raise AttributeError(\"Can't dequeue from an empty queue\")\n\n removed = self.front\n self.front = self.front.next\n return removed.value\n # try:\n # removed = self.front\n # self.front = self.front.next\n # return removed.value\n # except AttributeError:\n # return \"Can't dequeue from an empty 
queue\"", "def pop(self):\n if self._size == 0:\n raise Exception('Stack is empty')\n else:\n self._size -= 1\n return self.elements.pop(0)", "def pop(self, idx=-1):\n to_ret =self. __getitem__(idx)\n self.__delitem__(idx)\n return to_ret", "def dequeue(self):\n if len(self) == 1:\n self.tail = None\n return self.pop()", "def pop(self, pos: int = -1):\n idx = pos if pos >= 0 else len(self) + pos\n if not 0 <= idx < len(self): raise KeyError('Index out of bound!')\n\n p = self.head\n while idx > 0: # stop before pos\n p = p.next\n idx -= 1\n\n # be careful when pop the last element we must change the self.tail\n if p.next is self.tail:\n self.tail = p\n\n # be careful that pop from an empty Linkedlist\n if p.next is not None:\n res = p.next.data\n p.next = p.next.next\n self._size -= 1\n else:\n res = None\n\n return res", "def pop(self, idx=-1):\n if idx < 0: # For negative indexing, convert to positive counterpart\n idx = self._convert_negative_index(idx)\n if not 0 <= idx < self._length: # Ignore indices outside of bounds\n raise IndexError(f'index {idx} out of bounds')\n\n element = self._arr[idx] # Save element so it can be returned\n # Move all elements after index i one forward to \"delete\" element\n for i in range(idx, self._length - 1):\n self._arr[i] = self._arr[i + 1]\n self._length -= 1\n self._check_shrink() # Shrink array if length is too small\n return element", "def pop(self, index=None, last=True):\n if index == None:\n return super().pop(last)\n else:\n ret = self[index]\n self.remove(ret)\n return ret", "def peek_back(self):\n if ((self.is_empty()) or self.data_[self.back_]== None):#If we trip this if block we raise an error since we know the deque should be empty \n raise IndexError\n return self.data_[self.back_]", "def dequeue(self):\n if self.size() < 1:\n raise ValueError('Priority queue is empty and has no front item')\n else:\n # TODO: Remove and return min item from heap, if any\n ...", "def pop_front(self):\n # set temp to deque's front for return\n temp = self.front\n # if deque is empty\n if self.size == 0:\n # raise IndexError\n raise IndexError()\n # if deque has one element\n elif self.size == 1:\n # empty the deque completely\n self.back = None\n self.front = None\n self.size -= 1\n # if the deque has more than one element\n else:\n # set front to front's prior node, set that node's next to\n # none, and decrement deque's size by 1\n self.front = self.front.prior\n self.front.next = None\n self.size -= 1\n # return previous front node's data\n return temp.data", "def pop(self) -> int:\n cur = None\n if(not self.empty()):\n cur = self.queue[0] \n self.queue = self.queue[1:] \n return cur", "def dequeue(self):\n\n item = self.__items__.pop(0)\n return item", "def dequeue(self):\n try:\n temp = self.front\n self.front = self.front.next\n temp.next = None\n return temp.value\n except Exception:\n return \"the queue is empty\"", "def pop(self):\n self._raise_if_empty()\n item = self._top.data\n self._top = self._top.next\n return item", "def pop(self) -> int:\n self._aux()\n ret = self.q1.popleft()\n self.q1, self.q2 = self.q2, self.q1\n self.size -= 1\n return ret", "def pop(self) -> int:\n if self.empty():\n raise RuntimeError(\"Queue is empty!\")\n result = self.data[self.head]\n self.data[self.head] = None\n self.head = (self.head + 1) % len(self.data)\n self.size -= 1\n if 0 < self.size < len(self.data) // 4 and len(self.data) > 10:\n self.resize(len(self.data) // 2)\n return result", "def dequeue(self):\n if self.items:\n return self.items.pop()\n return 
None", "def pop(self):\n\t\tif self.heap:\n\t\t\treturn heapq.heappop(self.heap)[1]\n\t\telse:\n\t\t\traise Exception('Trying to pop from empty PriorityQueue.')", "def pop(self, item_index: Index) -> Item:\n raise NotImplementedError(\"pop\")", "def pop(self):\n if self.is_empty():\n raise RuntimeError(\"Attempt to pop the empty stack!\")\n item = self.top()\n self._items = self._items[:-1]\n return item", "def popitem(self):\n return self.pop(0)", "def pop(self):\n\n assert self.size > 0, \"Cannot pop item! The MaxHeap is empty!\"\n ret = self.items[0]\n self.items[0] = self.items[self.size - 1]\n self.items[self.size - 1] = None\n self.size -= 1\n self._shift_down(0)\n return ret", "def dequeue(self):\n if self.is_empty():\n raise Empty(\"Queue underflow.\")\n head = self._tail._next\n element = head._element\n self._size -= 1\n if self.is_empty():\n self._tail = None\n else:\n self._tail._next = head._next\n return element", "def dequeue(self):\n if self.is_empty():\n raise Empty(\"Queue underflow.\")\n element = self._head._element\n self._head = self._head._next\n self._size -= 1\n if self.is_empty():\n self._tail = None\n return element", "def pop(self):\n if self.is_empty():\n raise Empty(\"Stack underflow.\")\n self._size -= 1\n e = self._data[self._size]\n self._data[self._size] = None\n if 0 < self._size == self._capacity // 4:\n self._resize(self._capacity // 2)\n return e", "def dequeue(self):\n\t\tif self.is_empty():\n\t\t\traise Empty('Queue is empty')\n\t\tanswer = self._head._element\n\t\tself._head = self._head._next\n\t\tself._size -= 1\n\t\tif self.is_empty():\n\t\t\tself._tail = None\n\t\treturn answer", "def pop(self, index=-1):\n if not self.stack:\n raise ReversePolishCalcError, \"Stack is empty\"\n try:\n del self.stack[index]\n except IndexError:\n errmsg = \"Cannot pop element '%s' from stack\" % index\n raise ReversePolishCalcError, errmsg\n return self.stack", "def pop(self):\n if len(self._substacks) == 0:\n raise Exception('Stack is empty.')\n else:\n if self._substacks[self._current_stack_index].size() == 1: # if last element in current stack,\n deleted_element = self._substacks[self._current_stack_index].pop()\n self._substacks.pop(self._current_stack_index) # remove the empty stack\n self._current_stack_index -= 1\n else:\n deleted_element = self._substacks[self._current_stack_index].pop()\n return deleted_element", "def dequeue(self):\n if self.is_empty():\n raise Empty('Queue is empty')\n answer = self._head._element\n self._head = self._head._next\n self._size -= 1\n if self.is_empty(): # special case as queue is empty\n self._tail = None # removed head had been the tail\n return answer", "def pop(self):\n\t\tif self.is_empty():\n\t\t\traise Empty('Stack is empty')\n\t\tanswer = self._head._element\n\t\tself._head = self._head._next \n\t\tself._size -= 1\n\t\treturn answer", "def remove_last_item_from_stack(self):\n if self.length > 0:\n return self.container.pop()\n else:\n return None", "def pop(self):\n if not self.isEmpty():\n self.top -= 1\n return self.stack.pop()\n else:\n raise Exception(\"Stack Underflow\")", "def pop(self) -> int:\n return self.q.pop(0)", "def pop(self) -> int:\n return self.q.pop(0)", "def pop(self):\n if self.is_empty():\n raise ValueError\n\n item = self.linked_list.head\n self.linked_list.head = item.next\n\n item.next = None\n\n self.linked_list.node_count -= 1\n\n return item.data", "def pop(self):\n item = self.stack[-1]\n self.stack = self.stack[:-1]\n return item", "def pop(self):\n if self.__size == 0:\n return None\n 
else:\n data = self.__head.get_data()\n self.__head = self.__head.get_next()\n self.__size -= 1\n return data", "def pop(self):\n if self.is_empty():\n raise EmptyStackError\n else:\n return self._items.pop()", "def dequeue(self):\r\n if self.is_empty():\r\n raise Empty(\"Queue is empty\")\r\n answer = self._head._element\r\n self._head = self._head._next\r\n self._size -= 1\r\n if self.is_empty():\r\n self._tail = None\r\n return answer", "def pop(self):\n if len(self) == 0:\n if self.none_for_empty:\n return None\n raise ValueError(\"Buffer is empty\")\n pt = self.buf[self.front]\n if self.rear == self.front:\n self.rear = None\n else:\n self.front = self.length - 1 if self.front == 0 else self.front - 1\n return pt", "def pop(self) -> int:\n last = self.queue.popleft()\n while self.queue:\n self.aux_queue.append(last)\n last = self.queue.popleft()\n self.queue, self.aux_queue = self.aux_queue, self.queue\n return last", "def pop(self) -> int:\n return self.q.popleft()", "def pop(self):\n if self.is_empty():\n raise ValueError('stack is empty')\n else:\n val = self.list.head.data\n self.list.delete(val)\n return val", "def dequeue(self) -> object:\n\n # Check if stack is empty\n if self.size() == 0:\n raise QueueException\n\n output = self.da.get_at_index(0)\n self.da.remove_at_index(0)\n return output", "def remove(self) -> T:\n if self.is_empty():\n raise EmptyStackError\n else:\n self._size -= 1\n return self._items.pop()", "def pop(self, index=-1):\n # type: (int) -> Any\n return self.value(list.pop(self, index))", "def test_pop_empty_list():\n from deque import Deque\n dq = Deque()\n with pytest.raises(IndexError):\n dq.pop()", "def dequeue(self):\n\n return self._data.pop(0)", "def pop(self):\n self.queue.insert(len(self.queue), self.queue[0])\n self.queue.remove(self.queue[0])\n return self.queue.pop()", "def pop(self):\n if not self.head:\n raise IndexError(\"Empty list, unable to pop\")\n output = self.head.data\n self.head = self.head.next\n self._counter -= 1\n return output", "def test_pop_empty_deque_raises_error(empty_deque):\n with pytest.raises(IndexError):\n empty_deque.pop()", "def dequeue(self):\n\t\treturn self.items.pop()", "def pop(self) -> int:\n if self.empty():\n raise ValueError(\"empty stack\")\n while len(self.que) != 1:\n self.tem_que.append(self.que.pop(0))\n ret = self.que.pop()\n while len(self.tem_que) > 0:\n self.que.append(self.tem_que.pop(0))\n return ret", "def test_pop_left_on_empty_deque():\n from deque import Deque\n dq = Deque()\n with pytest.raises(IndexError):\n dq.pop_left()", "def dequeue(self):\n return self.items.pop()", "def dequeue(self):\n return self.items.pop()", "def pop(self):\n if self.is_empty():\n raise Empty('Stack is empty')\n result = self._head._element\n self._head = self._head._next # bypass the former top node\n self._size -= 1\n return result", "def dequeue(self) -> object:\n return self._data.pop(0)", "def dequeue(self) -> object:\n return self._data.pop(0)", "def dequeue(self):\n return self.the_queue.pop(0)", "def pop(self):\n array = self.array\n item = array[0] \n if len(array) == 1:\n del array[0]\n else:\n compare = self.compare\n del self.pos[array[0]] \n array[0] = array.pop()\n self.pos[array[0]] = 0\n low, high = 0, 1\n while high < len(array):\n if ((high+1 < len(array)\n and compare(array[high], array[high+1]) > 0)):\n high = high+1\n if compare(array[low], array[high]) <= 0:\n break\n self.pos[array[high]] = low\n self.pos[array[low]] = high \n array[low], array[high] = array[high], array[low]\n low, high = 
high, 2*high+1\n return item", "def dequeue(self):\n if self.is_empty():\n raise Exception(\"Queue is empty !!! Please add data to the Queue :) \")\n else:\n return self.data.pop(0)", "def delete_last(self):\n self.deque.pop()", "def pop(self):\r\n return self.queue.pop(0)", "def pop(self):\n\n if self.empty():\n raise ValueError(\"Empty Stack\")\n\n data = self._data[self._head_pos]\n self._data[self._head_pos] = None\n self._head_pos -= 1\n self._size -= 1\n\n return data", "def pop(self, i: int) -> Any:\n if i < 0:\n i = self._length + i\n\n if i == 0:\n if self._first:\n item = self._first.item\n self._first = self._first.next\n self._length -= 1\n return item\n else:\n raise IndexError\n else:\n curr = self._first\n curr_index = 0\n\n while curr is not None:\n if curr_index == i - 1:\n if curr.next is None:\n raise IndexError\n else:\n item = curr.next.item\n curr.next = curr.next.next\n self._length -= 1\n return item\n\n curr = curr.next\n curr_index += 1\n raise IndexError", "def pop(self):\n if not self.empty():\n self.size -= 1\n return heapq.heappop(self.queue)\n else:\n return None", "def popitem(self):\n pass", "def pop(self):\n if self.is_empty():\n raise Empty('Stack is empty')\n return self._data.pop() # calling underlying Python list's pop method" ]
[ "0.77415574", "0.76645964", "0.76196545", "0.7619073", "0.74518174", "0.73892117", "0.7347819", "0.73183465", "0.72986156", "0.7289281", "0.7255884", "0.7225652", "0.721948", "0.721712", "0.72019696", "0.71845245", "0.7174988", "0.7146664", "0.7139901", "0.71300745", "0.71008176", "0.7079559", "0.70788455", "0.7059101", "0.70460546", "0.703343", "0.70224077", "0.7019088", "0.7013425", "0.7006933", "0.700632", "0.69937354", "0.69910735", "0.6982158", "0.6974713", "0.6969446", "0.6965583", "0.69563687", "0.6955784", "0.694744", "0.69352865", "0.6925177", "0.692411", "0.69134176", "0.69014513", "0.69001096", "0.6887661", "0.68798244", "0.68745893", "0.68732125", "0.68633795", "0.6860143", "0.6851715", "0.6847592", "0.68420017", "0.684088", "0.6835682", "0.6822907", "0.6822618", "0.6818403", "0.6810136", "0.67974716", "0.67935586", "0.6786937", "0.6786937", "0.6776919", "0.67765474", "0.67734665", "0.67612195", "0.6760323", "0.6760139", "0.67599195", "0.6757588", "0.6755386", "0.6749291", "0.67438275", "0.67405564", "0.67379886", "0.67361563", "0.67344296", "0.6728695", "0.67261434", "0.67204607", "0.6715794", "0.6706831", "0.6694849", "0.6694849", "0.6689825", "0.66832346", "0.66832346", "0.66827565", "0.6679288", "0.6667023", "0.6666572", "0.6660997", "0.66510034", "0.66444415", "0.66358626", "0.66340494", "0.663196" ]
0.7715742
1
Return True if the deque is empty.
def is_empty(self): return len(self._items) == 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_empty(self):\n return len(self.deque) == 0", "def empty(self) -> bool:\n return len(self._deque) == 0", "def empty(self) -> bool:\n return self.que == []", "def is_empty(self) -> bool:\n return self.peek(1) == []", "def is_empty(self):\n return len(self.the_queue) == 0", "def is_empty(self):\n return self.queue == []", "def empty(self) -> bool:\n return self.qsize() == 0", "def is_empty(self):\n return len(self.queue) == 0", "def is_empty(self):\n return len(self.queue) == 0", "def is_empty(self):\n return self._queue_items == []", "def is_empty(self):\n return len(self.__queue) > 0", "def empty(self) -> bool:\n return len(self.q) == 0", "def empty(self) -> bool:\n return len(self.q) == 0", "def empty(self):\n return len(self.queue) == 0", "def is_empty(self):\n if self.front:\n return False\n return True", "def is_empty(self):\n return not bool(self.front)", "def is_empty(self):\n return not self._queue", "def empty(self) -> bool:\n if self.queue.empty():\n return True\n else:\n return False", "def isEmpty(self):\r\n if (len(self.queue) >= 1):\r\n return False\r\n else:\r\n return True", "def empty(self):\r\n return self.queue == []", "def is_empty(self):\n\n return not self._queue", "def empty(self):\n return self.queue == []", "def isEmpty(self):\n return 0 == len(self.queue)", "def is_empty(self):\n\n if self.front == None:\n return True\n else:\n return False", "def is_empty(self):\n if self.front == None:\n return True\n else:\n return False", "def empty(self): \n return self.qsize() == 0", "def is_empty(self):\n\n # If the queue is an empty list, self._data would return False\n # So if the queue is empty we want to return true\n # modify with not self._data\n return not self._data", "def empty(self) -> bool:\n return not self.queue", "def empty(self) -> bool:\n return not self.queue", "def is_empty(self) -> bool:\r\n return self.size == 0", "def empty(self) -> bool:\n return self._queue.empty()", "def empty(self) -> bool:\n return self.size == 0", "def empty(self) -> bool: \n if(self.queue is not None and len(self.queue) > 0):\n print(\"len > 0\" )\n return False\n else:\n print(\"len = 0\" )\n return True", "def isEmpty(self):\n return self.front is None", "def is_empty(self):\n\t\treturn self._size == 0", "def is_empty(self):\n\t\treturn (self._size == 0)", "def empty(self) -> bool:\n return len(self.queue1) == 0", "def is_empty(self):\n return len(self.priority_queue) == 0", "def is_empty(self) -> bool:\n return self.heap.length() == 0", "def is_empty(self) -> bool:\n return self.heap.length() == 0", "def is_empty(self):\r\n return self.buff==[]", "def is_empty(self):\r\n\r\n return self._size == 0", "def is_empty(self):\r\n return self._size == 0", "def is_empty(self):\r\n return self._size == 0", "def empty(self) -> bool:\n return self.data.get_size() == 0", "def empty(self):\n return not self.queue", "def is_empty(self):\n return self._size == 0", "def is_empty(self):\n return self._size == 0", "def is_empty(self):\n return self._size == 0", "def is_empty(self):\n return self._size == 0", "def is_empty(self):\n return self._size == 0", "def is_empty(self):\n return self._size == 0", "def is_empty(self):\n return self._size == 0", "def is_empty(self):\n return self._size == 0", "def is_empty(self):\n return self._size == 0", "def is_empty(self):\n return self._size == 0", "def is_empty(self):\n return self._size == 0", "def is_empty(self):\n return self._size == 0", "def is_empty(self):\n return self._size == 0", "def is_empty(self):\n return self._size == 0", "def 
is_empty(self):\n return self._size == 0", "def is_empty(self):\n return self._size == 0", "def is_empty(self):\n if self.size() == 0:\n return True\n else:\n return False", "def is_empty(self):\n if self._size == 0:\n return True\n return False", "def is_empty(self):\n return self.size() == 0", "def is_empty(self):\n return self.size() == 0", "def empty(self):\n return self.q_size.current_value == 0", "def empty(self) -> bool:\n if self.push_queue or self.pop_queue:\n return False\n return True", "def is_empty(self):\n return self.size == 0", "def is_empty(self):\n return self.size == 0", "def is_empty(self):\n return self.size == 0", "def is_empty(self):\n return self.size == 0", "def is_empty(self):\n return self.size == 0", "def is_empty(self):\n\n if self.size() == 0:\n return True\n else:\n return False", "def is_empty(self):\r\n if self.size == 0:\r\n return True\r\n return False", "def is_empty(self):\n return self._size == 0", "def empty(self) -> bool:\n\n return (self._size == 0)", "def empty(self): # O(1)\n return not self.queue", "def is_empty(self):\n return len(self.container) == 0", "def empty(self) -> bool:\n return not bool(self.q)", "def _is_empty(self):\n return self.size == 0", "def empty(self):\n return self.size == 0", "def empty(self):\n return self.size == 0", "def empty(self):\n return self._size is 0", "def is_empty(self):\n return self.size == []", "def is_empty(self):\n return self._head is self._tail is None", "def test_empty_deque_is_empty(empty_deque):\n assert empty_deque.size() == 0", "def empty(self):\n return self.queue[0].empty() and self.queue[1].empty()", "def isEmpty(self) -> bool:\n return self.front == self.rear", "def empty(self):\n return self.size() == 0", "def is_empty(self):\n return self.__size == 0", "def is_empty(self) -> bool:\n return self._first is None", "def is_empty(self):\n return self.heap.is_empty()", "def is_empty(self):\n return self.__len__() == 0", "def is_empty(self):\n return len(self.__heap) == 0", "def empty(self) -> bool:\n return len(self.q1) >= 0", "def is_empty(self):\n\n return self.head is None", "def isEmpty(self):\n return self.qSize == 0", "def is_empty(self):\n\n if self.head == None:\n return True\n else:\n return False", "def is_empty(self):\n\n if self.head == None:\n return True\n else:\n return False", "def empty(self) -> bool:\n return self.q1==[]" ]
[ "0.92402023", "0.9101068", "0.86111414", "0.8478603", "0.8439654", "0.83985174", "0.83917534", "0.83707136", "0.83707136", "0.8296455", "0.8283977", "0.8280236", "0.8280236", "0.8274057", "0.82652426", "0.8263581", "0.8238711", "0.8222698", "0.82093155", "0.82078135", "0.8205747", "0.820295", "0.81946665", "0.81897306", "0.8186252", "0.81763244", "0.81675905", "0.81326693", "0.81326693", "0.8114591", "0.8102482", "0.80912215", "0.80878955", "0.80617183", "0.8046883", "0.8045736", "0.8043181", "0.8043047", "0.80368495", "0.80368495", "0.80198705", "0.80147016", "0.8013906", "0.8013906", "0.80061847", "0.7990263", "0.79865164", "0.79865164", "0.79865164", "0.79865164", "0.79865164", "0.79865164", "0.79865164", "0.79865164", "0.79865164", "0.79865164", "0.79865164", "0.79865164", "0.79865164", "0.79865164", "0.79865164", "0.79865164", "0.79776084", "0.7972306", "0.79722625", "0.79722625", "0.7964635", "0.7962717", "0.79593277", "0.79593277", "0.79593277", "0.79593277", "0.79593277", "0.79591227", "0.79526424", "0.79482335", "0.79450923", "0.7944691", "0.794272", "0.7942554", "0.7930823", "0.7930528", "0.7930528", "0.79120773", "0.7911708", "0.7911155", "0.79066867", "0.78881526", "0.78866947", "0.78855646", "0.78774416", "0.7877196", "0.7877167", "0.7863028", "0.78523177", "0.7830224", "0.78127766", "0.78067803", "0.77915317", "0.77915317", "0.7791096" ]
0.0
-1
Return the length of the deque.
def __len__(self): return len(self._items)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __len__(self):\n return len(self.deque)", "def __len__(self):\r\n return len(self.deque)", "def __len__(self) -> int:\n return len(self._data_queue)", "def __len__(self):\n return len(self.cumulative_length)", "def length(self):\n return len(self.container)", "def __len__(self):\n\n return len(self._queue)", "def size(self):\r\n return len(self.queue)", "def size(self):\r\n return len(self.queue)", "def length(self):\n return self.linked_list.length()", "def __len__(self):\n with self._lock:\n return len(self._items)", "def __len__(self):\n with self._lock:\n return len(self._items)", "def size(self):\n\n size = 1\n traverse = self.front\n if self.front == None:\n return 0\n\n while traverse.next != None:\n traverse = traverse.next\n size += 1\n return size", "def size(self):\n\n size = 1\n traverse = self.front\n if self.front == None:\n return 0\n\n while traverse.next != None:\n traverse = traverse.next\n size += 1\n return size", "def size(self):\n return len(self.queue)", "def size(self):\n return len(self.queue)", "def size(self):\n return len(self.queue)", "def size(self):\n return len(self.queue)", "def len(self) -> int:\n\n return int(self.q)", "def len(self):\n return self.__len__()", "def size(self):\n count = 0\n current = self.front\n\n while current is not None:\n current = current.getPtr()\n count += 1\n\n return count", "def get_length(self):\r\n return len(self.deck)", "def getLen(self):\n return len(self.data)", "def size(self):\n return len(self.__queue)", "def __len__(self):\n if self.length is None:\n self.length = 0\n for _ in self:\n self.length += 1\n\n return self.length", "def __len__(self):\n if self.length is None:\n self.length = 0\n for _ in self:\n self.length += 1\n\n return self.length", "def __len__(self):\n if self.length is None:\n self.length = 0\n for _ in self:\n self.length += 1\n\n return self.length", "def getLength(self):\n return self.count", "def length(self):\n return self.count", "def getLen(self):\n return self.len", "def length(self) -> int:\n return self.size", "def length(self) -> int:\n return self.size", "def len (self):\n\t\treturn len (self.data)", "def length(self):\n # TODO: Count number of items\n return self.list.size", "def size(self):\n return len(self._queue_items)", "def len(self):\n return self._size", "def length(self):\n if self.is_null():\n return 0\n return self.end - self.begin", "def __len__(self) -> int:\n\n return self.__size", "def __len__(self) -> int:\n return self._length", "def __len__(self) -> int:\n return self._len", "def qsize(self):\r\n return len(self._queue)", "def length(self) -> int:\r\n\r\n return self.__length", "def __len__(self):\n # type: () -> int\n return len(self.data)", "def __len__(self) -> int:\n return len(self.buffer)", "def length(self) -> int:\n return len(self.data)", "def __len__(self) -> int:\n return len(self._data)", "def __len__(self) -> int:\n return len(self._data)", "def __len__(self) -> int:\n return len(self.__elements)", "def size(self) -> int:\n return self.length", "def size(self):\r\n return self.__length", "def length(self):\n return self.counter", "def length(self):\n return self.__length", "def length(self):\n return self.__length", "def Length(self):\n return len(self.jobQueue)", "def getLength(self):\r\n return len(self.list)", "def length(self):\n return self.list.length", "def length(self):\n return self.list.length", "def getLength(self):\n return len(self.entries)", "def __len__(self):\n return self._length", "def __len__(self):\n return self._length", "def 
__len__(self):\n return self._length", "def len(self):\n return self.n", "def __len__( self ):\n \n return len(self.__deck)", "def __len__(self):\n\n return len(self._block_queue)", "def test_full_deque_has_length(full_deque):\n assert full_deque._deque._length == 3", "def size(self):\n return self.__length", "def __len__(self) -> int:\n return len(self.getvalue())", "def length(self):\n return len(self.data)", "def length(self):\n return len(self.data)", "def length(self):\n return len(self.data)", "def length(self):\n return self._length", "def length(self):\n return self._length", "def __len__(self) -> int:\n if self.data is None:\n return 0\n return len(self.data)", "def __len__(self) -> int:\n return len(self.data)", "def __len__(self) -> int:\n return len(self.data)", "def __len__(self) -> int:\n return len(self.data)", "def get_length(self):\n pointer = self.head\n counter = 0\n while pointer:\n counter += 1\n pointer = pointer.next_node\n return counter", "def __len__(self):\n return self._number_of_items", "def length(self):\n return self.list.length()", "def __len__(self):\n\t\treturn self.qsize()", "def __len__(self):\n return len(self.container)", "def get_length(self):\n\t\treturn len(self._blocks)", "def __len__(self):\n return self.__length", "def size(self):\n return self._length", "def size(self) -> int:\r\n return self.da.length()", "def __len__(self):\n # TODO: Is this method used?\n return self._info['length']", "def __len__(self):\n return self._count", "def __len__(self):\n return self._count", "def length(self):\n return self.heap.size()", "def length(self):\n return len(self.items)", "def __len__(self):\n\t\treturn self._size", "def __len__(self) -> int:\n return self.size", "def __len__(self) -> int:\n return self.size", "def __len__(self) -> int:\n return self.size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size" ]
[ "0.86527514", "0.86125225", "0.74804485", "0.7163367", "0.7139454", "0.7136019", "0.7052766", "0.7052766", "0.70522916", "0.7034566", "0.7034566", "0.70223486", "0.70223486", "0.7014049", "0.7014049", "0.7014049", "0.7014049", "0.70028573", "0.6997984", "0.6995006", "0.698651", "0.69851065", "0.698042", "0.6974823", "0.6974823", "0.6974823", "0.6974487", "0.69409287", "0.6936721", "0.6912027", "0.6912027", "0.68979067", "0.68901634", "0.6888426", "0.68849444", "0.6882859", "0.68765277", "0.68598133", "0.6854601", "0.6853455", "0.68482983", "0.6846642", "0.68430144", "0.68411636", "0.6811153", "0.6811153", "0.6797747", "0.6797536", "0.67934275", "0.6790358", "0.67893773", "0.67893773", "0.67791307", "0.67782634", "0.6778045", "0.6778045", "0.6777405", "0.6776472", "0.6776472", "0.6776472", "0.6771475", "0.67558295", "0.6754483", "0.67543775", "0.67524195", "0.67481875", "0.6747402", "0.6747402", "0.6747402", "0.6745287", "0.6745287", "0.67446625", "0.67366225", "0.67366225", "0.67366225", "0.6730491", "0.6729168", "0.6726077", "0.67254245", "0.67226857", "0.6717987", "0.67174745", "0.6714978", "0.671182", "0.67087936", "0.6707461", "0.6707461", "0.6703986", "0.67009073", "0.6694175", "0.6692562", "0.6692562", "0.6692562", "0.66829604", "0.66829604", "0.66829604", "0.66829604", "0.66829604", "0.66829604", "0.66829604", "0.66829604" ]
0.0
-1
Return the representation of the deque.
def __repr__(self): return "Front -> " + repr(self._items) + " <- Rear"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __repr__(self):\n return 'Deque([{0}])'.format(','.join(str(item) for item in self))", "def __repr__(self):\n return 'Deque([{0}])'.format(','.join(str(item) for item in self))", "def __str__(self):\n return str(self.deque)", "def __repr__(self):\n return 'Deque({})'.format(self.length())", "def __repr__(self):\n return 'PriorityQueue({} items, front={})'.format(self.size(), self.front())", "def __repr__(self):\n return str(self._queue_items)", "def __str__(self):\n out = '['\n if self.head != None:\n cur = self.head\n out = out + str(self.head)\n cur = cur.next\n while cur != None:\n out = out + ' -> ' + str(cur)\n cur = cur.next\n out = out + ']'\n return out", "def __repr__(self):\n return 'ResizingArrayQueue([{}])'.format(', '.join(repr(i) for i in self))", "def __str__(self):\r\n out = \"QUEUE: \" + str(self.da.length()) + \" elements. [\"\r\n out += ', '.join([str(self.da.get_at_index(_))\r\n for _ in range(self.da.length())])\r\n return out + ']'", "def __str__(self):\n out = \"QUEUE: \" + str(self.da.length()) + \" elements. [\"\n out += ', '.join([str(self.da.get_at_index(_))\n for _ in range(self.da.length())])\n return out + ']'", "def to_bytes(self):\n return b\"\".join(self.output_deque)", "def __repr__ (self):\n\t\ts=[];add=s.append\n\t\tfor item in self.data:\n\t\t\tadd (str(item))\n\t\treturn join (s, '\\n')", "def __repr__(self):\n return 'Queue({})'.format(self.length())", "def __repr__(self):\n nodes = []\n curr = self.head\n while curr:\n nodes.append(repr(curr))\n curr = curr.next\n return '[' + ', '.join(nodes) + ']'", "def __repr__(self):\n nodes = []\n curr = self.head\n while curr:\n nodes.append(repr(curr))\n curr = curr.next\n return '[' + ', '.join(nodes) + ']'", "def __str__(self):\n\n return LinkedList.str_recursive(self.front)", "def __str__(self) -> str:\n out = \"QUEUE: \" + str(self.da.size) + \" elements. 
\"\n out += str(self.da.data[:self.da.size])\n return out", "def __str__(self):\n resultStr = \"(rear)\"\n current = self._rear # point current at rear node\n while current != None: # while current points to a node\n resultStr = resultStr + \" \" + str(current.getData())\n current = current.getNext() # move current to next node\n resultStr = resultStr + \" (front)\"\n return resultStr", "def __repr__(self):\n nodes = []\n current = self.head\n while current:\n nodes.append(repr(current))\n current = current.next\n\n return '[' + ','.join(nodes) + ']'", "def __str__(self):\n\t\tstrBuffer = \"[\"\n\t\ttemp = self.head\n\t\tcount = 0\n\n\t\twhile temp.getNext() != None:\n\t\t\t#print temp.getData()\n\t\t\tstrBuffer += temp.getData() + \", \"\n\t\t\tcount += 1\n\t\t\tif count % 20 == 0:\n\t\t\t\tstrBuffer = strBuffer[:-1] + '\\n' \n\t\t\ttemp = temp.getNext()\n\t\t\n\t\tstrBuffer += (temp.getData() + ']') #off by one fix\n\t\t\t\n\t\treturn strBuffer", "def __str__(self):\n result = [] \n node = self.head\n while node is not None:\n result.append(str(node.value))\n node = node.next_node \n return '[' + ', '.join(result) + ']'", "def __str__(self):\n result = [] \n node = self.head\n while node is not None:\n result.append(str(node.value))\n node = node.next_node \n return '[' + ', '.join(result) + ']'", "def __repr__(self):\n temp_node = self.head\n values = []\n if temp_node is None:\n return str([])\n while temp_node is not None:\n values.append(temp_node.value)\n temp_node = temp_node.next_node\n return str(values)", "def __str__(self):\n return '[' + ', '.join([str(x) for x in self.elem]) + ']'", "def __init__(self):\n self.deque = []", "def __repr__(self):\n return self._to_deck()", "def __repr__(self):\n outlist = []\n for idx in range(len(self)):\n outlist.append(repr(self[idx]))\n return f\"({', '.join(outlist)})\"", "def __init__(self):\r\n self.deque = deque()", "def encode_queue(self, queue):\n return \"[\" + \",\".join(queue) + \"]\"", "def __init__(self):\n self.contents = deque()", "def as_string(self):\n\n\t\tout = []\n\t\tn = self\n\n\t\twhile n:\n\t\t\tout.append(str(n.data))\n\t\t\tn = n.next\n\n\t\treturn \"\".join(out)", "def full_deque():\n from deque import Deque\n populated_deque = Deque(iterable=[1, 2, 3])\n return populated_deque", "def __str__(self):\n s =\"\"\n if self.data is 0:\n return \"[]\"\n else:\n for i in range(len(self.data)):\n s += str(self.data[i])\n if i != len(self.data)-1:\n s += \", \"\n return \"[\" + s + \"]\"", "def serialize(self, root):\n q = deque()\n q.append(root)\n res = []\n while len(q) > 0:\n cur = q.popleft()\n if cur:\n res.append(str(cur.val))\n q.append(cur.left)\n q.append(cur.right)\n else:\n res.append(\"null\")\n return \"[\" + \",\".join(res) + \"]\"", "def __repr__(self):\n\n nodes = []\n current = self.head\n\n while current:\n if current is self.head:\n nodes.append('[Head: %s]' % current.data)\n elif current.next_node is None:\n nodes.append('[Tail: %s]' % current.data)\n else:\n nodes.append('[%s]' % current.data)\n current = current.next_node\n\n return '-> '.join(nodes)", "def __str__(self) -> str:\n return '[' + ' -> '.join([str(element) for element in self]) + ']'", "def __iter__(self):\r\n return self.deque.__iter__()", "def __init__(self):\r\n self.q = deque()", "def __init__(self):\n self._deque = []", "def __repr__(self):\n return str([(n,c,str(p)) for (n,c,p) in self.frontierpq])", "def __init__(self):\n \n self.elements = deque()", "def __repr__(self):\n return 'Stack({} items, top={})'.format(self.length(), 
self.peek())", "def __repr__(self):\n return 'Stack({} items, top={})'.format(self.length(), self.peek())", "def __repr__(self):\n return 'Stack({} items, top={})'.format(self.length(), self.peek())", "def __repr__(self):\n return 'Stack({} items, top={})'.format(self.length(), self.peek())", "def __str__(self):\n\n final_string = \"\"\n\n current = self.front\n\n while current:\n final_string += f\"{{{current.value}}} -> \"\n current = current.next\n\n return f\"{final_string}NULL\"", "def serialize(self, root):\n queue = collections.deque([root])\n list = []\n while queue:\n node = queue.popleft()\n if node:\n queue.append(node.left)\n queue.append(node.right)\n\n list.append(str(node.val))\n else:\n list.append('n')\n return list", "def __str__(self):\n elements = []\n current = self._head\n while current:\n elements.append(str(current.val))\n current = current.next\n return ' -> '.join(elements)", "def __str__(self) -> str:\n out = \"BAG: \" + str(self.da.length()) + \" elements. [\"\n out += ', '.join([str(self.da.get_at_index(_))\n for _ in range(self.da.length())])\n return out + ']'", "def __str__(self) -> str:\n out = \"BAG: \" + str(self.da.length()) + \" elements. [\"\n out += ', '.join([str(self.da.get_at_index(_))\n for _ in range(self.da.length())])\n return out + ']'", "def as_string(self):\n\n ll_elements = []\n current = self\n while current:\n ll_elements.append(str(current.data))\n current = current.next\n\n return \"->\".join(ll_elements)", "def __str__(self):\n str_list = ['[']\n for i in self.data_list:\n str_list.append(str(i))\n str_list.append(', ')\n str_list.pop() # remove trailing space\n str_list.append(\"]\")\n\n return ''.join(str_list)", "def __repr__(self):\n return \"heap:[\" + ','.join(map(str, self.ar[:self.n])) + \"]\"", "def __repr__(self):\n dataListStrs = []\n for data in self:\n dataListStrs.append(repr(data))\n dataListStrs.append(\"None\")\n return \" -> \".join(dataListStrs)", "def dequeue(self):\n\n temp = self.front\n self.front = self.front.next\n return temp.data", "def __str__(self):\n s = \"--\\n\"\n for element in self:\n s += element.__str__() + \"\\n\"\n s += \"--\"\n \"\"\"\n # Uncomment if you want to see the internal structure\n s = \"\\n--\\n\"\n for i in xrange(self.size):\n s += \"%d [%s, %s]\\n\" % ( i, self.slot[i], self.data[i] )\n s += \"--\"\n \"\"\"\n return s", "def __str__ (self) :\r\n a = []\r\n next_get = self.nextGet_\r\n buffer = self.buff_\r\n length = self.capacity()\r\n for x in xrange(len(self)) :\r\n a.append(str(buffer[next_get]))\r\n a.append(\" \")\r\n next_get = (next_get+1) % length\r\n \r\n return \"\".join(a)", "def __init__(self):\n self.container = collections.deque()", "def __repr__(self):\n return \"multiset([%s])\" % \", \".join(repr(x) for x in self)", "def __repr__(self):\n return \"{}\".format(self._head)", "def __str__(self) -> str:\n ret = StringBuilder(\"\")\n current = self.head\n while current:\n ret += current.info\n current = current.next\n return str(ret)", "def _cls_repr(self):\n return [] # TODO: return I guess samples/labels/chunks", "def __repr__(self):\n returnValue = \"\"\n for element in self.cards:\n returnValue += str(element) + ', '\n return returnValue[:-2]", "def __str__(self):\n return \"->\".join([str(n.data) for n in self.as_list()])", "def serialize(self):\r\n values = ','.join(i.serialize() for i in self.curves)\r\n return \"[{}]\".format(values)", "def serialize(self, root):\n if root is None:\n return []\n \n queue = deque([root])\n ans = []\n ans.append(['#', root.val])\n 
\n while queue:\n node = queue.popleft()\n child_list = []\n for child in node.children:\n child_list.append(child.val)\n queue.append(child)\n ans.append([node.val, child_list])\n \n return ans", "def __str__(self):\n\n result = \"\"\n\n temp = self.head\n while temp is not None:\n result += str(temp.data) + \" -> \"\n temp = temp.next\n\n return result[0:-4]", "def filled_deque():\n from deque import Deque\n new_filled_deque = Deque([4, 3, 2, 1])\n return new_filled_deque", "def serialize(self, root):\n if not root:\n return '[]'\n queue = collections.deque([root])\n ans = []\n while queue:\n node = queue.popleft()\n if not node:\n ans.append('null')\n continue\n ans.append(str(node.val))\n queue.extend([node.left, node.right])\n return '['+','.join(ans)+']'", "def __init__(self):\n self.q = deque()", "def __str__(self):\n\n list_str = ''\n current = self.head\n while current:\n # print(current, \"current\")\n list_str += str(current.value ) + ', '\n current = current.next\n \n return list_str[:-2]", "def to_string(self):\n try:\n items = \" \"\n current = self.head\n while current:\n items += f\"{ {current.value} }->\"\n current=current.next\n items+=\"NULL\"\n print (items)\n return items\n # items.append(current.value)\n # current = current.next\n # print(''.join(f\"{ {k[1]} }->\" for k in enumerate(items))+'NULL')\n # return(''.join(f\"{ {k[1]} }->\" for k in enumerate(items))+'NULL')\n except Exception as error:\n print (f\"There is error in __init__ of LinkedList, the error {error}\")", "def __repr__(self):\n s = [\"{} vertices, {} edges\\n\".format(self._V, self._E)]\n for v in range(self._V):\n s.append(\"%d : \" % (v))\n for w in self._adj[v]:\n s.append(\"%d \" % (w))\n s.append(\"\\n\")\n\n return \"\".join(s)", "def __str__(self):\n data_str = [str(i) for i in self._data]\n return \"QUEUE { \" + \", \".join(data_str) + \" }\"", "def __str__(self):\n data_str = [str(i) for i in self._data]\n return \"QUEUE { \" + \", \".join(data_str) + \" }\"", "def __str__(self):\n\n list_str = ''\n current = self.head\n while current:\n # print(current, \"current\")\n list_str += str(current.value ) + ', '\n current = current.next\n return list_str[:-2]", "def __str__(self):\n cur_node = self.head\n str_list = ['{']\n while cur_node is not None:\n str_list.append(str(cur_node))\n if cur_node is not self.tail:\n str_list.append(', ')\n cur_node = cur_node.next_node\n str_list.append('}')\n return ''.join(str_list)", "def __str__(self):\n return \"({},{},{})\".format(self.tail, self.head, self.weight)", "def peek_back(self):\n # if the deque is empty\n if self.is_empty():\n # raise an IndexError\n raise IndexError()\n # if deque is not empty, return back's data\n return self.back.data", "def state(self):\n new_list = []\n item = self.head\n while item:\n new_list.append(item.data)\n item = item.next\n return str(new_list).replace(' ', '')", "def __repr__(self):\n return str(((\"P\" if self.is_P() else \"Q\"),self._children))", "def __str__(self) -> str:\n out = \"STACK: \" + str(self.da.length()) + \" elements. 
[\"\n out += ', '.join([str(self.da.get_at_index(_))\n for _ in range(self.da.length())])\n return out + ']'", "def __str__(self):\n c = self\n ans = \"[\"\n while c:\n ans += \".\"\n c = c.next\n return ans + \"]\"", "def __str__(self):\n\t\tself._synchronize_attributes()\n\t\ts = \"\"\n\t\tqueue = c3.Queue()\n\t\tlevel = 0\n\t\tqueue.enqueue((1, self._root))\n\t\twhile queue.peek():\n\t\t\tnodelev, node = queue.dequeue()._data\n\t\t\tif (not node):\n\n\t\t\t\t#NODE IS NOT THERE - just a placeholder\n\t\t\t\t#print spacing and enqueue fake left and right children\n\t\t\t\t#but stops if they would be past the max depth of the tree\n\t\t\t\tif ((self._depth - nodelev + 1) <= 0):\n\t\t\t\t\tcontinue\n\n\t\t\t\tif (nodelev != level):\n\t\t\t\t\ts += \"\\n\"\n\t\t\t\t\t#PRINT THE INDENT\n\t\t\t\t\tindent = \" \"*int((self._max_chars)*(2**(self._depth - nodelev) - 1))\n\t\t\t\t\ts += indent\n\t\t\t\t\tlevel = nodelev\n\n\t\t\t\t#PRINT THE SPACING\n\t\t\t\ts += \" \"*(self._max_chars)*(2**(self._depth - nodelev + 1) - 1)\n\n\t\t\t\t#PRINT SPACES TO REPLACE DATA\n\t\t\t\ts += \" \"*self._max_chars\n\n\t\t\t\t#Enqueue fake children\n\t\t\t\tqueue.enqueue((nodelev + 1, None))\n\t\t\t\tqueue.enqueue((nodelev + 1, None))\n\t\t\t\tcontinue\n\n\t\t\tif (nodelev != level):\n\t\t\t\ts += \"\\n\"\n\t\t\t\t#PRINT THE INDENT\n\t\t\t\tindent = \" \"*(self._max_chars)*(2**(self._depth - nodelev) - 1)\n\t\t\t\ts += indent\n\t\t\t\tlevel = nodelev\n\n\t\t\t#adds preceding \"|\"s if the str length of the data is smaller than the max\n\t\t\tfor i in range(int(self._max_chars - len(str(node.value())))):\n\t\t\t\ts += \"|\"\n\t\t\ts += str(node.value()) \n\n\t\t\t#PRINT THE SPACING\n\t\t\tspacing = \" \"*(self._max_chars)*(2**(self._depth - nodelev + 1) - 1)\n\t\t\ts += spacing\n\n\t\t\t#Enqueues\n\t\t\tif node.lchild():\n\t\t\t\tqueue.enqueue((nodelev + 1, node.lchild()))\n\t\t\telse:\n\t\t\t\t#ENQUEUES A FAKE NODE IN ORDER TO FORMAT THE TREE FOR MISSING NODES\n\t\t\t\tqueue.enqueue((nodelev + 1, None))\n\t\t\tif node.rchild():\n\t\t\t\tqueue.enqueue((nodelev + 1, node.rchild()))\n\t\t\telse:\n\t\t\t\t#ENQUEUES A FAKE NODE IN ORDER TO FORMAT THE TREE FOR MISSING NODES\n\t\t\t\tqueue.enqueue((nodelev + 1, None))\n\t\ts += \"\\n\"\n\t\treturn s", "def __repr__(self):\n s = \"\"\n for v in self.V():\n s += f\"{v.__repr__()}\\n\"\n \n return s", "def __repr__(self):\n return \"Bag({})\".format(self.items)", "def __str__(self):\n return str(self._heap)", "def serialize(self):\n return self.instantiate_queue()", "def display(self):\n container = []\n current = self.head\n while current is not None:\n container.append(current.val)\n current = current.next\n print(tuple(container))\n return tuple(container)", "def show(self):\n\n if self.front == None:\n print(\"Queue is empty\")\n return\n\n while self.front.next != None:\n print(self.front.data)\n self.front = self.front.next\n\n print(self.front.data)", "def dequeue(self):\n temp = self.front\n self.front = self.front.getPtr()\n return temp.getData()", "def serialize(self, root):\n if not root:\n return \"[]\"\n queue = collections.deque()\n queue.append(root)\n res = []\n while queue:\n node = queue.popleft()\n if node:\n res.append(str(node.val))\n queue.append(node.left)\n queue.append(node.right)\n else:\n res.append(\"null\")\n return '[' + ','.join(res) + ']'", "def __str__(self) -> str:\n result = \"[\"\n for i in range(len(self)):\n if i > 0:\n result += ', '\n result += str(self[i])\n result += ']'\n return result", "def __str__(self):\n reprStr = ''\n 
currNode = self.head\n while currNode:\n reprStr = reprStr + str(currNode.count) + ' ' + str(currNode.data) + '\\n'\n currNode = currNode.next\n return reprStr", "def display(self):\n node = self.head\n display_this = []\n while node:\n display_this.append(node.data)\n node = node.next\n return str(display_this).replace(\"[\", \"(\").replace(\"]\", \")\")", "def __repr__(self):\n return str(self.data)", "def __repr__(self):\n\t\treturn repr(self.data)", "def __repr__(self):\n return str(self._stack_items)", "def __str__(self):\r\n\r\n if self._size > 0:\r\n\r\n lst = [str(self._data[item]) for item in range(self._size)]\r\n str1 = str(lst) + \" Capacity: \" + str(self._capacity)\r\n\r\n return str1\r\n\r\n else:\r\n return \"Empty Stack\"", "def print_queue(self):\n for value in self.data:\n element = f'| {value} |'\n print(element)" ]
[ "0.82023656", "0.82023656", "0.7984831", "0.7795593", "0.6640371", "0.6613635", "0.65820986", "0.64977366", "0.6457218", "0.64357615", "0.6344762", "0.6335218", "0.63308865", "0.6327256", "0.6327256", "0.6296222", "0.62726325", "0.62721485", "0.6218142", "0.61443806", "0.61336154", "0.61336154", "0.61163634", "0.6105947", "0.60993403", "0.60869694", "0.6084775", "0.6073211", "0.6073171", "0.60424507", "0.6041646", "0.60096097", "0.60089964", "0.6008664", "0.5961566", "0.5960984", "0.5960543", "0.59288466", "0.59215164", "0.59139967", "0.5895079", "0.58735543", "0.58735543", "0.58735543", "0.58735543", "0.58703744", "0.58534795", "0.58389306", "0.58285135", "0.58285135", "0.5812286", "0.5797806", "0.5791159", "0.5789064", "0.57880545", "0.5769457", "0.5767346", "0.57469434", "0.57468677", "0.5739864", "0.573808", "0.57355577", "0.5728539", "0.56974596", "0.56950456", "0.5694237", "0.5690614", "0.56833726", "0.56822854", "0.568142", "0.5666887", "0.56647784", "0.56638086", "0.5654252", "0.5654252", "0.5649292", "0.56457096", "0.56388736", "0.562844", "0.5623631", "0.56232965", "0.5622738", "0.5620604", "0.561947", "0.5616047", "0.5606147", "0.559664", "0.55936325", "0.5589525", "0.5586463", "0.55845886", "0.5581832", "0.5580501", "0.5576542", "0.55754596", "0.55584335", "0.55582213", "0.5554474", "0.5543049", "0.553159" ]
0.60730755
29
Returns the full path for a relative path
def relative_path(__file__, path): return os.path.abspath(os.path.join(os.path.dirname(__file__), path))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_relative_path(path: str):\n return os.path.relpath(path, get_project_root())", "def relative(self, path):\n return re.sub(self.path_regex, '', path).lstrip(os.sep)", "def get_path(relative_path=None):\n\n root_path = os.path.dirname(os.path.dirname(__file__))\n\n if relative_path is None:\n return root_path\n else:\n return os.path.abspath(os.path.join(root_path, relative_path))", "def relativePath(path):\n spath = pathSplit(path)\n return spath[-1]", "def get_relative_path(self, file_path):\n file_path = os.path.abspath(file_path)\n if self.base_dir is not None:\n file_path = file_path.replace(os.path.abspath(self.base_dir), \"\")\n assert file_path[0] == \"/\"\n file_path = file_path[1:]\n return file_path", "def get_abs_path(file_path, relative_path):\n import os\n dir_path = os.path.dirname(file_path)\n abs_path = os.path.join(dir_path, relative_path)\n return abs_path", "def _get_relative_path(self, abs_path):\r\n relative_path = os.path.relpath(abs_path, settings.PROJECT_ROOT)\r\n return relative_path", "def getAbsolutePath(relPath):\n currDir = os.path.dirname(__file__)\n return os.path.join(currDir, relPath)", "def get_abs_path(relative_path):\n if os.path.isabs(relative_path):\n return relative_path\n path_parts = relative_path.split(os.sep)\n abs_path = os.path.abspath('.')\n for path_part in path_parts:\n abs_path = os.path.abspath(os.path.join(abs_path, path_part))\n return abs_path", "def get_relative_path(dir, full_path):\n if dir[-1] == '/' or dir[-1] == ':':\n return full_path[ len(dir) : ]\n else:\n return full_path[ len(dir)+1 : ]", "def resolve_relative_path(path):\n if os.path.isabs(path):\n return path\n root_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))\n return os.path.join(root_dir, path)", "def _absPath(self, relpath):\n\n # Pass through URIs and absolute paths.\n if self.isUrl(relpath) or relpath[0] == '/':\n return relpath\n\n # This won't deal with ~user/ syntax, but it's much less\n # common anyway.\n if relpath.startswith('~/') and 'HOME' in os.environ:\n return os.path.join(os.environ['HOME'], relpath[2:])\n\n if self._configFileStack:\n relativeTo = os.path.dirname(self._configFileStack[-1])\n else:\n relativeTo = os.getcwd()\n\n if self.isUrl(relativeTo):\n parts = urlparse.urlsplit(relativeTo)\n return urlparse.urlunsplit((parts.scheme, parts.netloc, os.path.normpath(os.path.join(parts.path, relpath)), parts.query, parts.fragment))\n return os.path.normpath(os.path.join(relativeTo, relpath))", "def resource_path(relative_path):\n return os.path.join(BASEPATH, relative_path)", "def get_relative_path(path, start_path=\"\"):\r\n if start_path:\r\n rel_path = lib_path.relpath(path, start_path)\r\n else:\r\n rel_path = lib_path.relpath(path)\r\n return rel_path", "def relative_path(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"relative_path\")", "def relative_path(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"relative_path\")", "def relative_path(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"relative_path\")", "def relative_path(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"relative_path\")", "def relative_path(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"relative_path\")", "def _abs_path(rel_path):\n return os.path.join(BASE_DIR, rel_path)", "def get_relative_path(path):\r\n components = split_all(path)\r\n if len(components) <= 1:\r\n return os.curdir\r\n else:\r\n parents = [os.pardir] * (len(components) - 
1)\r\n return os.path.join(*parents)", "def relative_path(root_dir, dirpath, f):\n full = os.path.join(dirpath, f)\n if not root_dir:\n return full\n if not full.startswith(root_dir):\n print(\"ERROR - bad path for root\", full)\n return None\n full = full[len(root_dir):]\n if full.startswith(\"/\"):\n return full[1:]\n return full", "def full_path(self):\n return os.path.abspath(self.path)", "def relpath(path):\n\n return os.path.relpath(path).replace(\"\\\\\", \"/\")", "def absolute(self):\n if self.relative == '':\n return self.root # don't join in this case as that appends trailing '/'\n return os.path.join(self.root, self.relative)", "def absolute_physical_path(self) -> str:\n return self._path", "def relative_path(filename):\n length = len(os.path.abspath(DOC_BUILD_DIR)) + 1\n return os.path.abspath(filename)[length:]", "def get_relative_path(self):\n return urlparse(self.browser.current_url).path", "def path(cls, relpath=None):\r\n base = os.getcwd() if not ParseContext._active else cls.locate().current_buildfile.parent_path\r\n return os.path.abspath(os.path.join(base, relpath) if relpath else base)", "def abspath(self, ref):\n \n directory, path = get_location(self.directory, ref.strip(),\n current=dirname(self.relative))\n path = join_fb_root(join(directory, path))\n return path", "def _GetRelPath(self, filename):\r\n absname = os.path.join(self.repo_dir, filename)\r\n return os.path.relpath(absname)", "def _relpath(self, path):\n\n # abandon query parameters\n path = path.split('?', 1)[0]\n path = path.split('#', 1)[0]\n path = os.path.normpath(unquote(path))\n words = path.split('/')\n words = filter(None, words)\n path = ''\n for word in words:\n drive, word = os.path.splitdrive(word)\n head, word = os.path.split(word)\n if word not in (os.curdir, os.pardir):\n path = os.path.join(path, word)\n\n return path", "def rel_resolve(path):\n if os.path.isabs(path):\n return os.path.abspath(path)\n else:\n return os.path.join(SCRIPTDIR, path)", "def _get_rel_path(self, file_path: Union[str, os.PathLike]) -> Optional[str]:\n file_path = Path(file_path).absolute()\n try:\n # use os.path.relpath instead of Path.relative_to in case file_path is not a child of self.base_path\n return os.path.relpath(file_path, self.base_path)\n except ValueError:\n # 2 paths are on different drives\n return None", "def build_relative_path(full_path, prefix='/', split_on='/data/'):\n splits = full_path.split(split_on)\n return os.path.join(prefix, split_on, splits[-1])", "def relativize(path: str):\n return join('.', path)", "def _GetRelPath(self, filename):\n assert filename.startswith(self.subdir), (filename, self.subdir)\n return filename[len(self.subdir):].lstrip(r\"\\/\")", "def get_path():\n return path.abspath(path.dirname(path.dirname(__file__)))", "def build_relpath(self):\n return join_path(\"..\", self.build_dirname)", "def abs_path(self) -> str:\n full_path = '/'.join(folder.name for folder in reversed(self.ancestors))\n return f'/{full_path}/'", "def absPath(path):\n return os.path.join(os.path.dirname(os.path.abspath(__file__)), path)", "def get_relative_pathname(self):\n return os.path.join(Exam.EXAM_FILES_LOCATION,\n str(self.unique_id)[0:2],\n str(self.unique_id) + self.file_ext)", "def inputpathrel(self):\n if self.config:\n relpath = os.path.relpath(self.inputpath, self.config.workingdir)\n\n if relpath.startswith(\"../\"):\n return self.inputpath\n\n else:\n return relpath\n\n return self.inputpath", "def relpath(path, start=\".\"):\n if start == \".\":\n start = os.curdir\n\n try:\n 
return os.path.relpath(path, start)\n except ValueError:\n # On Windows, paths on different devices prevent it to work. Use that\n # full path then.\n if isWin32OrPosixWindows():\n return os.path.abspath(path)\n raise", "def _GetRelativeLabelPath(self, label):\n\n if self._AreLabelsPaths():\n return label\n\n return os.path.join(*self.GetLabelComponents(label))", "def get_relative_path(self):\n if self.dip or self.sip or self.replica:\n raise PackageError(\n \"Get relative path for sip or replica packages not yet implemented\"\n )\n if self.deleted:\n raise PackageError(\"There are no relative paths for deleted packages\")\n if self.uuid is None:\n raise PackageError(\"Cannot generate a relative path without a package UUID\")\n rel = \"\"\n left_offset = len(self.default_pair_tree)\n right_offset = -len(self.compressed_ext)\n try:\n if self.current_path.endswith(self.compressed_ext):\n rel = self.current_path[left_offset:right_offset]\n else:\n rel = self.current_path[left_offset:]\n except AttributeError:\n raise PackageError(\"Current path doesn't exist for the package\")\n return \"{}/data/METS.{}.xml\".format(rel, self.uuid)", "def relpath(path, start=None):\n relative = get_instance(path).relpath(path)\n if start:\n # Storage relative path\n # Replaces \"\\\" by \"/\" for Windows.\n return os_path_relpath(relative, start=start).replace('\\\\', '/')\n return relative", "def _resolve_relative_path(filepath: str):\n if not filepath:\n return None\n\n inf_path = os.path.join(os.path.dirname(__file__), filepath)\n\n return inf_path", "def real_absolute_path(path):\n return os.path.realpath(absolute_path(path))", "def relative(self):\n rel = self.path\n if self.params:\n rel += ';' + self.params\n if self.query:\n rel += '?' + self.query\n if self.fragment:\n rel += '#' + self.fragment\n return rel", "def get_path() -> str:\n return os.path.dirname(os.path.realpath(__file__))", "def getRelativePath(fullPath, rootPath, liberalChars=True):\n\tif not fullPath.startswith(rootPath):\n\t\traise ValueError(\n\t\t\t\"Full path %s does not start with resource root %s\"%(fullPath, rootPath))\n\tres = fullPath[len(rootPath):].lstrip(\"/\")\n\tif not liberalChars and not _SAFE_FILENAME.match(res):\n\t\traise ValueError(\"File path '%s' contains characters known to\"\n\t\t\t\" the DaCHS authors to be hazardous in URLs. 
Please defuse the name\"\n\t\t\t\" before using it for published names (or see howDoI).\"%res)\n\treturn res", "def get_abs_path(path):\r\n abs_path = lib_path.abspath(path)\r\n return abs_path", "def getRelativePath(self, project):\n\n mainPath = self.__projects[0].getPath()\n projectPath = project.getPath()\n\n return os.path.relpath(projectPath, mainPath)", "def relative_path(self, item):\n if hasattr(item, 'path'):\n return self.path.relative_path(item.path)\n else:\n return self.path.relative_path(item)", "def path(relative_path):\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath(\"./\")\n\n print(\"[RESOURCE]\", relative_path)\n rPath = os.path.join(base_path, relative_path)\n return rPath", "def get_path_relative_to_http_root(file_path):\n return os.path.relpath(file_path, get_http_path_prefix())", "def _fullpath(self, path):\n splitpath = path.split(self._baseurl, 2)\n if len(splitpath) == 1:\n result = os.path.join(self._baseurl, path)\n else:\n result = path # path contains baseurl already\n return result", "def full_path(filename):\n\timport os.path\n\tfolder = os.path.dirname(os.path.realpath(__file__))\n\treturn os.path.join(folder, filename)", "def relpath(targpath: str, basepath: str='') -> str:\n pass", "def path_to_related(self, path):\n # self.path = \"...functional/fixtures/img/logo.png\"\n # path = \"...functional/fixtures/docs/index.md\"\n current = self.dir\n\n while not path.startswith(current.dir.path):\n current = current.dir.parent.dir\n\n remaining = current.relative(self.path)\n\n level = current.relative(path).count(os.sep)\n\n way_back = os.sep.join(['..'] * level) or '.'\n result = \"{0}/{1}\".format(way_back, remaining)\n\n return result", "def get_relative_source_path(self, source_path=None):\r\n if not source_path:\r\n source_path = self.source_path\r\n if source_path is None:\r\n return None\r\n\r\n return os.path.relpath(\r\n os.path.abspath(os.path.join(self.settings['PATH'], source_path)),\r\n os.path.abspath(self.settings['PATH'])\r\n )", "def rel_filename(filename, relative_to=None):\n if relative_to is None:\n relative_to = os.getcwd()\n if not relative_to.endswith(os.path.sep):\n relative_to += os.path.sep\n filename = os.path.normpath(os.path.abspath(filename))\n if filename.startswith(relative_to):\n return filename[len(relative_to):]\n else:\n return filename", "def __relative_path(self, p4file):\n return self.ctx.depot_path(p4file.depot_path).to_gwt()", "def rel_path(self) -> str:\n return os.path.dirname(self._filepath_oracle.filepath())", "def resource_path(relative_path):\n # base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\n # return os.path.join(base_path, relative_path)\n return os.path.join(os.path.dirname(os.path.abspath(__file__)), relative_path)", "def get_relative_path(self, source: str) -> str:\n abs_top_level_dir = os.path.normcase(\n os.path.normpath(self.get_top_level_directory()))\n abs_working_dir = os.path.normcase(\n os.path.normpath(os.path.join(os.getcwd(), source)))\n\n if not abs_working_dir.startswith(abs_top_level_dir):\n logger.debug(\n \"Repository top level directory is '{}'. 
Specified working directory is '{}'\".format(\n abs_top_level_dir, {abs_working_dir}))\n raise Exception(\n \"Experiment file is not inside current \"\n + self.get_type() + \" directory.\")\n\n result = abs_working_dir.replace(abs_top_level_dir, \"\")\n return self.norm_to_posix_path(result)", "def realpath(path: str) -> str:\n pass", "def relative_path(base, target):\r\n common, base_tail, target_tail = split_common(base, target)\r\n #print \"common:\", common\r\n #print \"base_tail:\", base_tail\r\n #print \"target_tail:\", target_tail\r\n r = len(base_tail) * [os.pardir] + target_tail\r\n if r:\r\n return os.path.join(*r)\r\n else:\r\n return os.curdir", "def get_path(self, path):\n return abspath(join(self.origin, *path))", "def get_relative_source_path(self, source_path=None):\n if not source_path:\n source_path = self.source_path\n if source_path is None:\n return None\n\n return posixize_path(\n os.path.relpath(\n os.path.abspath(os.path.join(\n self.settings['PATH'],\n source_path)),\n os.path.abspath(self.settings['PATH'])\n ))", "def get_current_module_path(module_path, relative_path=\"\"):\n base_path = os.path.dirname(module_path)\n file_path = os.path.join(base_path, relative_path)\n file_path = os.path.normpath(file_path)\n\n return file_path", "def get_relative_regression_path(cls) -> str:\n # Get the fully-qualified name of the subject (in dotted form)\n fully_qualified_name: str = cls.subject_type().__module__ + '.' + cls.subject_type().__qualname__\n\n # Replace the dots with platform-dependent slashes\n return fully_qualified_name.replace(\".\", os.sep)", "def resolvePath(rootPath, relPath):\n\trelPath = relPath.lstrip(\"/\")\n\tfullPath = os.path.realpath(os.path.join(rootPath, relPath))\n\tif not fullPath.startswith(rootPath):\n\t\traise ValueError(\n\t\t\t\"Full path %s does not start with resource root %s\"%(fullPath, rootPath))\n\tif not os.path.exists(fullPath):\n\t\traise ValueError(\n\t\t\t\"Invalid path %s. 
This should not happend.\"%(fullPath))\n\treturn fullPath", "def relpath(long_path, base_path):\n if not hasattr(path, \"relpath\"):\n\n if not long_path.startswith(base_path):\n raise RuntimeError(\"Unexpected arguments\")\n\n if long_path == base_path:\n return \".\"\n\n i = len(base_path)\n\n if not base_path.endswith(path.sep):\n i += len(path.sep)\n\n return long_path[i:]\n else:\n return path.relpath(long_path, base_path)", "def relpath(filename):\n return os.path.join(os.path.dirname(__file__), filename)", "def get_absolute_path(self):\n\t\treturn call_sdk_function('PrlFsEntry_GetAbsolutePath', self.handle)", "def to_file_path(self, resourcePath: str) -> PurePath:\n rel = resourcePath.replace('res://', '')\n return self._root.joinpath(rel)", "def resource_path(relative_path):\r\n try:\r\n base_path = sys._MEIPASS\r\n except Exception:\r\n base_path = os.path.abspath(\".\")\r\n\r\n return os.path.join(base_path, relative_path)", "def get_absolute_path(*args):\n directory = os.path.dirname(os.path.abspath(__file__))\n return os.path.join(directory, *args)", "def absolute_path(path):\n return os.path.abspath(\n os.path.join(\n os.path.dirname(__file__),\n \"..\",\n path\n )\n )", "def getAbsPath(*p):\n\tfrom os.path import abspath, join\n\tif len(p) >= 1:\n\t\treturn normalizePath(join(abspath(p[0]), *p))\n\treturn \"\"", "def get_full_path(self):\n return self.path_display", "def file_path_short(self):\r\n if not hasattr(self, '_file_path_short'):\r\n if self.file_path:\r\n result = None\r\n\r\n for path in sys.path:\r\n candidate = os.path.relpath(self.file_path, path)\r\n if not result or (len(candidate.split('/')) < len(result.split('/'))):\r\n result = candidate\r\n\r\n self._file_path_short = result\r\n else: \r\n self._file_path_short = None\r\n\r\n return self._file_path_short", "def combine_path(base_path, relative_ref):\n if (base_path != \"\"):\n os.chdir(base_path)\n # Handle if .tex is supplied directly with file name or not\n if relative_ref.endswith('.tex'):\n return os.path.join(base_path, relative_ref)\n else:\n return os.path.abspath(relative_ref) + '.tex'", "def get_path(self, path):\n if path.startswith('/') and not path.startswith('~/'):\n return os.getcwd() + '/' + path\n else:\n return path", "def realpath(self, path):\n return os.path.realpath(path)", "def realpath(self):\n return pbxpath.realpath(self.project(), self.abspath())", "def local_to_extern_path(self, path: PurePath) -> PurePath:\n return self.path_extern_supervisor / path.relative_to(self.path_supervisor)", "def get_full_path(self):\n try:\n full_path = os.path.abspath(self.FILENAME)\n return full_path\n except Exception as e:\n raise SystemExit(f\"Could not complete operation: {e}\")", "def relpath(filename):\n\n return os.path.join(os.path.dirname(__file__), filename)", "def _sanitize_relative_path(self, path):\n last = None\n path = os.path.normpath(path)\n while path != last:\n last = path\n # Note: os.path.join treats '/' as os.sep on Windows\n path = path.lstrip(os.sep).lstrip('/')\n path = path.lstrip(os.pardir).lstrip('..')\n drive, path = os.path.splitdrive(path) # for Windows\n return path", "def rel_node_path(self, node):\n return os.path.relpath(node.path, self.path)", "def get_fspath ( self, relpath=None ):\n if relpath:\n return self.root + os.sep + str ( relpath )\n else:\n return self.root", "def get_abspath(path: str) -> str:\n if os.path.isabs(path):\n return path\n\n return os.path.join(os.path.dirname(__file__), path)", "def full_path(self):\n fullpath = os.path.join(self.path, 
self.name)\n if self.path == \"\":\n fullpath = self.name\n return fullpath", "def abspath(self, path):\n # We do this here to reduce the 'import numpy' initial import time.\n from urllib.parse import urlparse\n\n # TODO: This should be more robust. Handles case where path includes\n # the destpath, but not other sub-paths. Failing case:\n # path = /home/guido/datafile.txt\n # destpath = /home/alex/\n # upath = self.abspath(path)\n # upath == '/home/alex/home/guido/datafile.txt'\n\n # handle case where path includes self._destpath\n splitpath = path.split(self._destpath, 2)\n if len(splitpath) > 1:\n path = splitpath[1]\n scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path)\n netloc = self._sanitize_relative_path(netloc)\n upath = self._sanitize_relative_path(upath)\n return os.path.join(self._destpath, netloc, upath)", "def shortpath(path):\r\n import os\r\n if path.startswith(base_dir):\r\n return path[len(base_dir) + len(os.path.sep) : ]\r\n return path", "def get_relative_path (folder, file) :\n if not os.path.exists (folder) : raise PQHException (folder + \" does not exist.\")\n if not os.path.exists (file) : raise PQHException (file + \" does not exist.\")\n sd = folder.replace(\"\\\\\",\"/\").split(\"/\")\n sf = file.replace(\"\\\\\",\"/\").split(\"/\")\n for i in range (0, len (sd)) :\n if i >= len (sf) : break\n elif sf [i] != sd [i] : break\n res = copy.copy (sd)\n j = i\n while i < len (sd) :\n i += 1\n res.append (\"..\")\n res.extend (sf [j:])\n return os.path.join (*res)", "def get_path_to(self, *args):\n return os.path.abspath(os.path.join(os.path.dirname(__file__), *args))" ]
[ "0.8088731", "0.8012601", "0.7939858", "0.7906186", "0.78002477", "0.77519965", "0.77290946", "0.76823074", "0.7662462", "0.76563984", "0.76526093", "0.76337326", "0.7589994", "0.7561121", "0.7560908", "0.7560908", "0.7560908", "0.7560908", "0.7560908", "0.755059", "0.7486504", "0.74226713", "0.7418113", "0.7387571", "0.73701566", "0.735198", "0.7339507", "0.7333916", "0.7330721", "0.73059475", "0.72877645", "0.7263973", "0.72569174", "0.7233936", "0.72290987", "0.7220667", "0.72195756", "0.72030234", "0.71883315", "0.71631914", "0.7155719", "0.71225", "0.7112152", "0.71095085", "0.7086002", "0.7084036", "0.7078903", "0.70623326", "0.70604515", "0.70596325", "0.7058787", "0.7051354", "0.7038121", "0.7037155", "0.70319235", "0.70276743", "0.7025113", "0.7024893", "0.7014441", "0.7007731", "0.69954115", "0.6990292", "0.69866216", "0.6982525", "0.69739366", "0.69723135", "0.6952412", "0.69516885", "0.6927497", "0.6917546", "0.6910527", "0.689939", "0.6892371", "0.68897283", "0.68785644", "0.6877897", "0.6871736", "0.68685216", "0.68605727", "0.68596", "0.6856019", "0.68533874", "0.68516886", "0.6850773", "0.6847366", "0.68473643", "0.684667", "0.6833865", "0.6828496", "0.68276376", "0.6826281", "0.6810161", "0.68081135", "0.6805806", "0.6804037", "0.68017197", "0.6792509", "0.6789444", "0.678829", "0.6782711" ]
0.7933692
3
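A minimal usage sketch for the relative_path function shown in the document field of the row above; the script layout and the config.yaml filename are assumptions for illustration only.

import os

def relative_path(__file__, path):
    # Join `path` onto the directory containing `__file__` and absolutize the result.
    return os.path.abspath(os.path.join(os.path.dirname(__file__), path))

# Hypothetical call: resolve a config file that lives next to the running script.
config_path = relative_path(__file__, "config.yaml")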
Returns an array of full paths for a relative path with globs
def expand_path(__file__, path_with_globs): return glob.glob(relative_path(__file__, path_with_globs))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_paths(file_path):\n return glob(path.join(file_path, '*'))", "def glob(path: str) -> list[str]:\n fs, relative_path = url_to_fs(path)\n return cast(list[str], fs.glob(relative_path))", "def get_paths(pattern):\n if not in_source_tree:\n pattern = '../' + pattern\n\n files = glob.glob(os.path.normpath(os.path.join(top_dir, pattern)))\n return files", "def expand_paths(__file__, paths_with_globs):\n if isinstance(paths_with_globs, str):\n return expand_path(__file__, paths_with_globs)\n else:\n expanded_globs = [\n expand_path(__file__, path) for path in paths_with_globs\n ]\n # Flatten\n return list(itertools.chain.from_iterable(expanded_globs))", "def recursive_glob(path):\n if \"*\" not in path:\n # Glob isn't needed.\n return [path]\n elif \"**\" not in path:\n # Recursive glob isn't needed.\n return path_utils.glob(path)\n else:\n return path_utils.glob(path, recursive=True)", "def abspath(files):\n\n files = sum([glob.glob(x) for x in files], [])\n return [os.path.abspath(x) for x in files]", "def glob_fs(self):\n\n found_files = []\n for pattern in self.glob_patterns:\n found_files += [PathString(present_file)\n for present_file in glob.glob(pattern)]\n return found_files", "def glob_paths(self, name, source, pattern, test_data=()):\n assert isinstance(source, config_types.Path)\n result = self._run(\n name, ['glob', source, pattern],\n lambda: self.test_api.glob_paths(test_data),\n self.m.raw_io.output_text())\n ret = [source.join(*x.split(self.m.path.sep))\n for x in result.stdout.splitlines()]\n result.presentation.logs[\"glob\"] = map(str, ret)\n return ret", "def expand_globpaths(globpaths, cwd=None):\n with cd(cwd):\n paths = sum((recursive_glob(p) for p in globpaths), [])\n return expand_paths(paths, cwd)", "def get_all_fullpaths(self):\n files = []\n for mf in self.manifests:\n files.extend(self.manifests[mf].get_fullpaths())\n return files", "def get_all_img_paths(path_to_folder):\n all_subfolders = glob.glob(path_to_folder + '*')\n all_paths = []\n for folder in all_subfolders:\n all_paths.extend(glob.glob(folder + '/*'))\n # get relative paths\n common_prefix = path_to_folder\n relative_paths = [os.path.relpath(path, common_prefix) for path in all_paths]\n return relative_paths", "def _get_paths():\n paths = [\n '/'\n ]\n return paths", "def paths(self):\n rc = []\n for pg in self.path_groups:\n rc.extend(pg.paths)\n return rc", "def handle_files_args(*paths_args):\n paths = []\n\n for paths_arg in paths_args:\n # Handle paths implicitly rooted at user home dir\n paths_arg = os.path.expanduser(paths_arg)\n\n # Expand wildcards\n paths_arg = glob.glob(paths_arg)\n\n # Create list of pathlib.Path objects\n paths.extend([pathlib.Path(path_arg) for path_arg in paths_arg])\n\n return paths", "def get_files(pattern):\n\n files = [realpath(p) for p in glob2.glob(pattern)]\n return list(set(files))", "def fullpathlist(path):\n try:\n return [os.path.join(path, filename) for filename in os.listdir(path)]\n except OSError:\n return []", "def resolve_file_paths(local_path):\n local_path = os.path.abspath(local_path)\n files = []\n if local_path.find('*') > -1:\n # Supplied path is a pattern - relative directory will be the\n # path up to the first wildcard\n ref_dir_str = local_path.split('*')[0].rstrip('/\\\\')\n if not os.path.isdir(ref_dir_str):\n ref_dir_str = os.path.dirname(ref_dir_str)\n ref_dir = pathlib.Path(ref_dir_str)\n pattern = local_path[len(ref_dir_str + os.pathsep):]\n files = [str(f) for f in ref_dir.glob(pattern) if f.is_file()]\n local_path = 
ref_dir_str\n else:\n if os.path.isdir(local_path):\n # Supplied path is a directory\n files = [os.path.join(local_path, f) for f in os.listdir(local_path)\n if os.path.isfile(os.path.join(local_path, f))]\n elif os.path.isfile(local_path):\n # Supplied path is a file\n files.append(local_path)\n local_path = os.path.dirname(local_path)\n return local_path, files", "def glob(self, pathname, with_matches=False):\r\n return list(self.iglob(pathname, with_matches))", "def glob(glob_pattern: str, directoryname: str) -> List[str]:\n matches = []\n for root, dirnames, filenames in os.walk(directoryname):\n for filename in fnmatch.filter(filenames, glob_pattern):\n absolute_filepath = os.path.join(root, filename)\n matches.append(absolute_filepath)\n return matches", "def to_path_globs(self, glob_match_error_behavior: GlobMatchErrorBehavior) -> PathGlobs:\n return self._generate_path_globs(\n (*self.file_includes, *self.dir_includes, *self.ignores), glob_match_error_behavior\n )", "def globs(cls, *globspecs, **kw):\r\n root = kw.pop('root', os.curdir)\r\n def relative_glob(globspec):\r\n for fn in glob.glob(os.path.join(root, globspec)):\r\n yield os.path.relpath(fn, root)\r\n def combine(files, globspec):\r\n return files ^ set(relative_glob(globspec))\r\n return cls(lambda: reduce(combine, globspecs, set()))", "def expand(self, path_list):\n path_list2 = []\n for path in path_list:\n if glob.has_magic(path):\n iterator = glob.iglob(path)\n path_list2.extend(iterator)\n else:\n path_list2.append(path)\n return path_list2", "def glob(self):\n self._deduplicate()\n result = []\n for entry in self._entries:\n pp = entry.posix_path()\n if GLOBBABLE_REGEX.search(pp):\n try:\n globs = glob.glob(entry.posix_path())\n result += globs\n except re.error:\n result.append(pp)\n else:\n result.append(pp)\n self._entries = [Path(g) for g in result]\n self._clean = False\n self._current = 0", "def expand_paths(paths, cwd=None):\n return [expand_path(x, cwd) for x in paths]", "def glob(path):\n path = os.path.abspath(path)\n if os.path.isdir(path):\n files = [d for d in [\n os.path.join(path, f) for f in os.listdir(path)\n ] if os.path.isfile(d)]\n else:\n files = glob.glob(path)\n print(\"Found {0} files\".format(len(files)))\n return files", "def expand_paths(self, paths):\n \n expanded_paths = []\n if isinstance(paths, str): # A single path\n expanded = glob.glob(paths)\n for e in expanded:\n expanded_paths.append(os.path.abspath(e))\n elif isinstance(paths, list): # Multiple path\n for p in paths:\n expanded = glob.glob(p)\n for e in expanded:\n expanded_paths.append(os.path.abspath(e))\n else:\n _LOG.exception(\"Unknown input for the 'add' function.\")\n return expanded_paths", "def files(pathspec):\n\treturn [f for f in glob.glob(pathspec)]", "def getFilesMulti(paths, pat):\n filelist = []\n for d in paths:\n filelist += glob.glob( os.path.join(d,pat) )\n filelist = [ f.replace(os.path.sep,'/') for f in filelist]\n return filelist", "def get_paths(input_folder):\n list_files = []\n conll_folder = glob.glob(input_folder + '/*.json')\n \n for filename in conll_folder:\n list_files.append(filename)\n\n return list_files", "def files(self):\n try:\n return glob.glob(self.path)\n except (AttributeError, TypeError):\n try:\n return glob.glob(self.alias)\n except (AttributeError, TypeError):\n return []", "def json_fpaths() -> [str]:\n return glob.glob(f\"{RECIPES_DIRPATH}/*.json\")", "def _resolve_paths(paths):\n allowed_ext = tuple(MIMES.keys())\n\n resolved = []\n for path in paths:\n if 
os.path.isdir(path):\n resolved.extend(\n entry.path for entry in os.scandir(path)\n if entry.is_file() and entry.name.lower().endswith(allowed_ext)\n )\n elif os.path.isfile(path) and path.lower().endswith(allowed_ext):\n resolved.append(path)\n return resolved", "def match_glob(pathname):\n # type: (six.text_type) -> List[six.text_type]\n if sys.version_info >= (3, 5):\n result = glob.glob(pathname, recursive=True)\n else:\n # use the third party glob library to handle a recursive glob.\n result = glob2.glob(pathname)\n return result", "def get_file_paths():\n audio_folder = MY_DIR + '/assets'\n\n audio_addy_list = []\n for file in os.scandir(audio_folder):\n audio_addy_list.append(file.path)\n\n return audio_addy_list", "def to_path_globs(self, glob_match_error_behavior: GlobMatchErrorBehavior) -> PathGlobs:\n return self._generate_path_globs((*self.includes, *self.ignores), glob_match_error_behavior)", "def glob(pattern: str) -> List[str]:\n\n path = clean_path(pattern)\n result = Stat._cache.get(path)\n\n if isinstance(result, BaseException):\n return []\n\n if result is None:\n paths = glob_files(pattern, recursive=True)\n if paths != [pattern]:\n return [clean_path(path) for path in paths]\n result = Stat._result(pattern, throw=False)\n assert not isinstance(result, BaseException)\n\n return [pattern]", "def relative_to_buildroot(self):\n return [os.path.join(self.rel_path, source) for source in self.source_paths]", "def get_filepattern_paths(filepath, file_pattern):\n found_patterns = glob.glob(filepath+file_pattern)\n\n return found_patterns", "def _get_file_paths(self, ignored_exts: Optional[Set[str]]) -> List[str]:\n dir_path = os.path.join(self._target_dir, '**')\n all_paths = glob.glob(dir_path, recursive=True)\n if ignored_exts is None:\n return [p for p in all_paths if os.path.isfile(p)]\n file_paths = [p for p in all_paths if self._extr_ext(p) not in ignored_exts]\n return [p for p in file_paths if os.path.isfile(p)]", "def _glob_files(directories, extensions):\n pwd = Path(__file__).resolve().parent\n open3d_root_dir = pwd.parent\n\n file_paths = []\n for directory in directories:\n directory = open3d_root_dir / directory\n for extension in extensions:\n extension_regex = \"*.\" + extension\n file_paths.extend(directory.rglob(extension_regex))\n file_paths = [str(file_path) for file_path in file_paths]\n file_paths = sorted(list(set(file_paths)))\n return file_paths", "def recursive_glob(stem, file_pattern):\n\n if sys.version_info >= (3, 5):\n return glob(stem + \"/**/\" + file_pattern, recursive=True)\n else:\n # gh-316: this will avoid invalid unicode comparisons in Python 2.x\n if stem == str(\"*\"):\n stem = \".\"\n matches = []\n for root, dirnames, filenames in os.walk(stem):\n for filename in fnmatch.filter(filenames, file_pattern):\n matches.append(path_join_robust(root, filename))\n return matches", "def recursive_glob(rootdir=\".\", suffix=\"\"):\n image_paths = []\n for looproot, _, filenames in os.walk(rootdir):\n for filename in filenames:\n if filename.endswith(suffix):\n image_paths.append(os.path.join(looproot, filename))\n return image_paths", "def recursive_glob(rootdir=\".\", suffix=\"\"):\n image_paths = []\n for looproot, _, filenames in os.walk(rootdir):\n for filename in filenames:\n if filename.endswith(suffix):\n image_paths.append(os.path.join(looproot, filename))\n return image_paths", "def get_all_paths(why = 'train'):\r\n if why == 'train':\r\n parent_folder = train_parent_folder\r\n if why == 'test':\r\n parent_folder = test_test_folder\r\n 
sub_folders = glob.glob(parent_folder) # Directories of all languages\r\n image_paths = [glob.glob(sub_folder + '\\*') for sub_folder in sub_folders] # Directories of all characters\r\n image_paths = sum(image_paths, []) # Flatten out the 2D list to a 1D list \r\n return image_paths", "def files(self) -> Generator[Path, None, None]:\n return Path(self.package).resolve(strict=True).glob(self.glob)", "def get_files_from_path(path=None):\n\n abspath = os.path.abspath(path)\n if os.path.isfile(abspath):\n files = [abspath]\n elif os.path.isdir(abspath):\n files = [\n os.path.join(abspath, fname)\n for fname in os.listdir(abspath)\n ]\n else:\n raise RuntimeError(f\"[-] '{path}' must be a file or directory\")\n return files", "def get_paths_list_from_folder(folder):\n names = os.listdir(folder)\n relative_paths = [os.path.join(folder, image_name) for image_name in names]\n return relative_paths", "def files(self):\r\n files = []\r\n for path in self.paths:\r\n if os.path.isdir(path):\r\n files.extend(glob.glob(os.path.join(path, f'*{self.ext}')))\r\n else:\r\n files.extend(glob.glob(path))\r\n return list(set(self.get_pattern(fname) for fname in files))", "def sources_absolute_paths(self):\r\n abs_target_base = os.path.join(get_buildroot(), self.target_base)\r\n for src in self.sources:\r\n yield os.path.join(abs_target_base, src)", "def input_files_from_path(path):\n import glob\n input_files = None\n if type(path) is list:\n input_files = []\n for p in path:\n if '*' in p:\n input_files.extend(glob.glob(p))\n else: # neither wildcard nor comma separated list\n input_files.append(p)\n else:\n if ',' in path:\n input_files = path.split(',')\n elif '*' in path:\n input_files = glob.glob(path)\n else: # neither wildcard nor comma separated list\n input_files = [path]\n input_files = [os.path.abspath(f) for f in input_files]\n return [f for f in input_files if os.path.exists(f) or f.startswith('/store')]", "def files_in_dir(root_dir):\n file_set = set()\n\n for dir_, _, files in os.walk(root_dir):\n for file_name in files:\n rel_dir = os.path.relpath(dir_, root_dir)\n rel_file = os.path.join(rel_dir, file_name)\n file_set.add(rel_file)\n\n return [Path(PureWindowsPath(f)) for f in file_set]", "def get_file_list(path: str) -> list:\n\treturn [f for f in listdir(path) if isfile(join(path, f))]", "def recursive_glob(self, rootdir='.', suffix=''):\n return [os.path.join(rootdir, filename)\n for filename in sorted(os.listdir(rootdir)) if filename.endswith(suffix)]", "def glob(self, glob_expr: str) -> Iterator[NicePath]:\n for path in self._root.glob(glob_expr):\n relative_path = path.relative_to(self._root)\n if not self._match_include(relative_path):\n continue\n if self._match_exclude(relative_path):\n continue\n\n yield NicePath(path)", "def get_file_list(input_dir):\n\tfile_paths = [input_dir +'/' + f for f in listdir(input_dir) if isfile(join(input_dir, f)) ]\n\treturn file_paths", "def recursive_glob(path, pattern):\n for root, dirnames, filenames in os.walk(path, followlinks=True):\n for filename in fnmatch.filter(filenames, pattern):\n yield os.path.join(root, filename)", "def globfiles(path, pattern):\n return Path(path).glob(pattern)", "def test_glob(tmp_path: Path) -> None:\n sample_original = Path(__file__).parent.joinpath(\"sample.txt\")\n\n tmp_sample1 = tmp_path.joinpath(\"sample1.txt\")\n shutil.copyfile(sample_original, tmp_sample1)\n tmp_sample2 = tmp_path.joinpath(\"sample2.txt\")\n shutil.copyfile(sample_original, tmp_sample2)\n\n tmp_sub_dir = tmp_path.joinpath(\"subdir\")\n 
tmp_sub_dir.mkdir()\n tmp_sample3 = tmp_sub_dir.joinpath(\"sample3.txt\")\n shutil.copyfile(sample_original, tmp_sample3)\n\n files: List[str] = glob.glob(str(tmp_path.joinpath(\"**/*.txt\")), recursive=True)\n assert files == [str(tmp_sample1), str(tmp_sample2), str(tmp_sample3)]\n\n # pathlib.Path equivalent\n file_paths: Generator[Path, None, None] = tmp_path.glob(\"**/*.txt\")\n assert [str(path) for path in file_paths] == files", "def path_generator(initial_root):\n for root, dirs, files in os.walk(initial_root):\n paths = [os.path.join(root, name) for name in files]\n return paths", "def get_paths(self):\n return self.path.split(',')", "def ffind(path, shellglobs=None, namefs=None, relative=True):\n if not os.access(path, os.R_OK):\n raise ScriptError(\"cannot access path: '%s'\" % path)\n\n fileList = [] # result list\n try:\n for dir, subdirs, files in os.walk(path):\n if shellglobs:\n matched = []\n for pattern in shellglobs:\n filterf = lambda s: fnmatch.fnmatchcase(s, pattern)\n matched.extend(list(filter(filterf, files)))\n fileList.extend(['%s%s%s' % (dir, os.sep, f) for f in matched])\n else:\n fileList.extend(['%s%s%s' % (dir, os.sep, f) for f in files])\n if not relative: fileList = list(map(os.path.abspath, fileList))\n if namefs:\n for ff in namefs: fileList = list(filter(ff, fileList))\n except Exception as e: raise ScriptError(str(e))\n return(fileList)", "def aggregate(\n *passed_paths: str, recursive: bool = False\n) -> List[pathlib.Path]:\n\n stack = []\n\n for passed_path in passed_paths:\n path = pathlib.Path(passed_path)\n\n if path.is_file():\n stack.append(path)\n\n return stack", "def recursive_glob(rootdir=\".\", suffix=\"\"):\n return [\n os.path.join(looproot, filename)\n for looproot, _, filenames in os.walk(rootdir)\n for filename in filenames\n if filename.endswith(suffix)\n ]", "def recursive_glob(rootdir=\".\", suffix=\"\"):\n return [\n os.path.join(looproot, filename)\n for looproot, _, filenames in os.walk(rootdir)\n for filename in filenames\n if filename.endswith(suffix)\n ]", "def _get_files(self, path, pattern):\n files = [\n os.path.join(path, file)\n for file in glob.glob(os.path.join(path, pattern))\n ]\n return files", "def glob1(self, dirname, pattern):\n names = self.listdir(dirname)\n if pattern[0] != '.':\n names = filter(lambda x: x[0] != '.',names)\n return fnmatch.filter(names, pattern)", "def recursive_glob(rootdir='.', suffix=''):\n return [os.path.join(looproot, filename)\n for looproot, _, filenames in os.walk(rootdir)\n for filename in filenames if filename.endswith(suffix)]", "def buildListOfFiles(searchGlob):\n return [fpath for fpath in glob2.iglob(searchGlob) if os.path.isfile(fpath)]", "def list_references(glob_pattern, observatory, full_path=False):\n references = []\n for path in utils.get_reference_paths(observatory):\n pattern = os.path.join(path, glob_pattern)\n references.extend(_glob_list(pattern, full_path))\n if full_path:\n references = [ref for ref in references if not os.path.isdir(ref)]\n return sorted(set(references))", "def gather_files(path_specs, file_name_pattern, recursively=False):\n files = []\n for path_spec in path_specs:\n if os.path.isdir(path_spec):\n files.extend(IOUtils.find_files(path_spec, file_name_pattern, recursively))\n elif os.path.isfile(path_spec):\n files.append(path_spec)\n return files", "def get_files(path: str, extension: str = '.wav') -> List[Path]:\n\n return list(Path(path).expanduser().resolve().rglob(f'*{extension}'))", "def _glob_list(pattern, full_path=False):\n if full_path:\n 
return sorted(glob.glob(pattern))\n else:\n return sorted([os.path.basename(fpath) for fpath in glob.glob(pattern)])", "def files(self, glob='*', limit=0):\n for a in self.filenames(glob, limit=limit):\n yield Path(a)", "def getpaths_fromdir(input_prefix_, directory_):\n path = os.path.join(input_prefix_, \"%s*\" % directory_, \"*\")\n return [tuple([directory_, path])]", "def GetAllFilepaths(root_directory):\n path_list = []\n for dirpath, _, filenames in os.walk(root_directory):\n for filename in filenames:\n path_list.append(os.path.abspath(os.path.join(dirpath, filename)))\n return path_list", "def paths(folder_path, extension):\n\n abs_paths = []\n \n for root, dirs, files in os.walk(folder_path):\n \n for f in files:\n \n fullpath = os.path.join(root, f)\n \n if os.path.splitext(fullpath)[1] == '.{}'.format(extension):\n \n abs_paths.append(fullpath)\n\n abs_paths = sorted(abs_paths, key=paths_sort)\n \n return abs_paths", "def expandPathsToFiles (paths):\n\n\t\tdef getFiles (dirPath):\n\t\t\tfor root, dirs, files in os.walk(dirPath):\n\t\t\t\tfor file in files:\n\t\t\t\t\tyield os.path.join(root, file)\n\n\t\tfiles = []\n\t\tfor path in paths:\n\t\t\tif os.path.isdir(path):\n\t\t\t\tfiles += list(getFiles(path))\n\t\t\telse:\n\t\t\t\tfiles.append(path)\n\n\t\treturn files", "def batchfiles(fdir='.'):\n \n os.chdir(fdir)\n names = glob.iglob('*')\n # Absolute path rather than relative path allows changing of directories in fn_name.\n names = sorted([os.path.abspath(name) for name in names])\n return(names)", "def pathext_list():\n return (os.environ.get(\"PATHEXT\") or \"\").split(os.pathsep)", "def expand_source_files(filenames, cwd=None):\n out = []\n for f in expand_globpaths(filenames.split(), cwd):\n if path_utils.isdir(f):\n # If we have a directory, collect all the .py files within it....\n out += recursive_glob(path_utils.join(f, \"**\", \"*.py\"))\n elif f.endswith(\".py\"):\n out.append(f)\n elif is_file_script(f, cwd):\n # .....and only process scripts when specfiied by the user.\n out.append(f)\n\n return set(out)", "def local_paths(self) -> List[Path]:\n return self._local_paths", "def resolve_specs(paths):\n specs = []\n for path in paths:\n if os.path.isdir(path):\n _, _, files = os.walk(path).next()\n specs.extend(os.path.join(path, fname) for fname in files)\n else:\n specs.append(path)\n return specs", "def complete_paths(path, filenames):\n\treturn [ \"{0}{1}\".format(path, filenames[i]) for i in range(0, len(filenames)) ]", "def _get_pyfilelist(srcpath, usegitignore=True) -> list:\n gitignorefile = srcpath / Path(\".gitignore\")\n if usegitignore and gitignorefile.exists():\n with gitignorefile.open('r') as f:\n lines = f.read().splitlines()\n gitignore = [\n srcpath / Path(line)\n for line in lines\n if not line.strip().startswith(\"#\")\n and len(line.strip()) > 1\n and Path(line).suffix == \"\"\n ] + [srcpath / Path(\".git\")]\n viablepaths = [\n p for p in srcpath.glob(\"*/\") if p.is_dir() and p not in gitignore\n ]\n filelist = set().union(*[set(p.glob(\"**/*.py\")) for p in viablepaths])\n filelist = filelist.union(*[set(srcpath.glob('*.py'))])\n else:\n filelist = srcpath.glob(\"**/*.py\")\n return [p.relative_to(srcpath) for p in filelist]", "def get_paths(self):\n paths = []\n for f in dir(self):\n o = getattr(self, f)\n if callable(o) and hasattr(o, '_path'):\n paths.append(getattr(o, '_path'))\n return paths", "def get_gswe_paths(dirpath, extension = '.tif'):\n\n\textension = '*' + extension\n\tpath = os.path.join(dirpath, extension)\n\tfile_path_list 
= glob.glob(path)\n\tfile_path_list.sort()\n\treturn file_path_list", "def get_files(path, exclude=None):\n exclude = exclude or '*.pyc'\n exclude_expr = '{}/**/{}'.format(path, exclude)\n exclude = set(glob.iglob(exclude_expr, recursive=True))\n\n expr = '{}/**'.format(path)\n paths = set(glob.iglob(expr, recursive=True)) - exclude\n\n files = []\n for filename in paths:\n if os.path.isfile(filename):\n files.append(os.path.abspath(filename))\n return files", "def _get_target_files(self) -> List[Path]:\n repo = get_git_repo()\n submodules = repo.submodules # type: ignore\n submodule_paths = [\n self._fname_to_path(repo, submodule.path) for submodule in submodules\n ]\n\n # resolve given paths relative to current working directory\n paths = [p.resolve() for p in self._paths]\n if self._base_commit is not None:\n paths = [\n a\n for a in (self._status.added + self._status.modified)\n # diff_path is a subpath of some element of input_paths\n if any((a == path or path in a.parents) for path in paths)\n ]\n changed_count = len(paths)\n click.echo(f\"| looking at {unit_len(paths, 'changed path')}\", err=True)\n paths = [\n path\n for path in paths\n if all(\n submodule_path not in path.parents\n for submodule_path in submodule_paths\n )\n ]\n if len(paths) != changed_count:\n click.echo(\n f\"| skipping files in {unit_len(submodule_paths, 'submodule')}: \"\n + \", \".join(str(path) for path in submodule_paths),\n err=True,\n )\n\n # Filter out ignore rules, expand directories\n self._ignore_rules_file.seek(0)\n patterns = Parser(self._base_path).parse(self._ignore_rules_file)\n\n file_ignore = FileIgnore(\n base_path=self._base_path, patterns=patterns, target_paths=paths\n )\n\n walked_entries = list(file_ignore.entries())\n click.echo(\n f\"| found {unit_len(walked_entries, 'file')} in the paths to be scanned\",\n err=True,\n )\n filtered: List[Path] = []\n for elem in walked_entries:\n if elem.survives:\n filtered.append(elem.path)\n\n skipped_count = len(walked_entries) - len(filtered)\n if skipped_count:\n click.echo(\n f\"| skipping {unit_len(range(skipped_count), 'file')} based on path ignore rules\",\n err=True,\n )\n\n relative_paths = [path.relative_to(self._base_path) for path in filtered]\n\n return relative_paths", "def path_list():\n return (os.environ.get(\"PATH\", None) or os.defpath).split(os.pathsep)", "def genome_paths(self, ext=\"fasta\"):\n return [\n os.path.join(self.path, genome)\n for genome in os.listdir(self.path)\n if genome.endswith(ext)\n ]", "def read_filepaths(self, directory):\n folder_paths = [os.path.join(directory, folder) for folder in os.listdir(directory) if not folder.startswith('.')]\n filepaths = [[os.path.join(cur_folder, cur_file) for cur_file in os.listdir(cur_folder)] for cur_folder in folder_paths]\n return filepaths", "def get_all_files(cwd):\n return os.listdir(cwd)", "def findFiles(target, path):\r\n\tfiles = []\r\n\tlyst = os.listdir(path)\r\n\tfor element in lyst:\r\n\t\tif os.path.isfile(element):\r\n\t\t\tif target in element:\r\n\t\t\t\tfiles.append(path + os.sep + element)\r\n\t\telse:\r\n\t\t\tos.chdir(element)\r\n\t\t\tfiles.extend(findFiles(target, os.getcwd()))\r\n\t\t\tos.chdir(\"..\")\r\n\treturn files", "def find(self, path_list):\n import fnmatch\n path_list2 = []\n for pattern in path_list:\n for root, _, filenames in os.walk('.'):\n for filename in fnmatch.filter(filenames, pattern):\n path_list2.append(os.path.join(root, filename))\n return path_list2", "def volume_paths(path):\n files = (os.path.join(path, f) for f in 
sorted(os.listdir(path)))\n return [f for f in files if os.path.isdir(f) or f.endswith('.zip')]", "def _path_files(self):\n\n if not os.path.exists(self.path):\n return None\n\n directory_content = os.listdir(self.path)\n files = []\n\n while len(directory_content) != 0:\n\n if not directory_content[0].startswith(self.path):\n directory_obj = os.path.join(self.path, directory_content[0])\n else:\n directory_obj = directory_content[0]\n\n if os.path.isfile(directory_obj):\n files.append(directory_obj)\n elif os.path.exists(directory_obj):\n temp_directory_content = os.listdir(directory_obj)\n for obj in temp_directory_content:\n directory_content.append(os.path.join(directory_obj, obj))\n directory_content.pop(0)\n\n return files", "def paths(self, toNative=True):\n if self.__mode == E5PathPickerModes.OpenFilesMode:\n return self.path(toNative=toNative).split(\";\")\n else:\n return [self.path(toNative=toNative)]", "def get_source_files(self):\n return [\n path.as_posix()\n for path in _Path(self.src_dir).rglob(\"*\")\n if not path.is_dir()\n ] + [\n (path / \"CMakeLists.txt\").as_posix()\n for path in _PurePath(self.src_dir).parents\n ]", "def all_files_under(path):\r\n for cur_path, dirnames, filenames in os.walk(path):\r\n for filename in filenames:\r\n yield os.path.join(cur_path, filename)", "def _recursive_file_search(self, path, pattern):\n matches = []\n for root, dirnames, filenames in os.walk(path):\n for filename in fnmatch.filter(filenames, pattern):\n matches.append(os.path.join(root, filename))\n\n return matches" ]
[ "0.78830874", "0.74837524", "0.73281705", "0.7273039", "0.7240019", "0.7209716", "0.7107599", "0.7090277", "0.70357305", "0.69723034", "0.69063663", "0.68546826", "0.68303967", "0.6820884", "0.68082666", "0.6802939", "0.6779365", "0.67275643", "0.6712124", "0.67007935", "0.666919", "0.6668771", "0.6655419", "0.66498345", "0.66493094", "0.6614659", "0.6612365", "0.66040355", "0.65995", "0.65812266", "0.65654075", "0.656423", "0.6550974", "0.65390074", "0.6530632", "0.6524588", "0.65180624", "0.65178007", "0.65150136", "0.64812887", "0.6466777", "0.6461655", "0.6461655", "0.6459017", "0.6429185", "0.6412336", "0.6399116", "0.63911635", "0.6382588", "0.63667065", "0.63557935", "0.6348125", "0.63382834", "0.6331325", "0.6313845", "0.630645", "0.63055825", "0.6299073", "0.6298701", "0.6281311", "0.6275986", "0.6261016", "0.62580067", "0.62580067", "0.62446064", "0.6222976", "0.6209855", "0.6204744", "0.619807", "0.6191656", "0.61874574", "0.61811346", "0.6170608", "0.6153743", "0.6152415", "0.61418235", "0.61402225", "0.6131748", "0.6131695", "0.61272407", "0.61186826", "0.6118158", "0.61062306", "0.60968345", "0.6095025", "0.60946214", "0.60860664", "0.6085196", "0.6083464", "0.60805625", "0.6076356", "0.6075513", "0.60731953", "0.6058916", "0.6054218", "0.60416526", "0.60393703", "0.6035879", "0.60312253", "0.60278195" ]
0.7740084
1
Returns full paths for a series of relative paths with globs
def expand_paths(__file__, paths_with_globs):
    if isinstance(paths_with_globs, str):
        return expand_path(__file__, paths_with_globs)
    else:
        expanded_globs = [
            expand_path(__file__, path) for path in paths_with_globs
        ]
        # Flatten
        return list(itertools.chain.from_iterable(expanded_globs))
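The positive document above delegates to an expand_path helper and flattens the per-pattern results with itertools.chain.from_iterable. Below is a minimal, self-contained sketch of the same pattern, assuming each glob is resolved with glob.glob relative to the directory of the given module file; the names expand_one and expand_many are illustrative, not from the source.

import glob
import itertools
import os

def expand_one(module_file, pattern):
    # Resolve one glob pattern relative to the directory of the given module file.
    base_dir = os.path.dirname(os.path.abspath(module_file))
    return glob.glob(os.path.join(base_dir, pattern))

def expand_many(module_file, patterns):
    # Accept either a single pattern string or an iterable of patterns.
    if isinstance(patterns, str):
        return expand_one(module_file, patterns)
    # Expand each pattern, then flatten the list of lists into one flat list.
    return list(itertools.chain.from_iterable(
        expand_one(module_file, p) for p in patterns
    ))

if __name__ == "__main__":
    # e.g. all Python and text files next to this script
    print(expand_many(__file__, ["*.py", "*.txt"]))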
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def expand_path(__file__, path_with_globs):\n return glob.glob(relative_path(__file__, path_with_globs))", "def get_paths(file_path):\n return glob(path.join(file_path, '*'))", "def glob_paths(self, name, source, pattern, test_data=()):\n assert isinstance(source, config_types.Path)\n result = self._run(\n name, ['glob', source, pattern],\n lambda: self.test_api.glob_paths(test_data),\n self.m.raw_io.output_text())\n ret = [source.join(*x.split(self.m.path.sep))\n for x in result.stdout.splitlines()]\n result.presentation.logs[\"glob\"] = map(str, ret)\n return ret", "def _get_paths():\n paths = [\n '/'\n ]\n return paths", "def abspath(files):\n\n files = sum([glob.glob(x) for x in files], [])\n return [os.path.abspath(x) for x in files]", "def get_paths(pattern):\n if not in_source_tree:\n pattern = '../' + pattern\n\n files = glob.glob(os.path.normpath(os.path.join(top_dir, pattern)))\n return files", "def json_fpaths() -> [str]:\n return glob.glob(f\"{RECIPES_DIRPATH}/*.json\")", "def get_all_fullpaths(self):\n files = []\n for mf in self.manifests:\n files.extend(self.manifests[mf].get_fullpaths())\n return files", "def get_all_paths(why = 'train'):\r\n if why == 'train':\r\n parent_folder = train_parent_folder\r\n if why == 'test':\r\n parent_folder = test_test_folder\r\n sub_folders = glob.glob(parent_folder) # Directories of all languages\r\n image_paths = [glob.glob(sub_folder + '\\*') for sub_folder in sub_folders] # Directories of all characters\r\n image_paths = sum(image_paths, []) # Flatten out the 2D list to a 1D list \r\n return image_paths", "def get_all_path(self, conf):\n\t\tpass", "def glob_fs(self):\n\n found_files = []\n for pattern in self.glob_patterns:\n found_files += [PathString(present_file)\n for present_file in glob.glob(pattern)]\n return found_files", "def expand_paths(self, paths):\n \n expanded_paths = []\n if isinstance(paths, str): # A single path\n expanded = glob.glob(paths)\n for e in expanded:\n expanded_paths.append(os.path.abspath(e))\n elif isinstance(paths, list): # Multiple path\n for p in paths:\n expanded = glob.glob(p)\n for e in expanded:\n expanded_paths.append(os.path.abspath(e))\n else:\n _LOG.exception(\"Unknown input for the 'add' function.\")\n return expanded_paths", "def path_generator(initial_root):\n for root, dirs, files in os.walk(initial_root):\n paths = [os.path.join(root, name) for name in files]\n return paths", "def paths(self):\n rc = []\n for pg in self.path_groups:\n rc.extend(pg.paths)\n return rc", "def sources_absolute_paths(self):\r\n abs_target_base = os.path.join(get_buildroot(), self.target_base)\r\n for src in self.sources:\r\n yield os.path.join(abs_target_base, src)", "def full_path(startPath,files):\n\n files = list_strings(files)\n base = os.path.split(startPath)[0]\n return [ os.path.join(base,f) for f in files ]", "def expand_globpaths(globpaths, cwd=None):\n with cd(cwd):\n paths = sum((recursive_glob(p) for p in globpaths), [])\n return expand_paths(paths, cwd)", "def glob(path: str) -> list[str]:\n fs, relative_path = url_to_fs(path)\n return cast(list[str], fs.glob(relative_path))", "def recursive_glob(path):\n if \"*\" not in path:\n # Glob isn't needed.\n return [path]\n elif \"**\" not in path:\n # Recursive glob isn't needed.\n return path_utils.glob(path)\n else:\n return path_utils.glob(path, recursive=True)", "def recursive_glob(rootdir=\".\", suffix=\"\"):\n image_paths = []\n for looproot, _, filenames in os.walk(rootdir):\n for filename in filenames:\n if filename.endswith(suffix):\n 
image_paths.append(os.path.join(looproot, filename))\n return image_paths", "def recursive_glob(rootdir=\".\", suffix=\"\"):\n image_paths = []\n for looproot, _, filenames in os.walk(rootdir):\n for filename in filenames:\n if filename.endswith(suffix):\n image_paths.append(os.path.join(looproot, filename))\n return image_paths", "def files(self) -> Generator[Path, None, None]:\n return Path(self.package).resolve(strict=True).glob(self.glob)", "def get_file_paths():\n audio_folder = MY_DIR + '/assets'\n\n audio_addy_list = []\n for file in os.scandir(audio_folder):\n audio_addy_list.append(file.path)\n\n return audio_addy_list", "def filepaths(self):\n pass", "def globs(cls, *globspecs, **kw):\r\n root = kw.pop('root', os.curdir)\r\n def relative_glob(globspec):\r\n for fn in glob.glob(os.path.join(root, globspec)):\r\n yield os.path.relpath(fn, root)\r\n def combine(files, globspec):\r\n return files ^ set(relative_glob(globspec))\r\n return cls(lambda: reduce(combine, globspecs, set()))", "def files(self, glob='*', limit=0):\n for a in self.filenames(glob, limit=limit):\n yield Path(a)", "def getFilesMulti(paths, pat):\n filelist = []\n for d in paths:\n filelist += glob.glob( os.path.join(d,pat) )\n filelist = [ f.replace(os.path.sep,'/') for f in filelist]\n return filelist", "def relative_to_buildroot(self):\n return [os.path.join(self.rel_path, source) for source in self.source_paths]", "def _resolvePathPatterns(self, sources, source):\n kept = []\n pattern = re.compile(source['pathPattern'])\n basedir = self._basePath / source['path']\n if (self._basePath.name == Path(self._largeImagePath).name and\n (self._basePath.parent / source['path']).is_dir()):\n basedir = self._basePath.parent / source['path']\n basedir = basedir.resolve()\n for entry in basedir.iterdir():\n match = pattern.search(entry.name)\n if match:\n if entry.is_file():\n kept.append((entry.name, entry, match))\n elif entry.is_dir() and (entry / entry.name).is_file():\n kept.append((entry.name, entry / entry.name, match))\n for idx, (_, entry, match) in enumerate(sorted(kept)):\n subsource = copy.deepcopy(source)\n # Use named match groups to augment source values.\n for k, v in match.groupdict().items():\n if v.isdigit():\n v = int(v)\n if k.endswith('1'):\n v -= 1\n if '.' 
in k:\n subsource.setdefault(k.split('.', 1)[0], {})[k.split('.', 1)[1]] = v\n else:\n subsource[k] = v\n subsource['path'] = entry\n for axis in self._axesList:\n stepKey = '%sStep' % axis\n valuesKey = '%sValues' % axis\n if stepKey in source:\n if axis in source or valuesKey not in source:\n subsource[axis] = subsource.get(axis, 0) + idx * source[stepKey]\n else:\n subsource[valuesKey] = [\n val + idx * source[stepKey] for val in subsource[valuesKey]]\n del subsource['pathPattern']\n sources.append(subsource)", "def expand(self, path_list):\n path_list2 = []\n for path in path_list:\n if glob.has_magic(path):\n iterator = glob.iglob(path)\n path_list2.extend(iterator)\n else:\n path_list2.append(path)\n return path_list2", "def expand_paths(paths, cwd=None):\n return [expand_path(x, cwd) for x in paths]", "def generate_paths(dirname, recursive=False):\n if recursive:\n gen = itertools.chain.from_iterable(\n map(lambda x: map(partial(os.path.join, x[0]), x[2]),\n os.walk(dirname)))\n else:\n gen = filter(os.path.isfile,\n map(partial(os.path.join, dirname), os.listdir(dirname)))\n return filter(lambda s: re.match('.+\\.midi?$', s, re.I), gen)", "def _get_file_paths(self):\n return [os.path.join(self.path, self.mode, 'waveforms', file_name + '.npy') for file_name in self.file_names]", "def glob_files(sc, url,verbose):\n URI = sc._gateway.jvm.java.net.URI\n Path = sc._gateway.jvm.org.apache.hadoop.fs.Path\n FileSystem = sc._gateway.jvm.org.apache.hadoop.fs.FileSystem\n fs = FileSystem.get(URI(\"hdfs:///\"), sc._jsc.hadoopConfiguration())\n l = fs.globStatus(Path(url))\n return [f.getPath().toString() for f in l]", "def glob(self):\n self._deduplicate()\n result = []\n for entry in self._entries:\n pp = entry.posix_path()\n if GLOBBABLE_REGEX.search(pp):\n try:\n globs = glob.glob(entry.posix_path())\n result += globs\n except re.error:\n result.append(pp)\n else:\n result.append(pp)\n self._entries = [Path(g) for g in result]\n self._clean = False\n self._current = 0", "def _get_ais_paths(self) -> list:\n ais_files = []\n year = self.year\n end_year = self.year\n for month in range(1, 13):\n end_month = month + 1\n if month == 12:\n end_year += 1\n end_month = 1\n\n for vessel_type in self.vessel_types:\n path_template = f\"{vessel_type}_{year}{month:02}01-{end_year}{end_month:02}01_total.tif\"\n fname = self.dir / path_template\n ais_files.append(fname)\n\n return ais_files", "def recursive_glob(self, rootdir='.', suffix=''):\n return [os.path.join(rootdir, filename)\n for filename in sorted(os.listdir(rootdir)) if filename.endswith(suffix)]", "def glob_datasets(glob_paths: Sequence[str]):\n if len(glob_paths) == 0:\n raise ValueError(\"No dataset path provided.\")\n\n # Apply glob:\n paths = tuple(glob.glob(glob_path) for glob_path in glob_paths)\n\n if len(paths) == 0:\n raise ValueError(\"Could not find any dataset with provided paths\", glob_paths)\n\n # concatenate list of paths:\n paths = reduce(lambda u, v: u + v, paths)\n\n # remove empty paths:\n paths = (path.strip() for path in paths)\n\n # remove empty paths:\n paths = (path for path in paths if len(path) > 0)\n\n # sort paths:\n paths = sorted(list(paths))\n\n return open_joined_datasets(paths), paths", "def complete_paths(path, filenames):\n\treturn [ \"{0}{1}\".format(path, filenames[i]) for i in range(0, len(filenames)) ]", "def _resolve_paths(paths):\n allowed_ext = tuple(MIMES.keys())\n\n resolved = []\n for path in paths:\n if os.path.isdir(path):\n resolved.extend(\n entry.path for entry in os.scandir(path)\n if 
entry.is_file() and entry.name.lower().endswith(allowed_ext)\n )\n elif os.path.isfile(path) and path.lower().endswith(allowed_ext):\n resolved.append(path)\n return resolved", "def genome_paths(self, ext=\"fasta\"):\n return [\n os.path.join(self.path, genome)\n for genome in os.listdir(self.path)\n if genome.endswith(ext)\n ]", "def get_paths(args):\n log, rest = get_log_path(args)\n out, _ = get_out_path(args)\n temp, _ = get_temp_path(args)\n return log, out, temp, rest", "def _make_path_list(cfg, dir_name, file_name, rank=None):\n if not cfg.DATASET.IS_ABSOLUTE_PATH:\n assert len(dir_name) == 1 or len(dir_name) == len(file_name)\n if len(dir_name) == 1:\n file_name = [os.path.join(dir_name[0], x) for x in file_name]\n else:\n file_name = [os.path.join(dir_name[i], file_name[i])\n for i in range(len(file_name))]\n\n if cfg.DATASET.LOAD_2D: # load 2d images\n temp_list = copy.deepcopy(file_name)\n file_name = []\n for x in temp_list:\n suffix = x.split('/')[-1]\n if suffix in ['*.png', '*.tif']:\n file_name += sorted(glob.glob(x, recursive=True))\n else: # complete filename is specified\n file_name.append(x)\n\n file_name = _distribute_data(cfg, file_name, rank)\n return file_name", "def get_data_in_paths(dfile, paths):\n for pth in paths:\n for f in os.listdir(pth):\n if f == dfile:\n return os.path.abspath(os.path.join(pth, dfile))", "def paths_for_od(self, r, s):\n pass", "def resolve_file_paths(local_path):\n local_path = os.path.abspath(local_path)\n files = []\n if local_path.find('*') > -1:\n # Supplied path is a pattern - relative directory will be the\n # path up to the first wildcard\n ref_dir_str = local_path.split('*')[0].rstrip('/\\\\')\n if not os.path.isdir(ref_dir_str):\n ref_dir_str = os.path.dirname(ref_dir_str)\n ref_dir = pathlib.Path(ref_dir_str)\n pattern = local_path[len(ref_dir_str + os.pathsep):]\n files = [str(f) for f in ref_dir.glob(pattern) if f.is_file()]\n local_path = ref_dir_str\n else:\n if os.path.isdir(local_path):\n # Supplied path is a directory\n files = [os.path.join(local_path, f) for f in os.listdir(local_path)\n if os.path.isfile(os.path.join(local_path, f))]\n elif os.path.isfile(local_path):\n # Supplied path is a file\n files.append(local_path)\n local_path = os.path.dirname(local_path)\n return local_path, files", "def get_paths(input_folder):\n list_files = []\n conll_folder = glob.glob(input_folder + '/*.json')\n \n for filename in conll_folder:\n list_files.append(filename)\n\n return list_files", "def mkglob(fullpaths: list, trim=False) -> str:\n string_list = []\n glob = None\n for fname in fullpaths:\n if trim:\n fname = re.sub(r\"^.*/(.*)$\", r\"\\1\", fname)\n # fname = re.sub(r\"^(.*)\\.fits?(\\.fz)*$\", r\"\\1\", fname)\n fname = re.sub(r\"^([^\\.]*)\\..*$\", r\"\\1\", fname) # trim suffix\n string_list.append(fname)\n logging.debug(\"string_list[]={}\".format(string_list))\n if len(string_list) == 1:\n glob = string_list[0]\n elif len(string_list) > 1:\n # title is longest common substring array\n # joined with *'s to look like a glob pattern\n ss_arr = []\n get_lcs_array(string_list, ss_arr, 0, \"\", 2)\n if ss_arr:\n glob = \"{}\".format(\"*\".join(ss_arr))\n if not re.match(ss_arr[0], string_list[0]):\n glob = \"*{}\".format(glob)\n if not re.search(r\"{}$\".format(ss_arr[-1]), string_list[0]):\n glob = \"{}*\".format(glob)\n return glob", "def get_paths(self):\n return self.path.split(',')", "def to_path_globs(self, glob_match_error_behavior: GlobMatchErrorBehavior) -> PathGlobs:\n return self._generate_path_globs(\n 
(*self.file_includes, *self.dir_includes, *self.ignores), glob_match_error_behavior\n )", "def _resolveFramePaths(self, sourceList):\n # we want to work with both _basePath / <path> and\n # _basePath / .. / <path> / <name> to be compatible with Girder\n # resource layouts.\n sources = []\n for source in sourceList:\n if source.get('pathPattern'):\n self._resolvePathPatterns(sources, source)\n else:\n self._resolveSourcePath(sources, source)\n for source in sources:\n if hasattr(source.get('path'), 'resolve'):\n source['path'] = source['path'].resolve(False)\n return sources", "def get_files(pattern):\n\n files = [realpath(p) for p in glob2.glob(pattern)]\n return list(set(files))", "def to_path_globs(self, glob_match_error_behavior: GlobMatchErrorBehavior) -> PathGlobs:\n return self._generate_path_globs((*self.includes, *self.ignores), glob_match_error_behavior)", "def pairof_MOGUK_search_filepaths(date_string, n_hour, data_basepath=default_data_basepath):\n hr_string = \"%02d\" % n_hour\n glob_spec_strs = [data_basepath + date_string + hr_string + t_s for t_s in ('*speed10mmean.grib','*dir10mmean.grib')]\n return glob_spec_strs", "def paths(self):\n return list(zip(*self.collected))[0]", "def warping_paths(self):\n return self.paths", "def get_paths(self):\n return self.paths", "def resolve_specs(paths):\n specs = []\n for path in paths:\n if os.path.isdir(path):\n _, _, files = os.walk(path).next()\n specs.extend(os.path.join(path, fname) for fname in files)\n else:\n specs.append(path)\n return specs", "def get_midi_paths(root):\n\treturn [os.path.join(dirpath, filename) \n\t\t\tfor dirpath, _, filenames in os.walk(root) \n\t\t\tfor filename in filenames \n\t\t\tif filename.endswith(\".mid\")]", "def files(pathspec):\n\treturn [f for f in glob.glob(pathspec)]", "def frame_paths(self, indx):\n if isinstance(indx, (int,np.integer)):\n return os.path.join(self['directory'][indx], self['filename'][indx])\n return [os.path.join(d,f) for d,f in zip(self['directory'][indx], self['filename'][indx])]", "def filepaths(self, langs) -> Generator[str, None, None]:\n for pv in self.projects(langs):\n yield from pv.filepaths()", "def segment_paths(root):\n directories = []\n history = history_path(root)\n for d in os.listdir(history):\n path = os.path.join(history, d)\n if os.path.isdir(path):\n directories.append(path)\n return sorted(directories)", "def all_image_paths(self):\n self.labels = [i for i in (self.get_immediate_subdirectories(self.root_dir))\n if not i.startswith('.')]\n\n for root, subFolders, files in os.walk(self.root_dir):\n files = [i for i in files if not i.startswith('.')]\n files = files[:self.img_num] # hard coded - will not read in\n for i in files:\n self.all_files.append(os.path.abspath(root) + '/'.join(subFolders) + '/' + i)", "def paths(self):\r\n return self._paths", "def parse_paths():\r\n sources = get_source_paths()\r\n results = collections.defaultdict(list)\r\n for root_dir in sources:\r\n\r\n for script_type, dirs in walkdirs(root_dir).iteritems():\r\n\r\n for d in dirs:\r\n logger.debug(d)\r\n\r\n # Add paths to environments\r\n if os.path.basename(d).lower().startswith(ICONS):\r\n results['XBMLANGPATH'].append(d)\r\n os.environ['XBMLANGPATH'] += os.pathsep + d\r\n\r\n if script_type == 'mel':\r\n results['MAYA_SCRIPT_PATH'].append(d)\r\n os.environ['MAYA_SCRIPT_PATH'] += os.pathsep + d\r\n else:\r\n results['PYTHONPATH'].append(d)\r\n site.addsitedir(d)\r\n return results", "def recursive_glob(rootdir='.', suffix=''):\n return [os.path.join(looproot, filename)\n 
for looproot, _, filenames in os.walk(rootdir)\n for filename in filenames if filename.endswith(suffix)]", "def localmod_paths(root_dir, parent_dir):\n return {\n 'F1' : os.path.join(root_dir, \"F1\"),\n 'F' : os.path.join(parent_dir, \"F\"),\n 'F2' : os.path.join(parent_dir, \"F2-local\"),\n 'D1' : os.path.join(root_dir, \"D1\"),\n 'D' : os.path.join(parent_dir, \"D\"),\n 'D2' : os.path.join(parent_dir, \"D2-local\"),\n }", "def aggregate(\n *passed_paths: str, recursive: bool = False\n) -> List[pathlib.Path]:\n\n stack = []\n\n for passed_path in passed_paths:\n path = pathlib.Path(passed_path)\n\n if path.is_file():\n stack.append(path)\n\n return stack", "def _glob_files(directories, extensions):\n pwd = Path(__file__).resolve().parent\n open3d_root_dir = pwd.parent\n\n file_paths = []\n for directory in directories:\n directory = open3d_root_dir / directory\n for extension in extensions:\n extension_regex = \"*.\" + extension\n file_paths.extend(directory.rglob(extension_regex))\n file_paths = [str(file_path) for file_path in file_paths]\n file_paths = sorted(list(set(file_paths)))\n return file_paths", "def _walk_paths(self, paths):\r\n for path in sorted(paths):\r\n if os.path.isdir(path):\r\n for dir_name, _, filenames in sorted(os.walk(path)):\r\n for filename in filenames:\r\n filename = os.path.join(dir_name, filename)\r\n yield os.path.relpath(filename, path), filename\r\n else:\r\n yield os.path.basename(path), path", "def handle_files_args(*paths_args):\n paths = []\n\n for paths_arg in paths_args:\n # Handle paths implicitly rooted at user home dir\n paths_arg = os.path.expanduser(paths_arg)\n\n # Expand wildcards\n paths_arg = glob.glob(paths_arg)\n\n # Create list of pathlib.Path objects\n paths.extend([pathlib.Path(path_arg) for path_arg in paths_arg])\n\n return paths", "def glob(self, glob_expr: str) -> Iterator[NicePath]:\n for path in self._root.glob(glob_expr):\n relative_path = path.relative_to(self._root)\n if not self._match_include(relative_path):\n continue\n if self._match_exclude(relative_path):\n continue\n\n yield NicePath(path)", "def _real_paths(config):\n for key in ('--config', '--ffmpeg-bin', '--log', '--music-source', '--working-dir'):\n if not config[key]:\n continue\n config[key] = os.path.realpath(os.path.expanduser(config[key]))", "def prep_files(app):\n smali_paths = []\n start = time.time()\n \n for root, dirs, files in os.walk(app, topdown=False):\n for name in files:\n if name[-6:] == \".smali\":\n smali_paths.append(str(os.path.join(root, name)))\n \n return smali_paths", "def get_paths(self):\n return (self.world_fpath, self.subj_fpath, self.peds_fpath)", "def _path_files(self):\n\n if not os.path.exists(self.path):\n return None\n\n directory_content = os.listdir(self.path)\n files = []\n\n while len(directory_content) != 0:\n\n if not directory_content[0].startswith(self.path):\n directory_obj = os.path.join(self.path, directory_content[0])\n else:\n directory_obj = directory_content[0]\n\n if os.path.isfile(directory_obj):\n files.append(directory_obj)\n elif os.path.exists(directory_obj):\n temp_directory_content = os.listdir(directory_obj)\n for obj in temp_directory_content:\n directory_content.append(os.path.join(directory_obj, obj))\n directory_content.pop(0)\n\n return files", "def fullpathlist(path):\n try:\n return [os.path.join(path, filename) for filename in os.listdir(path)]\n except OSError:\n return []", "def do_filepath_forloop(self, line):\n self.E_str = \"do_filepath_forloop\"\n line = line.replace(\" \", \"\")\n line = 
line[line.find(\"filepath\")+5:]\n filepath_str, _ = gen_parse.get_str_between_delims(line, \"(\", \")\")\n filepath_str = gen_parse.rm_quotation_marks(filepath_str)\n\n all_filepaths = glob.glob(filepath_str)\n if not all_filepaths:\n self.print_warning(\"I can't find anything matching the filepath you've enterred!\")\n\n return all_filepaths", "def recursive_glob(rootdir=\".\", suffix=\"\"):\n return [\n os.path.join(looproot, filename)\n for looproot, _, filenames in os.walk(rootdir)\n for filename in filenames\n if filename.endswith(suffix)\n ]", "def recursive_glob(rootdir=\".\", suffix=\"\"):\n return [\n os.path.join(looproot, filename)\n for looproot, _, filenames in os.walk(rootdir)\n for filename in filenames\n if filename.endswith(suffix)\n ]", "def paths(self, toNative=True):\n if self.__mode == E5PathPickerModes.OpenFilesMode:\n return self.path(toNative=toNative).split(\";\")\n else:\n return [self.path(toNative=toNative)]", "def getImmediateSubdirectories(dir):", "def get_paths_list_from_folder(folder):\n names = os.listdir(folder)\n relative_paths = [os.path.join(folder, image_name) for image_name in names]\n return relative_paths", "def scandir(self):\n return (self._join(dir_entry) for dir_entry in scandir(self.absolute))", "def getFullPath(pathfil, direct='/media/alex/BACKUP/mcgenco/', outfile=None):\n if '/' in pathfil:\n pshorts = []\n with open(pathfil, 'r') as fIn:\n for line in fIn:\n pshorts.append(line.strip())\n else:\n pshorts = [pathfil]\n \n # Get the full paths\n paths = []\n for nam in pshorts:\n p = subprocess.Popen(['find', direct, '-name', nam],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)\n out = str(p.stdout.read())\n out = '/'.join(out.split('/')[1:])\n out = '/'+out.split('\\\\')[0]\n paths.append(str(out.strip()))\n \n if outfile is not None:\n with open(outfile, 'w') as fOut:\n for p in paths:\n fOut.write(p)\n fOut.write('\\n')\n print('%s written' %outfile)\n return\n return paths", "def get_fastq_files(wildcards):\n return expand(os.path.join(fastq_dir, \"{sample}_{readpair}.fastq\"), readpair=[1, 2], **wildcards)", "def paths(self):\n return self._paths", "def paths(self):\n return self._paths", "def batchfiles(fdir='.'):\n \n os.chdir(fdir)\n names = glob.iglob('*')\n # Absolute path rather than relative path allows changing of directories in fn_name.\n names = sorted([os.path.abspath(name) for name in names])\n return(names)", "def get_data_files():\n\n data_files = []\n for d, dirs, filenames in os.walk(share_jupyterhub):\n rel_d = os.path.relpath(d, here)\n data_files.append((rel_d, [os.path.join(rel_d, f) for f in filenames]))\n return data_files", "def paths(self, return_indices=False):\n paths = []\n for tree in self.components():\n paths += self._single_tree_paths(tree, return_indices=return_indices)\n return paths", "def get_all_img_paths(path_to_folder):\n all_subfolders = glob.glob(path_to_folder + '*')\n all_paths = []\n for folder in all_subfolders:\n all_paths.extend(glob.glob(folder + '/*'))\n # get relative paths\n common_prefix = path_to_folder\n relative_paths = [os.path.relpath(path, common_prefix) for path in all_paths]\n return relative_paths", "def many_single_asset_paths(n_paths, spot, generator, time0, time1, r_param, vol_param):\n assert(n_paths>0)\n if n_paths==1:\n return single_path(spot, generator, time0, time1, r_param, vol_param)\n r,var,mu,discount = get_path_constants(time0, time1,r_param, vol_param)\n rand_vals = generator.get_samples(n_paths)\n #print(\"rands = \", rand_vals)\n 
future_spots = spot*np.exp(mu)\n future_spots *= np.exp(np.sqrt(var)*rand_vals)\n #future_spots *= discount\n return future_spots", "def syspaths(self):\n res = []\n for path, jsmodule in self.jsmodules.items():\n if jsmodule.js_lib_path != \"\":\n js_lib_path = os.path.dirname(jsmodule.js_lib_path.rstrip(\"/\")) # get parent\n if not js_lib_path in res:\n res.append(js_lib_path)\n return res", "def get_raw_image_paths_for_experiment(local_sync_directory_path, experiment_directory):\n raw_images_directory = os.path.join(local_sync_directory_path, experiment_directory)\n raw_image_paths = get_files_with_extension(raw_images_directory, \".jpeg\")\n return pd.Series(raw_image_paths)", "def get_sar_paths(directory_path: str) -> list:\n dataset_path = Path(directory_path)\n\n path_generator = dataset_path.rglob('*.tif')\n paths = sorted([path for path in path_generator if path.is_file()])\n return [sar_set(*g) for k, g in groupby(paths, key=lambda path: re.match(TYPE_REGEX, path.name)[2])]", "def _GetSubPathForNames(self, names):\n return [(self._module_dir, self._module_path + [name], name,\n self.ReleaseTrack())\n for name in names]", "def local_paths(self) -> List[Path]:\n return self._local_paths", "def storer_paths():\n return [dir_unchecked(), dir_checked(),\n dir_backup(), dir_tests()]" ]
[ "0.6839766", "0.66446954", "0.66331553", "0.6385926", "0.6355523", "0.62292236", "0.6208013", "0.60792667", "0.6063598", "0.60258174", "0.6019291", "0.6018035", "0.60155195", "0.60125214", "0.60032433", "0.5999792", "0.5935156", "0.59334236", "0.5899728", "0.5886695", "0.5886695", "0.5857036", "0.58525324", "0.58318067", "0.58215624", "0.5811824", "0.58065593", "0.5797119", "0.57954437", "0.5791977", "0.5782345", "0.57693434", "0.5768016", "0.57585114", "0.5746188", "0.5740741", "0.5738368", "0.5736661", "0.5732194", "0.57308257", "0.57270867", "0.5680777", "0.567966", "0.5669277", "0.56648207", "0.5664245", "0.56621444", "0.5653076", "0.5650992", "0.56474406", "0.56461656", "0.5639344", "0.56367886", "0.56283545", "0.5626271", "0.56109715", "0.56090635", "0.56051654", "0.55974966", "0.5596651", "0.5596564", "0.5594394", "0.55889386", "0.55888236", "0.5583139", "0.55812263", "0.55698735", "0.5568722", "0.55680746", "0.55435073", "0.55401105", "0.5539996", "0.5535556", "0.55332386", "0.55320656", "0.5526933", "0.55156773", "0.5514748", "0.5508304", "0.5506218", "0.5506218", "0.5505404", "0.5501765", "0.54998946", "0.54988366", "0.54978675", "0.5496115", "0.5495242", "0.5495242", "0.54940873", "0.54912704", "0.5490662", "0.54893315", "0.54770935", "0.5473749", "0.54714537", "0.54705197", "0.5462713", "0.5457757", "0.5450806" ]
0.6378503
4
One solution would be to do an inorder traversal and sum the values along the way (or just recursively sum over the tree). That is O(N), but when the range [lo, hi] is small it is wasteful.
def rangeSumBST(self, root: TreeNode, lo: int, hi: int) -> int:
    def visit(node: TreeNode) -> int:
        if not node:
            return 0
        if node.val < lo:
            return visit(node.right)
        elif hi < node.val:
            return visit(node.left)
        else:
            return node.val + visit(node.left) + visit(node.right)
    return visit(root)
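As a usage sketch of the pruned traversal the query describes, written as a standalone function with a minimal TreeNode class — both assumed here rather than taken from the source:

class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

def range_sum_bst(root, lo, hi):
    # The BST property lets us prune: skip the left subtree when node.val < lo,
    # and the right subtree when node.val > hi.
    if not root:
        return 0
    if root.val < lo:
        return range_sum_bst(root.right, lo, hi)
    if root.val > hi:
        return range_sum_bst(root.left, lo, hi)
    return root.val + range_sum_bst(root.left, lo, hi) + range_sum_bst(root.right, lo, hi)

if __name__ == "__main__":
    #        10
    #       /  \
    #      5    15
    #     / \     \
    #    3   7     18
    root = TreeNode(10, TreeNode(5, TreeNode(3), TreeNode(7)), TreeNode(15, None, TreeNode(18)))
    print(range_sum_bst(root, 7, 15))  # 7 + 10 + 15 = 32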
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getSum(root, level, h):\n if root == None:\n return\n \n h[level] = root.data\n \n getSum(root.left, level+1, h)\n getSum(root.right, level+1, h)", "def sum_tree(t):\n \"*** YOUR CODE HERE ***\"\n if is_leaf(t):\n return entry(t)\n total = entry(t)\n for subtree in subtrees(t):\n total += sum_tree(subtree)\n return total", "def reduce(self, start: int = 0, end: Optional[int] = None) -> Any:\n if end is None:\n end = self.capacity\n elif end < 0:\n end += self.capacity\n\n # Init result with neutral element.\n result = self.neutral_element\n # Map start/end to our actual index space (second half of array).\n start += self.capacity\n end += self.capacity\n\n # Example:\n # internal-array (first half=sums, second half=actual values):\n # 0 1 2 3 | 4 5 6 7\n # - 6 1 5 | 1 0 2 3\n\n # tree.sum(0, 3) = 3\n # internally: start=4, end=7 -> sum values 1 0 2 = 3.\n\n # Iterate over tree starting in the actual-values (second half)\n # section.\n # 1) start=4 is even -> do nothing.\n # 2) end=7 is odd -> end-- -> end=6 -> add value to result: result=2\n # 3) int-divide start and end by 2: start=2, end=3\n # 4) start still smaller end -> iterate once more.\n # 5) start=2 is even -> do nothing.\n # 6) end=3 is odd -> end-- -> end=2 -> add value to result: result=1\n # NOTE: This adds the sum of indices 4 and 5 to the result.\n\n # Iterate as long as start != end.\n while start < end:\n\n # If start is odd: Add its value to result and move start to\n # next even value.\n if start & 1:\n result = self.operation(result, self.value[start])\n start += 1\n\n # If end is odd: Move end to previous even value, then add its\n # value to result. NOTE: This takes care of excluding `end` in any\n # situation.\n if end & 1:\n end -= 1\n result = self.operation(result, self.value[end])\n\n # Divide both start and end by 2 to make them \"jump\" into the\n # next upper level reduce-index space.\n start //= 2\n end //= 2\n\n # Then repeat till start == end.\n\n return result", "def sumRangeTree2(self, i, j, cur):\n if i > j:\n return 0\n start, end = cur.start, cur.end\n if i == start and j == end:\n return cur.val\n mid = start+(end-start)/2\n return self.sumRangeTree(i, min(j, mid), cur.left) + self.sumRangeTree(max(mid+1, i), j, cur.right)", "def recursiveSums(desiredNum, values, depth=0, max_depth=5):\n depth+=1\n if(depth>max_depth):\n return\n if(len(values)==1):\n if(values[0]==desiredNum):\n return values[0]\n else:\n arr = []\n removals = []\n for i, value in enumerate(values):\n thisDesiredNum = desiredNum-value\n if(thisDesiredNum==0):\n arr.append(value)\n elif(thisDesiredNum>0):\n #quick fix prevents double counting here\n newValues = [l for l in values if(l not in removals)]\n newValues.pop(newValues.index(value))\n arr.append([value])\n if(len(newValues)!=0 and sum(newValues)>=thisDesiredNum):\n newSums = recursiveSums(thisDesiredNum, newValues, depth, max_depth)\n if(newSums):\n if(isinstance(newSums, int)):\n arr.append([newSums])\n else:\n arr[-1].extend(newSums)\n if(len(arr[-1])==0 or arr[-1]==[value]):\n arr.pop()\n removals.append(value)\n #remove unusable values\n iteratedValues = [value for value in values if(value not in removals)]\n if(iteratedValues):\n arr.append(recursiveSums(desiredNum, iteratedValues, depth, max_depth))\n return arr", "def value(d,o):\n # return memoized value if possible\n if (d,o) in v:\n return v[(d,o)]\n\n thisitem = int(t[d][o])\n # the total of a subtree that starts at the leaf, is just the value of the leaf\n if d == maxdepth:\n val = thisitem\n else:\n val = 
thisitem + max(value(d+1,o),value(d+1,o+1))\n\n v[(d,o)]=val\n return val", "def sum(self, start=0, end=None):\n return super(SumSegmentTree, self).reduce(start, end)", "def sum(self, start=0, end=None):\n return super(SumSegmentTree, self).reduce(start, end)", "def sum(self, start=0, end=None):\n return super(SumSegmentTree, self).reduce(start, end)", "def sum(self, start=0, end=None):\n return super(SumSegmentTree, self).reduce(start, end)", "def sum(self, start=0, end=None):\n return super(SumSegmentTree, self).reduce(start, end)", "def summationRecursion(lower, upper):\r\n if lower > upper:\r\n return 0\r\n else:\r\n return lower + summationRecursion(lower + 1, upper)", "def find_sum(root, desired_sum, level=0, buffer_list=None, result=[]):\n if not buffer_list:\n buffer_list = []\n\n if not root:\n return result\n\n buffer_list.append(root.key)\n temp = desired_sum\n\n for i in range(level, -1, -1):\n temp -= buffer_list[i]\n\n if temp == 0:\n result.append(buffer_list[i:level + 1])\n\n find_sum(root.left, desired_sum, level + 1, buffer_list[:], result)\n find_sum(root.right, desired_sum, level + 1, buffer_list[:], result)\n\n return result", "def op_sum(self, args):\n sum = 0\n stack_levels = len(self.stack)\n if args != None:\n stack_levels = int(args[0])\n self.require_stack(stack_levels)\n for i in range(0, stack_levels):\n sum += self.stack.pop()\n self.stack.append(sum)", "def fn(node, x):\n if not node: return x\n x = fn(node.right, x) # sum of right subtree\n x += node.val \n node.val = x\n return fn(node.left, x)", "def getSum2(root, level=0, maxLevel=None, sum=None):\n if root == None:\n return 0\n \n if maxLevel == None:\n maxLevel = [-1]\n sum = [0]\n \n if maxLevel[0] < level:\n sum[0] += root.data\n maxLevel[0] = level\n \n getSum2(root.right, level+1, maxLevel, sum) \n getSum2(root.left , level+1, maxLevel, sum)\n\n if level == 0:\n return sum[0]", "def sum_values(values):\n return (sum(values))", "def sum_of_nodes(t):\n return label(t) + sum([sum_of_nodes(b) for b in branches(t)])", "def binary_sums(start, limit):\n for n in range(start, limit):\n for i in range(1, n/2 + 1):\n yield i, n - i", "def sum(values):\n total = 0\n for i in values:\n total += i\n return total", "def sum(self) -> int:\n return self.root.sum", "def sumTo(n):\n\n sum_all = (n * (n+1))/2\n\n return sum_all", "def total(tree):\n if tree is None:\n return 0\n return total(tree.left) + total(tree.right) + tree.cargo", "def total(h):\r\n\treturn sum(i.points() for i in h)", "def sum_node_depths(node, current_sum, level):\n # Base case\n if node is None:\n return current_sum\n\n current_sum += level\n current_sum = sum_node_depths(node.left, current_sum, level + 1)\n current_sum = sum_node_depths(node.right, current_sum, level + 1)\n\n return current_sum", "def segment_sum(self, left, right):\n if self.empty():\n return 0\n less, _ = self.root.split_sums(left)\n _, greater = self.root.split_sums(right)\n return self.sum - less - greater", "def sum_elements(arr):\n return sum(arr)", "def compute_node_sums(nodes):\n for node in nodes:\n node.children_summed = 0 # Dynamically add a meta field to Node to improve runtime when computing sums.\n\n leaf_nodes = []\n for node in nodes:\n if len(node.children) == 0:\n leaf_nodes.append(node)\n to_process = leaf_nodes\n while to_process:\n node = to_process.pop()\n # if leaf_node or all child notes computed their sum.\n if len(node.children) == 0 or len(node.children) == node.children_summed:\n node.sum = node.value\n if len(node.children) > 0:\n node.sum = 
node.sum + sum([child.sum for child in list(node.children.values())])\n if node.parent:\n node.parent.children_summed += 1\n if len(\n node.parent.children) == node.parent.children_summed: # all children have computed their sums\n to_process.append(node.parent)\n\n for node in nodes:\n del node.children_summed", "def fn(node):\n if not node: return 0 \n ans = node.val + fn(node.left) + fn(node.right)\n vals.append(ans)\n return ans", "def sum(n):\n if n == 0:\n return 0\n return sum(n - 1) + n", "def sum_of_numbers(numbers):\r\n return sum(numbers)", "def nodalSum2(val,elems,tol):\n nodes = unique1d(elems)\n for i in nodes:\n wi = where(elems==i)\n vi = val[wi]\n ai,ni = average_close(vi,tol=tol)\n ai /= ni.reshape(ai.shape[0],-1)\n val[wi] = ai", "def calculate_sum(self):\n\n left_sum = self.left.calculate_sum() if self.left else 0\n right_sum = self.right.calculate_sum() if self.right else 0\n return self.data + left_sum + right_sum", "def recursive_sum(lst):\n\n if lst == []:\n return 0\n\n else:\n\n return lst[0] + recursive_sum(lst[1:])", "def summationLoop(lower, upper):\r\n sum = 0\r\n for i in range(lower, upper + 1):\r\n sum += i\r\n return sum", "def largest_sum_nonadjacents_numbers_1(L: List[int]) -> int:\n cache_n2 = 0\n cache_n1 = 0\n best_sum = 0\n for l in L:\n best_sum = max(l + cache_n2, cache_n1)\n # update cache for next iteration\n cache_n2 = cache_n1\n cache_n1 = best_sum\n\n return best_sum", "def sumTo(n):\n \n the_sum = 0 #current sum\n a_number = 1 #where we are\n while a_number <= n:\n the_sum += a_number\n a_number += 1\n return the_sum", "def sumRange(self, i, j):\n if not self.nums: return 0 # edge case\n return self.sum(j+1)-self.sum(i)", "def sum_in_list_dyn(number_list, total_value):\n memoization = [[False for i in range(total_value + 1)] for i in range(len(number_list) + 1)]\n\n for i in range(len(number_list) + 1):\n # We can always make 0 with the empty set.\n memoization[i][0] = True\n\n for i in range(1, len(number_list) + 1):\n for j in range(1, total_value + 1):\n if j < number_list[i - 1]:\n memoization[i][j] = memoization[i - 1][j]\n else:\n memoization[i][j] = memoization[i - 1][j] or memoization[i - 1][j - number_list[i - 1]]\n\n return memoization[len(number_list)][total_value]", "def get_sum(self, i):\n s = 0\n\n # index in BITree is 1 more than index in arr[]\n i += 1\n\n # Traverse to leaves of BITree[i]:\n while i > 0:\n s += self.BITree[i]\n\n # Move index to parent node (next set bit in binary representation)\n i -= i & (-i)\n\n return s", "def fn(node, val):\n if not node: return 0\n val = 10*val + node.val\n if not node.left and not node.right: return val \n return fn(node.left, val) + fn(node.right, val)", "def level_sums(n,r,s):\n# from numpy import zeros\n from copy import copy\n OutSet=[]\n# a=zeros(r,int)\n a=[0 for i in range(r)]\n if(s):\n t=n\n h=-1\n a[0]=n\n s=False\n OutSet.append(copy(a))\n else:\n return OutSet \n while (a[r-1] != n):\n if(1<t):\n h=-1\n h+=1\n t=a[h]\n a[h]=0\n a[0]=t-1\n a[h+1]+=1\n OutSet.append(copy(a))\n else:\n return OutSet", "def sum_to(n):\n the_sum = 0\n for counter in range(n+1):\n the_sum = the_sum + counter\n return the_sum", "def graph_traversal_sum(values_in, connections_in, nodes_start, nodes_end):\n # Make sure that original objects are not changed\n values = values_in.copy()\n connections = connections_in.copy()\n\n # End node\n node_end = next(iter(nodes_end))\n\n # Function to calculate the path from a given sink to the source \n def sink_path(connections_in, node_start, nodes_start, 
node_end, path=None, last_junction=None):\n # List with the nodes that make the path from node_start until node_end\n path = path if path else [node_start]\n # Size of the connections matrix\n nodes_length = len(connections_in[node_start])\n # List of nodes connected to the current node that are not yet in the calculated path and are not sinks\n next_node = [x for x in range(0, nodes_length) if connections_in[path[-1], x] == 1 and x not in nodes_start and x not in path]\n # Last node in the path that is a junction (intersection of more than 2 nodes)\n if len(next_node) > 1:\n last_junction = path[-1]\n # Iterate over all possible connections from the current node\n for node in next_node:\n if node not in path:\n # Add (temporarily) the next possible node in the path\n path.append(node)\n if node == node_end:\n # Source, end of path\n return path\n # Calculate path again from current node\n path = sink_path(connections_in, node_start, nodes_start, node_end, path, last_junction)\n # The next possible node is a sink, this path is not valid, remove all nodes in the path from the last junction\n if not next_node:\n index = len(path) - 1\n if last_junction is not None:\n index = path.index(last_junction) + 1\n path = path[0:index]\n return path\n\n # Calculate all paths from sinks to the source\n paths = []\n for node in nodes_start:\n paths.append(sink_path(connections, node, nodes_start, node_end))\n\n # Function to sum up the node values of all paths in the graph\n def path_sum(connections_in, paths, values_in):\n # Size of the connections matrix\n nodes_length = len(connections_in[0])\n # Matrix with the output graph traversal sum\n connections_out = np.zeros(shape=(nodes_length, nodes_length))\n # Iterate over all elements in the matrix and add the initial values for every path for every node\n for x in range(0, nodes_length):\n for y in range(0, nodes_length):\n # If the given element of connections_in is not zero, is a node of the graph\n if connections_in[x, y] > 0:\n # Iterate over all paths\n for path_index in range(0, len(paths)):\n path = paths[path_index]\n for i in range(0, len(path) - 2):\n # Check if the current element of the matrix is part of the current path\n if path[i] == x and path[i + 1] == y:\n # Add the value coming from the corresponding path\n connections_out[x, y] += values_in[x, y]\n # The matrix is symmetric, add the transpose of the calculated matrix\n connections_out = np.maximum( connections_out, connections_out.transpose())\n return connections_out\n\n return path_sum(connections, paths, values)", "def find_level_maxsum (self):\r\n level_queue = [self]\r\n next_level_queue = []\r\n curr_level = 0\r\n max_sum = -sys.maxsize\r\n while level_queue:\r\n curr_node = level_queue.pop(0)\r\n if curr_node.left:\r\n next_level_queue.append(curr_node.left)\r\n if curr_node.right:\r\n next_level_queue.append(curr_node.right)\r\n if not level_queue:\r\n sum_value = 0\r\n for nodes in next_level_queue:\r\n sum_value += nodes.root\r\n if sum_value > max_sum:\r\n max_sum = sum_value\r\n curr_level += 1\r\n level_queue = next_level_queue[:]\r\n next_level_queue = []\r\n if self.root> max_sum:\r\n max_sum = self.root\r\n return max_sum", "def sum_range(self, lower, upper):\n if upper>self.upper:\n upper=self.upper\n if lower<self.lower:\n lower = self.lower\n\n i_l = int(np.floor((lower-self.lower)/self._dx))\n i_u = int(np.floor((upper-self.lower)/self._dx))\n total = 0.0\n for i in range(i_l,i_u):\n total+= self.y[i]\n return total", "def rec_AbSum(p):\n if p == []:\n return 
0\n return abs(p.pop()) + rec_AbSum(p)", "def sum_of_tree(root_elem):\r\n\tif root_elem is None:\r\n\t\treturn 0\r\n\treturn root_elem.value + sum_of_tree(root_elem.left) + sum_of_tree(root_elem.right)", "def count_value(tree,val):\r\n if (tree==None):\r\n return 0\r\n elif(value(tree)==val):\r\n return 1+count_value(left(tree), val)+count_value(right(tree), val)\r\n else:\r\n return count_value(left(tree), val)+count_value(right(tree), val)", "def _calculate_new_probability_sum(node, value):\r\n\r\n card = node.cardinality - 1\r\n\r\n if not value in node.unique_vals:\r\n\r\n i = 0\r\n length = len(node.prob_sum)\r\n while length > i + 1 and node.unique_vals[i] < value:\r\n i += 1\r\n idx = i\r\n\r\n if idx > 0:\r\n node.prob_sum[1:idx + 1] *= card / (card + 1)\r\n\r\n if idx + 1 < length - 1:\r\n node.prob_sum[idx + 1:length - 1] = node.prob_sum[idx + 1:length - 1] * card / (card + 1) + 1 / (card + 1)\r\n\r\n node.prob_sum = np.insert(node.prob_sum, idx + 1, node.prob_sum[idx] + 1 / (card + 1))\r\n\r\n node.unique_vals = np.insert(node.unique_vals, idx, value)\r\n length += 1\r\n\r\n else:\r\n node.prob_sum *= card / (card + 1)\r\n\r\n idx_array = np.where(node.unique_vals == value)\r\n idx = int(idx_array[0])\r\n length = len(node.prob_sum)\r\n if idx + 1 < length - 1:\r\n node.prob_sum[idx + 1:length] += 1 / (card + 1)\r\n\r\n node.prob_sum[length - 1] = 1.0", "def get_nested_sum():\n l_int = [1,2,[], 3,[4,[], 5,[6]],[7],[8,9], 10,[[],11]]\n print 'Sum:', nested_sum(l_int) \n return", "def sum_range(nums, start=0, end=None):\n n = len(nums)\n if end == None:\n nz = n\n elif end > n:\n nz = n\n else:\n nz = end\n\n return sum(nums[start:nz + 1])", "def get_sum(self, node: Optional[TreeNode]) -> int:\n if not node:\n return 0\n l_sub_sum, r_sub_sum = self.get_sum(node.left), self.get_sum(node.right)\n self.ans += abs(l_sub_sum - r_sub_sum)\n\n return node.val + l_sub_sum + r_sub_sum", "def explore(self, nums, left, right, target):\n diff = sys.maxsize\n\n while left < right:\n cur_sum = nums[left] + nums[right]\n if cur_sum == target:\n return 0\n \n if abs(target - cur_sum) < abs(diff):\n diff = target - cur_sum\n if cur_sum < target:\n left += 1\n else:\n right -= 1\n return diff", "def sum_series(n, zeroth, oneth):\n\n if (n == 0):\n return zeroth\n elif (n == 1):\n return oneth\n else:\n return sum_series(n - 1, zeroth, oneth) + \\\n sum_series(n - 2, zeroth, oneth)", "def set(self, node_index, value):\n if value < 0.0:\n raise ValueError(\n 'Sum tree values should be nonnegative. 
Got {}'.format(value))\n self.highest_set = max(node_index, self.highest_set)\n node_index = node_index + self.low_idx\n self.max_recorded_priority = max(value, self.max_recorded_priority)\n\n delta_value = value - self.nodes[node_index]\n\n # Now traverse back the tree, adjusting all sums along the way.\n for _ in reversed(range(self.depth)):\n # Note: Adding a delta leads to some tolerable numerical inaccuracies.\n self.nodes[node_index] += delta_value\n node_index = (node_index - 1) // 2\n\n self.nodes[node_index] += delta_value\n assert node_index == 0, ('Sum tree traversal failed, final node index '\n 'is not 0.')", "def nodalSum(val,elems,work,avg):\n nodes = unique1d(elems)\n for i in nodes:\n wi = where(elems==i)\n vi = val[wi]\n if avg:\n vi = vi.sum(axis=0)/vi.shape[0]\n else:\n vi = vi.sum(axis=0)\n val[wi] = vi", "def total_nodes(self)->int:\n\t\tqueue=[]\n\t\tsum=0\n\t\tqueue.append(self)\n\t\twhile(len(queue)>0):\n\t\t\tnode=queue.pop(0)\n\t\t\tsum+=1\n\t\t\tif(node.right!=None):\n\t\t\t\tqueue.append(node.right)\n\t\t\tif(node.left!=None):\n\t\t\t\tqueue.append(node.left)\n\t\treturn sum", "def summationReduce(lower, upper):\r\n if lower > upper:\r\n return 0\r\n else:\r\n return reduce(lambda x, y: x + y, range(lower, upper + 1))", "def fn(node):\n nonlocal ans\n if not node: return 0 \n sm = fn(node.left) + fn(node.right)\n if sm == node.val: ans += 1\n return sm + node.val", "def sum_list(numbers):\n\t\n\tif len(numbers) == 0:\n\t\treturn 0 \n\n\tsum = numbers[0] +sum_list(numbers[1:])\n\treturn sum", "def divisors_sum(upper=10**5):\n nums = [0] * (upper + 1)\n for i in range(1, upper + 1):\n for j in range(i, upper + 1, i):\n nums[j] += i\n return nums", "def list_sum_range_finder(lst, target):\n for n in range(2, len(lst)):\n for sublist in zip(*[lst[x:] for x in range(n)]):\n if sum(sublist) == target:\n return min(sublist) + max(sublist)", "def depthSum(self, nestedList: List[NestedInteger]) -> int:\n final_sum = 0\n def dfs(nlist,depth):\n nonlocal final_sum\n #no base case\n \n #logic\n for ele in nlist:\n if ele.isInteger():\n #add the value to the sum\n final_sum += ele.getInteger() * depth\n else:\n dfs(ele.getList(),depth+1)\n dfs(nestedList,1)\n return final_sum", "def sum_for_list(lst):\n list_of_nods = []\n for num in lst:\n temp_list = simple_nod(abs(num))\n for item in temp_list:\n if item not in list_of_nods:\n list_of_nods.append(item)\n result = []\n for nod in list_of_nods:\n flag = False\n sum = 0\n for num in lst:\n if not num % nod:\n sum += num\n flag = True\n if flag:\n result.append([nod, sum])\n return sorted(result, key=lambda x: x[0])", "def fn(node):\n if not node: return 0 \n ans = node.val + fn(node.left) + fn(node.right)\n freq[ans] += 1\n return ans", "def add_up(num):\n aList = list(range(1, num + 1))\n sum = 0\n\n for item in aList:\n sum = add_together(sum, item)\n# print(\"NOW SUM IS: \" + str(sum))\n\n return sum", "def sum_numbers(sequence):\r\n\r\n total = 0\r\n seq = get_numbers(sequence)\r\n for element in seq:\r\n total += element\r\n\r\n return total", "def sum_natural(n):\n total, curr = 0 , 1\n\n while curr <= n:\n total, curr = total + curr, curr + 1\n return total", "def radical_sum(num, level):\n if level == 0:\n return 0\n \n return ((radical_sum(num, level - 1) + num) ** 0.5)", "def test_large_sum(self):\n for n in [10, 20, 30, 40, 50]:\n A = np.arange(n*n)\n A = np.reshape(A, (n, n))\n x = Variable(n, n)\n p = Problem(Minimize(at.sum_entries(x)), [x >= A])\n result = p.solve()\n answer = n*n*(n*n+1)/2 - n*n\n 
print(result - answer)\n self.assertAlmostEqual(result, answer)", "def integer_sum(n):\n\n sum = 0\n k = 0\n\n # INVARIANT\n # The sum of far is equal to the sum of the first k integer numbers\n # VARIANT: n-k\n #\n while (k!=n):\n k += 1\n sum += k\n\n return sum", "def sum_pairs(arr: list, sum: int):\n pair_count = 0\n count_map = {}\n for i in arr:\n if i in count_map:\n count_map[i] += 1\n else:\n count_map[i] = 1\n \n for key, value in count_map.items():\n if (sum - key) in count_map:\n count1 = value\n count2 = count_map[sum - key]\n if count1 == count2 and count1 > 1:\n pair_count += int(count1 * (count1 - 1) / 2)\n else:\n pair_count += count1 * count2\n count_map[key] = 0\n count_map[sum - key] = 0\n\n return pair_count", "def solution(A):\n \"\"\"method 2 n**2\n east=[] #0\n west=[] #1\n for i in range(len(A)):\n if A[i] == 0:\n east.append(i)\n else:\n west.append(i)\n\n result = 0\n for e in east:\n count = 0\n for j in range(len(west)):\n if e > west[j]:\n continue\n if e < west[j]:\n count = len(west) - j\n result += count\n #print(e, count)\n break\n return result\n \"\"\"\n east=[] #0\n west=[] #1\n l = len(A)\n for i in range(len(A)):\n if A[i] == 0:\n east.append(i)\n else:\n west.append(i)\n\n result = {}\n for i in range(len(east)):\n e = east[i]\n if i == 0:\n result[e] = l - e - len(east)\n if i != 0:\n result[e] = result[east[i-1]] - (e - east[i-1]-1)\n\n #print(result)\n s = sum(result.values())\n if s > 1000000000:\n return -1\n return s", "def path_sum(self, node):\n if self.is_leaf(node):\n return node.val, node.val\n if node is None:\n return 0, -2147483648\n\n left, sub1 = self.path_sum(node.left)\n right, sub2 = self.path_sum(node.right)\n left = left if left > 0 else 0\n right = right if right > 0 else 0\n\n if left > right:\n maximum_path = node.val + left\n else:\n maximum_path = node.val + right\n\n sub_result = max(max(sub1, sub2), node.val + left + right)\n return maximum_path, sub_result", "def sum_n_m(n, m):\n total = 0\n for i in range(n, m+1):\n total += i\n return total", "def sumTotal(n):\n\n sum_total = 0\n\n for i in range(1, n+1):\n sum_total = sum_total + i\n\n return sum_total", "def sum_to_n(n):\n total = 0\n for i in range(1,n+1):\n total += i\n return total", "def apply(H, x, g=lambda v, e: np.sum(v[list(e)])):\n new_x = np.zeros(H.num_nodes)\n for edge in H.edges.members():\n edge = list(edge)\n # ordered permutations\n for shift in range(len(edge)):\n new_x[edge[shift]] += g(x, edge[shift + 1 :] + edge[:shift])\n return new_x", "def sum_series(n,zero_val=0,one_val=1):\n if n==0:\n return zero_val\n elif n==1:\n return one_val\n else:\n return sum_series(n-1,zero_val,one_val) + sum_series(n-2,zero_val,one_val)", "def slice_sum(lst, begin, end):\n if begin > end or begin > len(lst) - 1 or end > len(lst) - 1:\n raise IndexError\n if begin < end:\n return lst.index(begin) + slice_sum(lst, begin + 1, end)\n return 0", "def recursive_sum(input_data, row_number):\n for i in range(len(input_data[row_number])):\n # Find the sums of the item with its lower left and lower right items (the row below)\n # Then replace this item with the larger of the sum\n input_data[row_number][i] += max([input_data[row_number + 1][i], input_data[row_number + 1][i + 1]])\n\n # If it is the top row, then we are done\n if len(input_data[row_number]) == 1:\n return input_data[row_number][0]\n\n # If not the top row, then use recursive\n return recursive_sum(input_data, row_number - 1)", "def leaf_count(T):\n if T.is_leaf:\n return 1\n else:\n# s = 0\n# for child in 
T:\n# s += leaf_count(child)\n# return s\n return reduce(add, map(leaf_count, T))", "def total(n):\n if n < 0:\n return None\n else:\n result = 0\n for i in range(n + 1):\n result += i\n return result", "def total(n):\n if n < 0:\n return None\n else:\n result = 0\n for i in range(n + 1):\n result += i\n return result", "def fn(n, k):\n if n == 1: return k # base case \n return sum(fn(n-1, kk) for kk in range(1, k+1))", "def count_entries(numbers):\n nodes = numbers[0]\n mt_entries = numbers[1]\n\n total = 0\n offset = 2\n for _ in range(nodes):\n entries, value = count_entries(numbers[offset:])\n offset += entries\n total += value\n\n for entry in numbers[offset:offset+mt_entries]:\n total += entry\n offset += 1\n return offset, total", "def sum(self) -> float:\n return sum(self.values)", "def sum_of_nth( n ):\n if n > 0:\n return sum( range(n + 1) )\n else:\n return 0", "def fn(i, k):\n if i == len(nums): return 0\n if k < 0: return inf \n ans = inf\n rmx = -inf # range max \n rsm = 0 # range sum \n for j in range(i, len(nums)): \n rmx = max(rmx, nums[j])\n rsm += nums[j]\n ans = min(ans, rmx*(j-i+1) - rsm + fn(j+1, k-1))\n return ans", "def sum(self, start=0, end=None):\n return super().reduce(start, end)", "def checkSumWalk(top=\".\", func=checkSumHelper):\n values = []\n os.path.walk( top, checkSumHelper, values )\n return sum(values)", "def recurrent_sum_of_elements_in_list(lst):\n if len(lst) == 0:\n return 0\n elif len(lst) == 1:\n return lst[0]\n return lst[0] + recurrent_sum_of_elements_in_list(lst[1:])", "def mapping_quality(graph, spanset, gapset):\n the_sum = sum(sum(1 for edge in graph.edges(node) if edge[1] in gapset) for node in spanset)\n # if directed graph, uncomment this:\n #the_sum += sum(sum(1 for edge in graph.edges(node) if edge[1] in spanset) for node in gapset)\n return the_sum", "def fn(x):\n if x <= 0: return int(x == 0)\n return sum(fn(x - xx) for xx in nums)", "def sumRange(self, i, j):\r\n # Sum of the range can be obtained by subtracting dp[j] - dp[i-1]\r\n return self.dp[j] if i == 0 else self.dp[j] - self.dp[i-1]", "def fn(lo, hi, k):\n if lo == hi: return 0 \n while lo+1 < hi and boxes[lo] == boxes[lo+1]: lo, k = lo+1, k+1\n ans = (k+1)*(k+1) + fn(lo+1, hi, 0)\n for mid in range(lo+2, hi): \n if boxes[lo] == boxes[mid]: \n ans = max(ans, fn(lo+1, mid, 0) + fn(mid, hi, k+1))\n return ans", "def sumValues(aList):\r\n sum = 0\r\n for d in aList:\r\n sum += d\r\n return sum", "def linear_sum(S, n):\n if n == 0:\n return 0\n else:\n return linear_sum(S, n - 1) + S[n - 1]", "def solution(number): # O(N)\n m = {\n 0: 0,\n 1: 1\n } # O(1)\n\n for i in range(2, number + 1): # O(N)\n m[i] = m[i - 1] + m[i - 2] # O(1)\n\n return m[number] # O(1)" ]
[ "0.6729964", "0.66551703", "0.64795923", "0.6404012", "0.63082033", "0.6260686", "0.6228508", "0.6228508", "0.6228508", "0.6228508", "0.6228508", "0.61943024", "0.6178861", "0.61783046", "0.61113393", "0.6107871", "0.6049075", "0.604374", "0.6043456", "0.603119", "0.5992261", "0.5984472", "0.5962361", "0.5950429", "0.59476715", "0.59434265", "0.592258", "0.5909091", "0.59060985", "0.59019375", "0.5889684", "0.5888755", "0.585981", "0.58591455", "0.5850731", "0.58413464", "0.5822262", "0.5807625", "0.5786154", "0.5780714", "0.5777695", "0.57769006", "0.577676", "0.57728255", "0.5769285", "0.57611465", "0.57577425", "0.57556915", "0.5751477", "0.5749534", "0.5728839", "0.57240385", "0.5721208", "0.5720037", "0.5717253", "0.5714112", "0.5713837", "0.57088053", "0.5699591", "0.56841445", "0.56717944", "0.5671647", "0.56605726", "0.563672", "0.5623528", "0.5620416", "0.5616532", "0.56107384", "0.5607329", "0.56058663", "0.5604931", "0.5601608", "0.5593907", "0.5586008", "0.5579073", "0.55789286", "0.5578368", "0.55723065", "0.55714476", "0.55661106", "0.55609107", "0.55608326", "0.5559977", "0.5546399", "0.5546399", "0.55387706", "0.55378354", "0.5532027", "0.552463", "0.55238885", "0.5520377", "0.55162996", "0.55135936", "0.5509235", "0.55087084", "0.550701", "0.54998285", "0.54929984", "0.54924107", "0.5487304" ]
0.7450429
0
Loops over arrays in the arrays_iterator and evaluates the cut_function at the cut_values. Returns a list of efficiencies, passed events/objects, and total events/objects. cut_function is expected to return a tuple (n_pass, n_total) with input (arrays, cut_value).
def get_eff(arrays_iterator, cut_function, cut_values):
    n_cuts = len(cut_values)
    n_total = np.zeros(n_cuts)
    n_pass = np.zeros(n_cuts)
    for arrays, dataset in arrays_iterator:
        weight = dataset.get_weight()
        for i_cut, cut in enumerate(cut_values):
            this_n_pass, this_n_total = cut_function(arrays, cut)
            n_total[i_cut] += weight * this_n_total
            n_pass[i_cut] += weight * this_n_pass
    # Basically n_pass / n_total, but returns 0 if n_total has a 0 somewhere
    eff = np.divide(n_pass, n_total, out=np.zeros_like(n_pass), where=n_total!=0)
    return eff, n_pass, n_total
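A toy usage sketch of the function above, assuming numpy is imported as np in the defining module; the ToyDataset class and the pt-threshold cut function are hypothetical stand-ins for whatever the real arrays_iterator yields.

import numpy as np

class ToyDataset:
    # Hypothetical stand-in for the dataset objects the real iterator yields.
    def __init__(self, weight):
        self.weight = weight

    def get_weight(self):
        return self.weight

def pt_cut(arrays, cut_value):
    # Toy cut: (number of entries passing the threshold, total number of entries).
    pt = arrays["pt"]
    return int(np.sum(pt > cut_value)), len(pt)

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    arrays_iterator = [
        ({"pt": rng.exponential(50.0, size=1000)}, ToyDataset(weight=1.5)),
        ({"pt": rng.exponential(80.0, size=500)}, ToyDataset(weight=0.7)),
    ]
    cut_values = [20.0, 50.0, 100.0]
    eff, n_pass, n_total = get_eff(arrays_iterator, pt_cut, cut_values)
    print(eff)      # weighted efficiency at each cut value
    print(n_pass)   # weighted passing counts
    print(n_total)  # weighted totals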
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __cut_arrays(data_array, maximum_time, arrays_to_cut):\n\n try:\n begin_time = data_array[arrays_to_cut[0]][0][0]\n end_time = data_array[arrays_to_cut[0]][0][-1]\n delta_time = (\n data_array[arrays_to_cut[0]][0][1]\n - data_array[arrays_to_cut[0]][0][0]\n )\n total_time = end_time - begin_time\n if total_time > maximum_time:\n over_time = total_time - maximum_time\n array_elm_to_drop = int(over_time / delta_time)\n for arrays in arrays_to_cut:\n data_array[arrays][0] = data_array[arrays][0][\n array_elm_to_drop:\n ]\n data_array[arrays][1] = data_array[arrays][1][\n array_elm_to_drop:\n ]\n except:\n pass", "def split_iters(iter_ranges, n_threads = None):\n\n\n if n_threads is None:\n n_threads = cpu_count()\n \n counts = [safediv(r[1] - r[0], r[2]) for r in iter_ranges]\n # largest_dim = np.max(counts)\n total_count = float(np.sum(counts))\n split_factors = [ (c / total_count) ** 2 for c in counts ]\n if len(counts) > 2:\n # kludgy heuristic\n # if you're reading across multiple dimensions\n # assume there might be reuse of data read in \n # and try to split up work so it fits into cache \n expected_bytes = 8 \n for dim in counts:\n expected_bytes *= dim\n expected_kb = expected_bytes / 1024\n l2_cache_size = 8192\n n_pieces = max(n_threads, expected_kb / l2_cache_size)\n else: \n n_pieces = 2*n_threads \n \n # initialize work_items with an empty single range \n work_items = [[]]\n for (dim_idx,dim_count) in enumerate(counts):\n\n dim_start, _, dim_step = iter_ranges[dim_idx]\n n_dim_pieces = int(math.ceil(split_factors[dim_idx] * n_pieces))\n dim_factor = float(dim_count) / n_dim_pieces\n \n old_work_items = [p for p in work_items]\n work_items = []\n for i in xrange(n_dim_pieces):\n # copy all the var ranges, after which we'll modifying \n # the biggest dimension \n\n start = dim_start + int(math.floor(dim_step * dim_factor * i))\n stop = dim_start + int(math.floor(dim_step * dim_factor * (i+1)))\n \n dim_work_item = (start,stop,dim_step)\n for old_work_item in old_work_items:\n new_work_item = [r for r in old_work_item]\n new_work_item.append(dim_work_item) \n work_items.append(new_work_item)\n\n return work_items", "def cut_eval(self, hits, *args):\n end = self.start_offset + self.train_window + self.predict_window\n return self.cut(hits, self.start_offset, end) + args", "def runCutVals(df, eVal=0., windowSize = 2):\n\n dfg = df.groupby(['cpd1'])\n\n eMin = round(eVal - windowSize/2, 2)\n eMax = round(eMin + windowSize, 2)\n dFullPeakE, dFullBkgE = 0, 0\n dCutPeakE, dCutBkgE = 0, 0\n dFullPeakN, dFullBkgN = 0, 0\n dCutPeakN, dCutBkgN = 0, 0\n\n for name, g in dfg:\n valsFull = g['trapENFCal1'].loc[(g['trapENFCal1']>eMin) & (g['trapENFCal1']<eMax)].values + g['trapENFCal2'].loc[(g['trapENFCal1']>eMin) & (g['trapENFCal1']<eMax)].values\n\n valsCut = g['trapENFCal1'].loc[(g['Pass1']==True) & (g['Pass2']==True) & (g['trapENFCal1']>eMin) & (g['trapENFCal1']<eMax)].values + g['trapENFCal2'].loc[(g['Pass1']==True) & (g['Pass2']==True) & (g['trapENFCal1']>=eMin) & (g['trapENFCal1']<=eMax)].values\n if name in enrDetList:\n dFullPeakE += len(valsFull[(valsFull > 237.28) & (valsFull < 239.46)])\n dCutPeakE += len(valsCut[(valsCut > 237.28) & (valsCut < 239.46)])\n dFullBkgE += len(valsFull[(valsFull > 235) & (valsFull < 237.18)])\n dCutBkgE += len(valsCut[(valsCut > 235) & (valsCut < 237.18)])\n elif name in natDetList:\n dFullPeakN += len(valsFull[(valsFull > 237.28) & (valsFull < 239.46)])\n dCutPeakN += len(valsCut[(valsCut > 237.28) & (valsCut < 239.46)])\n dFullBkgN += 
len(valsFull[(valsFull > 235) & (valsFull < 237.18)])\n dCutBkgN += len(valsCut[(valsCut > 235) & (valsCut < 237.18)])\n\n return dFullPeakE, dCutPeakE, dFullBkgE, dCutBkgE, dFullPeakN, dCutPeakN, dFullBkgN, dCutBkgN", "def get_cuts(data, args, verbose):\n\n if args['experiment']['cut_finding'] == CutFinding.features:\n\n values = (data.xs == True).T\n return Cuts(values=values)\n\n if args['experiment']['cut_finding'] == CutFinding.binning:\n\n values, names = binning(xs=data.xs,\n range_answers=args['cut_finding']['range_answers'],\n n_bins=args['cut_finding']['n_bins'])\n return Cuts(values=values, names=names)\n\n if args['experiment']['cut_finding'] == CutFinding.Kernighan_Lin:\n\n values = kernighan_lin(A=data.A,\n nb_cuts=args['cut_finding']['nb_cuts'],\n lb_f=args['cut_finding']['lb_f'],\n seed=args['experiment']['seed'],\n verbose=verbose)\n values = np.unique(values, axis=0)\n return Cuts(values=values)\n\n if args['experiment']['cut_finding'] == CutFinding.kmodes:\n\n values = find_kmodes_cuts(xs=data.xs,\n max_nb_clusters=args['cut_finding']['max_nb_clusters'])\n values = np.unique(values, axis=0)\n return Cuts(values=values)\n\n if args['experiment']['cut_finding'] == CutFinding.Fiduccia_Mattheyses:\n\n values = fid_mat(xs=data.A,\n nb_cuts=args['cut_finding']['nb_cuts'],\n lb_f=args['cut_finding']['lb_f'],\n seed=args['experiment']['seed'],\n verbose=verbose)\n values = np.unique(values, axis=0)\n return Cuts(values=values)\n\n if args['experiment']['cut_finding'] == CutFinding.linear:\n\n values, equations = linear_cuts(xs=data.xs,\n equations=args['cut_finding']['equations'],\n verbose=verbose)\n\n return Cuts(values=values, equations=equations)\n\n raise ValueError('Wrong name for a cut finding function')", "def iterated_array_from(\r\n self, func: Callable, cls: object, array_lower_sub_2d: Array2D\r\n ) -> Array2D:\r\n\r\n if not np.any(array_lower_sub_2d):\r\n return array_lower_sub_2d.slim\r\n\r\n iterated_array = np.zeros(shape=self.shape_native)\r\n\r\n threshold_mask_lower_sub = self.mask\r\n\r\n for sub_size in self.sub_steps[:-1]:\r\n array_higher_sub = self.array_at_sub_size_from(\r\n func=func, cls=cls, mask=threshold_mask_lower_sub, sub_size=sub_size\r\n )\r\n\r\n try:\r\n threshold_mask_higher_sub = self.threshold_mask_via_arrays_from(\r\n array_lower_sub_2d=array_lower_sub_2d,\r\n array_higher_sub_2d=array_higher_sub,\r\n )\r\n\r\n iterated_array = self.iterated_array_jit_from(\r\n iterated_array=iterated_array,\r\n threshold_mask_higher_sub=threshold_mask_higher_sub,\r\n threshold_mask_lower_sub=threshold_mask_lower_sub,\r\n array_higher_sub_2d=array_higher_sub,\r\n )\r\n\r\n except ZeroDivisionError:\r\n return self.return_iterated_array_result(iterated_array=iterated_array)\r\n\r\n if threshold_mask_higher_sub.is_all_true:\r\n return self.return_iterated_array_result(iterated_array=iterated_array)\r\n\r\n array_lower_sub_2d = array_higher_sub\r\n threshold_mask_lower_sub = threshold_mask_higher_sub\r\n\r\n array_higher_sub = self.array_at_sub_size_from(\r\n func=func,\r\n cls=cls,\r\n mask=threshold_mask_lower_sub,\r\n sub_size=self.sub_steps[-1],\r\n )\r\n\r\n iterated_array_2d = iterated_array + array_higher_sub.binned.native\r\n\r\n return self.return_iterated_array_result(iterated_array=iterated_array_2d)", "def metrics(img_gt, img_pred, voxel_size):\n\n if img_gt.ndim != img_pred.ndim:\n raise ValueError(\"The arrays 'img_gt' and 'img_pred' should have the \"\n \"same dimension, {} against {}\".format(img_gt.ndim,\n img_pred.ndim))\n\n res = []\n # 
Loop on each classes of the input images\n for c in [3, 1, 2]:\n # Copy the gt image to not alterate the input\n gt_c_i = np.copy(img_gt)\n gt_c_i[gt_c_i != c] = 0\n\n # Copy the pred image to not alterate the input\n pred_c_i = np.copy(img_pred)\n pred_c_i[pred_c_i != c] = 0\n\n # Clip the value to compute the volumes\n gt_c_i = np.clip(gt_c_i, 0, 1)\n pred_c_i = np.clip(pred_c_i, 0, 1)\n\n # Compute the Dice\n dice = dc(gt_c_i, pred_c_i)\n\n # Compute volume\n volpred = pred_c_i.sum() * np.prod(voxel_size) / 1000.\n volgt = gt_c_i.sum() * np.prod(voxel_size) / 1000.\n\n res += [dice, volpred, volpred-volgt]\n\n return res", "def distribution_cut(self, timestamp, window, slo_config):\n conf = slo_config['backend']\n measurement = conf['measurement']\n filter_valid = measurement['filter_valid']\n threshold_bucket = int(measurement['threshold_bucket'])\n good_below_threshold = measurement.get('good_below_threshold', True)\n\n # Query 'valid' events\n series = self.query(timestamp=timestamp,\n window=window,\n filter=filter_valid)\n series = list(series)\n\n if not series:\n return (0, 0) # no timeseries\n\n distribution_value = series[0].points[0].value.distribution_value\n # bucket_options = distribution_value.bucket_options\n bucket_counts = distribution_value.bucket_counts\n valid_events_count = distribution_value.count\n # growth_factor = bucket_options.exponential_buckets.growth_factor\n # scale = bucket_options.exponential_buckets.scale\n\n # Explicit the exponential distribution result\n count_sum = 0\n distribution = OrderedDict()\n for i, bucket_count in enumerate(bucket_counts):\n count_sum += bucket_count\n # upper_bound = scale * math.pow(growth_factor, i)\n distribution[i] = {\n # 'upper_bound': upper_bound,\n # 'bucket_count': bucket_count,\n 'count_sum': count_sum\n }\n LOGGER.debug(pprint.pformat(distribution))\n\n if len(distribution) - 1 < threshold_bucket:\n # maximum measured metric is below the cut after bucket number\n lower_events_count = valid_events_count\n upper_events_count = 0\n else:\n lower_events_count = distribution[threshold_bucket]['count_sum']\n upper_events_count = valid_events_count - lower_events_count\n\n if good_below_threshold:\n good_event_count = lower_events_count\n bad_event_count = upper_events_count\n else:\n good_event_count = upper_events_count\n bad_event_count = lower_events_count\n\n return (good_event_count, bad_event_count)", "def get_buckets(self, first, last, num_buckets, hertz_cutoff=float(5)):\n # Pensar en la posibilidad de no aplicar PCA, permitir utilizar fft sobre una feature diferente, por ejemplo raiz-cuadrada(x2 + y2 + z2)\n if self.pca == True:\n pca = PCA(n_components=1, copy=True, whiten=True)\n numpy_data = array(self.data)\n transformed_dataset = PCA.fit_transform(pca, numpy_data)\n slice=transformed_dataset[first:last]\n else:\n slice = self.data[first:last]\n slice = [column[0] for column in slice]\n \n transformed = fft.fft(slice)\n absolute = [abs(complex) for complex in transformed]\n\n frequencies = self.get_frequencies()\n\n buckets = [0 for i in range(num_buckets)]\n width = hertz_cutoff / num_buckets\n sum_of_buckets = 0.0000001\n for i in range(1, len(absolute)):\n index = int(frequencies[i] / width)\n if index >= num_buckets:\n break\n buckets[index] += absolute[i]\n sum_of_buckets += absolute[i]\n\n #if args.normalize == 't':\n # buckets = map(lambda x: x/sum_of_buckets, buckets)\n\n return buckets", "def get_discrete_split_value(arr: np.ndarray, y: np.ndarray, eval_func: Callable):\n\n # First element is the 
weighted average eval_func of the split\n # Second term is the intrinsic value to penalize many splits.\n return (\n sum(\n [\n eval_func(y[arr == value]) * np.sum(arr == value) / len(y)\n for value in set(arr)\n ]\n ),\n -1\n * sum(\n [\n pipe(\n np.sum(arr == value) / len(y),\n lambda ratio: ratio * np.log(ratio),\n )\n for value in set(arr)\n ]\n ),\n )", "def conceptcover(bin_arr, limit=1, uncovered=0.1):\n arr = np.copy(bin_arr)\n arr_sum = np.sum(arr)\n result = []\n while True:\n k = kernel(arr)\n i = intent(bin_arr, k)\n e = extent(bin_arr, i)\n if len(e)*len(i) < limit or (e, i) in result: break\n result.append((e, i))\n arr = removed(arr, e, i)\n if np.sum(arr)/arr_sum < uncovered: break\n return result", "def __call__(self, n_bins, segment, elements):\n\n # n_bins\n assert type(n_bins) is int\n assert n_bins > 0\n\n # segment\n assert type(segment) is list or type(segment) is tuple\n assert len(segment) == 2\n assert np.isscalar(segment[0]) and np.isscalar(segment[1])\n assert segment[0] < segment[1]\n\n # elements\n assert type(elements) is np.ndarray, f\"elements should be an np.ndarray, instead of {type(elements)}\"\n assert elements.dtype == np.number\n\n return np.array([segment[0] + i / n_bins * (segment[1] - segment[0])\n for i in range(n_bins)]\n + [float(segment[1])])", "def _computeValueFunction(self, nbDims, low, high, retstep=False):\n # algorithms performing in discrete space will have a discrete\n # value function that cannot be evaluated at any point - only on the\n # ones for which they have been setup based on the problem it has been\n # setup to solve\n def __round(vec):\n return tuple(int(x) for x in vec)\n\n def __notround(vec):\n return vec\n\n _round = __notround\n if self._algo.DOMAIN['state'] == Spaces.Discrete:\n _round = __round\n\n allParams, stepSizes = self._discretizer.discretize(retstep=True)\n\n allActions = self._problem.getActionsList()\n reducer = max if self.reducer == 'max' else mean\n\n # returns a list\n data = [\n utils.extends({\n key: state[k]\n for k, key in enumerate(self.getKeys(nbDims))\n }, z=reducer([\n self._algo.actionValue(_round(state), action)\n for action in allActions]))\n for state in allParams\n ]\n if retstep:\n return data, stepSizes\n return data", "def get_split_goodness_fit_continuous (\n feature_array: np.ndarray, target_array: np.ndarray, split: float, evaluate_function: Callable\n ):\n # Get above and below the split value\n above = feature_array >= split\n below = feature_array < split\n\n # Get weighted average evaluate_function on the splits\n n_above = np.sum ( above )\n above_eval = (\n evaluate_function ( target_array [ above ] ) * n_above / len ( target_array )\n ) # Weight = frac points in above\n below_eval = (\n evaluate_function ( target_array [ below ] ) * ( len ( target_array ) - n_above ) / len ( target_array )\n ) # Weight = frac points not in above\n\n # returns weighted sum of evaluate_function across splits & the gain ratio denominator\n return (\n above_eval + below_eval,\n -1\n * sum (\n map (\n lambda x: x * np.log ( x ),\n [ n_above / len ( target_array ), ( len ( target_array ) - n_above ) / len ( target_array ) ],\n )\n ),\n ) # End get_split_goodness_fit_continuous", "def get_split_goodness_fit_continuous(\n arr: np.ndarray, y: np.ndarray, split: float, eval_func: Callable\n ):\n # Get above and below the split value\n above = arr >= split\n below = arr < split\n\n # get weighted average eval_func on the splits\n n_above = np.sum(above)\n above_eval = (\n eval_func(y[above]) * n_above / 
len(y)\n ) # weight = frac points in above\n below_eval = (\n eval_func(y[below]) * (len(y) - n_above) / len(y)\n ) # weight = frac points not in above\n\n # returns weighted sum of eval_func across splits, and the gain ratio denominator\n return (\n above_eval + below_eval,\n -1\n * sum(\n map(\n lambda x: x * np.log(x),\n [n_above / len(y), (len(y) - n_above) / len(y)],\n )\n ),\n )", "def compute_cost_and_order_cuts(cuts, cost_function):\n\n cost_cuts = np.zeros(len(cuts.values), dtype=float)\n for i_cut, cut in enumerate(cuts.values):\n cost_cuts[i_cut] = cost_function(cut)\n idx = np.argsort(cost_cuts)\n\n cuts.values = cuts.values[idx]\n cuts.costs = cost_cuts[idx]\n if cuts.names is not None:\n cuts.names = cuts.names[idx]\n if cuts.equations is not None:\n cuts.equations = cuts.equations[idx]\n\n return cuts", "def calc_features(self, instance):\n results = []\n\n for attribute, function in FeatureCalcerMeanCtr.FUNCTIONS:\n attribute_value = function(instance, attribute)\n if attribute_value not in self.attr2ctr[attribute]:\n results.append(self.mean_ctr)\n results.append(self.mean_ctr)\n results.append(0)\n continue\n clicks, impressions = self.attr2ctr[attribute][attribute_value]\n results.append(self.calc_ctr(clicks, impressions))\n results.append(self.calc_ctr(clicks, impressions, 0.08, 75))\n results.append(impressions)\n return results", "def batch_eval(f, pts):\n\n\t# Use this array to send into FEniCS.\n\tout = np.zeros(1)\n\n\tdef gen():\n\t\tfor pt in pts.reshape(2, -1).T:\n\t\t\tf.eval(out, pt)\n\t\t\tyield out[0]\n\n\tvalues = list(gen())\n\tavalues = np.array(values).reshape(pts.shape[1:])\n\treturn avalues", "def _iterate_over_factors(self, func, args):\n # TODO The user may prefer to provide the arguments as lists and receive them as\n # TODO lists, as this may be the form in which they are available. This should\n # TODO be allowed, rather than packing and unpacking them repeatedly.\n args_list, numerical_args = self._validate_and_prepare_args_for_iteration(args)\n\n out = [\n self._get_method(self.factors[i], func, args_list[i], numerical_args)\n for i in range(len(self.factors))\n ]\n if self._pool_outputs:\n return self._pool_outputs_from_function(out)\n return out", "def _extract(self, n_elements, n_warmup_functions,\n counters, threshold, n_extractions, decay):\n input_list = np.arange(n_elements)\n\n pre_exponentiation_list = np.arange(n_elements)\n # zero the already extracted values\n inidices_not_to_extract = tuple(np.where(counters >= threshold))\n pre_exponentiation_list[inidices_not_to_extract] = 0\n #print('pre_exponentiation_list after zero: ', pre_exponentiation_list[:40])\n # rescale values so that the smallest is 1\n # get the smallest (non-zero!) 
value\n # bring 0s artificially to max value\n pre_exponentiation_list[inidices_not_to_extract] = np.max(pre_exponentiation_list)\n min_value = np.min(pre_exponentiation_list)\n pre_exponentiation_list = pre_exponentiation_list - (min_value - 1)\n # bring the already extracted back to 0 (since we just got them negative)\n pre_exponentiation_list[inidices_not_to_extract] = 0\n #print('pre_exponentiation_list after min subtract: ', pre_exponentiation_list[:40])\n\n # create probabilities exponential decay\n # so that it is more probable to select elements from the head\n # esponential decay y = a * (1 - b) * x\n max_prob = 1\n exp_list = np.array([max_prob * ((1 - decay)**e) for e in pre_exponentiation_list])\n\n # remove elements that have counter above thresholds\n # aka we already extracted them the required number of times\n inidices_not_to_extract = tuple(np.where(counters >= threshold))\n exp_list[inidices_not_to_extract] = 0\n\n total_value = sum(exp_list)\n probab_list = np.array([e/total_value for e in exp_list])\n logger.debug('Eponential probabilities (after normalization)')\n logger.debug(probab_list)\n # extract indices\n logger.debug('non zero: ' + str(np.count_nonzero(probab_list)))\n extracted_indices = \\\n np.random.choice(input_list, n_extractions + n_warmup_functions,\n p=probab_list, replace=False)\n logger.debug(extracted_indices)\n indices_to_consider = extracted_indices[:n_extractions]\n logger.debug(indices_to_consider)\n # update counters\n counters[indices_to_consider] = counters[indices_to_consider] + 1\n # reorder so that the wormup are at the beginning\n warmup_first_extracted_indices = extracted_indices[n_extractions:]\n warmup_first_extracted_indices = \\\n np.concatenate((extracted_indices[-n_warmup_functions:], extracted_indices[:n_extractions]))\n logger.info(warmup_first_extracted_indices)\n return warmup_first_extracted_indices, counters", "def compute_iterations(self):\n\n nb_iter = min([len(a.ysec_iter) for a in self], 0)\n # syncronize all iterations to a single one\n for oneresult in self:\n oneresult.change_iterations_number(nb_iter)\n \n # compute value error for each iteration\n for i in range(nb_iter):\n value = [one.ysec_iter[i] for one in self]\n error = [one.yerr_iter[i]**2 for one in self]\n \n # store the value for the iteration\n self.ysec_iter.append(sum(value))\n self.yerr_iter.append(math.sqrt(sum(error)))", "def _iterative_cutting(g, p):\n\n to_be_processed = [g]\n K = math.ceil(len(g.nodes()) / p)\n\n res = []\n while len(to_be_processed) > 0:\n\n g = to_be_processed.pop()\n g_l, g_r = kernighan_lin_bisection(g, weight=\"rate\")\n\n for partition in g_l, g_r:\n if len(partition) > K:\n to_be_processed.append(g.subgraph(partition))\n else:\n res.append(partition)\n return res", "def fcn(self, data_in):\n \n assert isinstance(data_in, _np.ndarray), 'Required input is an ndarray'\n\n assert data_in.ndim == 1, 'Required input is a 1D ndarray'\n \n data_out = 0*data_in\n\n cutter = CutEveryNSpectra(self.parameters['offset'], cut_m=self.parameters['cut_m'],\n every_n=self.parameters['every_n'], action=self.parameters['action'])\n\n # Because of the limits of PlotEffect, the input and output data HAS TO BE the same size\n temp = cutter.calculate(_np.repeat(data_in[:,None], 11, axis=-1)).sum(axis=-1)\n data_out[:temp.size] = temp\n \n return data_out", "def evaluate(self, test_data, split=2):\n size = int(len(test_data) / split)\n mini_batch_split = np.arange(size, len(test_data), size)\n mini_batches = np.split(test_data, mini_batch_split)\n 
total = 0\n\n for mini_batch in mini_batches:\n xs, ys = np.array(mini_batch).T\n xs = cp.array(cp.vstack(xs).astype(np.float64).reshape((-1, self.sizes[0], 1)))\n ys = cp.array(ys.astype(np.int64))\n total += cp.sum(cp.argmax(self.feedforward(xs), axis=(1, 2)) == ys)\n return total", "def test_1d_cut():\n \n dic,data = ng.pipe.read(\"common_data/1d_pipe/test_cut.ft\")\n assert data.shape == (2766,)\n assert data.dtype == 'float32'\n assert round(data[0],2) == -12123.67\n assert round(data[1],2) == -8979.31\n assert round(data[100],2) == -7625.30\n write_readback(dic,data)\n check_ppm_limits(dic,data,0,[278.59, 10.03])", "def roccurve(signals, bkgs, cut_function, cut_values):\n eff_sig, n_pass_sig, n_total_sig = get_eff(svjflatanalysis.iterate(signals), cut_function, cut_values)\n eff_bkg, n_pass_bkg, n_total_bkg = get_eff(svjflatanalysis.iterate(bkgs), cut_function, cut_values)\n return eff_sig, eff_bkg, n_pass_sig, n_pass_bkg, n_total_sig, n_total_bkg", "def feature_processing(array2d):\n new_array2d = np.zeros([array2d.shape[0], 29])\n # items/ orders\n new_array2d[:, 0] = array2d[:, 4] / array2d[:, 3]\n # cancels / orders\n new_array2d[:, 1] = array2d[:, 5] / array2d[:, 3]\n # returns / items\n new_array2d[:, 2] = array2d[:, 6] / array2d[:, 4]\n # voucher / orders\n new_array2d[:, 3] = array2d[:, 10] / array2d[:, 3]\n # female_items / female_items + male_items\n new_array2d[:, 4] = array2d[:, 15] / ([1 if x == 0 else x for x in (array2d[:, 15] + array2d[:, 16])])\n # male_items / female_items + male_items\n new_array2d[:, 5] = array2d[:, 16] / ([1 if x == 0 else x for x in (array2d[:, 15] + array2d[:, 16])])\n # unisex_items / items\n new_array2d[:, 6] = array2d[:, 17] / array2d[:, 4]\n # wapp_items / items\n new_array2d[:, 7] = array2d[:, 18] / array2d[:, 4]\n # wftw_items / items\n new_array2d[:, 8] = array2d[:, 19] / array2d[:, 4]\n # mapp_items / items\n new_array2d[:, 9] = array2d[:, 20] / array2d[:, 4]\n # wacc_items / items\n new_array2d[:, 10] = array2d[:, 21] / array2d[:, 4]\n # macc_items / items\n new_array2d[:, 11] = array2d[:, 22] / array2d[:, 4]\n # mftw_items / items\n new_array2d[:, 12] = array2d[:, 23] / array2d[:, 4]\n # wspt_items / items\n new_array2d[:, 13] = array2d[:, 24] / array2d[:, 4]\n # mspt_items / items\n new_array2d[:, 14] = array2d[:, 25] / array2d[:, 4]\n # curvy_items / items\n # Curvy item has a strong correlation with gender, however they are very right-skewed use np.power(1/6) to smooth it\n new_array2d[:, 15] = np.power(array2d[:, 26] / array2d[:, 4], 1 / 6)\n # sacc_items / items\n new_array2d[:, 16] = array2d[:, 27] / array2d[:, 4]\n # msite_orders / orders\n new_array2d[:, 17] = array2d[:, 28] / array2d[:, 3]\n # desktop_orders / orders\n new_array2d[:, 18] = array2d[:, 29] / array2d[:, 3]\n # android_orders / orders\n new_array2d[:, 19] = array2d[:, 30] / array2d[:, 3]\n # ios_orders / orders\n new_array2d[:, 20] = array2d[:, 31] / array2d[:, 3]\n # other_device_orders / orders\n new_array2d[:, 21] = array2d[:, 32] / array2d[:, 3]\n # work_orders / orders\n new_array2d[:, 22] = array2d[:, 33] / array2d[:, 3]\n # home_orders / orders\n new_array2d[:, 23] = array2d[:, 34] / array2d[:, 3]\n # parcelpoint_orders / orders\n new_array2d[:, 24] = array2d[:, 35] / array2d[:, 3]\n # other_collection_orders / orders\n new_array2d[:, 25] = array2d[:, 36] / array2d[:, 3]\n # average_discount_onoffer\n new_array2d[:, 26] = array2d[:, 39]\n # average_discount_used\n new_array2d[:, 27] = array2d[:, 40]\n # revenue / order\n new_array2d[:, 28] = array2d[:, 41] 
/ array2d[:, 3]\n\n # normalize by each feature\n new_array2d = normalize(new_array2d, axis=0, norm='max')\n return new_array2d", "def game_function(\n game, function, num_resamples, num_returned, *, percentiles=None, processes=None\n):\n results = np.empty((num_resamples, num_returned))\n\n chunksize = num_resamples if processes == 1 else 4\n with multiprocessing.Pool(processes) as pool:\n for i, res in enumerate(\n pool.imap_unordered(\n functools.partial(_resample_function, function, game),\n range(num_resamples),\n chunksize=chunksize,\n )\n ):\n results[i] = res\n\n if percentiles is None: # pylint: disable=no-else-return\n results.sort(0)\n return results.T\n else:\n return np.percentile(results, percentiles, 0).T", "def get_bc_array_for_all_frequencies(self, loaded_table, boundary_condition):\n if self.frequencies is None:\n number_frequencies = 1\n else:\n number_frequencies = len(self.frequencies)\n\n if loaded_table:\n list_arrays = [np.zeros(number_frequencies, dtype=float) if bc is None else bc[0:number_frequencies] for bc in boundary_condition]\n self.no_table = False\n else:\n list_arrays = [np.zeros(number_frequencies, dtype=float) if bc is None else np.ones(number_frequencies, dtype=float)*bc for bc in boundary_condition]\n\n return list_arrays", "def outlierCleaner(predictions, ages, net_worths):\n \n cleaned_data = []\n errors = []\n cleaned_errors = []\n\n ### your code goes here\n count = 0\n for p in predictions:\n errors.append((net_worths[count] - p) * (net_worths[count] - p))\n count = count + 1\n \n sorted_errors = sorted(errors)\n outlier_definer = sorted_errors[80]\n\n cleaned_net_worths = net_worths[errors < outlier_definer]\n cleaned_ages = ages[errors < outlier_definer]\n cleaned_predictions = predictions[errors < outlier_definer]\n\n print(len(cleaned_net_worths))\n\n count = 0\n for p in cleaned_predictions:\n cleaned_errors.append((cleaned_net_worths[count] - p) * (cleaned_net_worths[count] - p))\n count = count + 1\n\n cleaned_data = tuple(zip(cleaned_ages, cleaned_net_worths, cleaned_errors))\n \n return cleaned_data", "def simpleAlgorithm(values):\n # Returns a list of all the cluster values [0, 0, 1, 1, 1, 1, 1, 3, 4, 5, 5, 5, ...]\n clusterValues = [value[2] for value in values]\n # Find the mode cluster number in the clusterValues list\n mode = findMode(clusterValues)\n # Filter out values with cluster MODE\n filteredValues = [value for value in values if value[2] == mode]\n # Lowest unixtime in the values list\n startUnixtime = values[0][0]\n # The differences between a data tuple's unixtime and the start unixtime in a list\n unixtimeDiffs = [float(elem[0] - startUnixtime) for elem in filteredValues]\n # The sum of all the differences\n total = sum(unixtimeDiffs)\n # A list of weight values\n weightValues = [elem / total for elem in unixtimeDiffs]\n\n # Creates a list of weights for each filtered value\n # weightValues = [float(value[0]) / float(mostRecentUnixTime) for value in filteredValues]\n # weightValues = [float(index) / float(len(filteredValues)) for index in range(len(filteredValues))]\n # weightValues = [1 - (0.01 * index) for index in range(len(filteredValues))]\n # weightValues = weightValues[::-1]\n print weightValues\n # A list of the light values\n filteredValuesLight = [value[1] for value in filteredValues]\n # Return weighted average of light values\n return np.average(filteredValuesLight, weights = weightValues)", "def fasper_py(x,y,ofac,hifac, MACC=4):\n #Check dimensions of input arrays\n n = long(len(x))\n if n != len(y):\n print 
'Incompatible arrays.'\n return\n\n nout = 0.5*ofac*hifac*n\n nfreqt = long(ofac*hifac*n*MACC) #Size the FFT as next power\n nfreq = 64L # of 2 above nfreqt.\n\n while nfreq < nfreqt: \n nfreq = 2*nfreq\n\n ndim = long(2*nfreq)\n \n #Compute the mean, variance\n ave = y.mean()\n ##sample variance because the divisor is N-1\n var = ((y-y.mean())**2).sum()/(len(y)-1) \n # and range of the data.\n xmin = x.min()\n xmax = x.max()\n xdif = xmax-xmin\n\n #extirpolate the data into the workspaces\n wk1 = np.zeros(ndim, dtype='complex')\n wk2 = np.zeros(ndim, dtype='complex')\n\n fac = ndim/(xdif*ofac)\n fndim = ndim\n ck = ((x-xmin)*fac) % fndim\n ckk = (2.0*ck) % fndim\n\n for j in range(0L, n):\n __spread__(y[j]-ave,wk1,ndim,ck[j],MACC)\n __spread__(1.0,wk2,ndim,ckk[j],MACC)\n\n #Take the Fast Fourier Transforms\n wk1 = np.fft.ifft( wk1 )*len(wk1)\n wk2 = np.fft.ifft( wk2 )*len(wk1)\n\n wk1 = wk1[1:nout+1]\n wk2 = wk2[1:nout+1]\n rwk1 = wk1.real\n iwk1 = wk1.imag\n rwk2 = wk2.real\n iwk2 = wk2.imag\n \n df = 1.0/(xdif*ofac)\n \n #Compute the Lomb value for each frequency\n hypo2 = 2.0 * abs( wk2 )\n hc2wt = rwk2/hypo2\n hs2wt = iwk2/hypo2\n\n cwt = np.sqrt(0.5+hc2wt)\n swt = np.sign(hs2wt)*(np.sqrt(0.5-hc2wt))\n den = 0.5*n+hc2wt*rwk2+hs2wt*iwk2\n cterm = (cwt*rwk1+swt*iwk1)**2./den\n sterm = (cwt*iwk1-swt*rwk1)**2./(n-den)\n\n wk1 = df*(np.arange(nout, dtype='float')+1.)\n wk2 = (cterm+sterm)/(2.0*var)\n pmax = wk2.max()\n jmax = wk2.argmax()\n\n\n #Significance estimation\n #expy = exp(-wk2) \n #effm = 2.0*(nout)/ofac \n #sig = effm*expy\n #ind = (sig > 0.01).nonzero()\n #sig[ind] = 1.0-(1.0-expy[ind])**effm\n\n #Estimate significance of largest peak value\n expy = np.exp(-pmax) \n effm = 2.0*(nout)/ofac \n prob = effm*expy\n\n if prob > 0.01: \n prob = 1.0-(1.0-expy)**effm\n\n return wk1,wk2,nout,jmax,prob", "def num_elements_eval_function_capped(individual, test_data, truth_data, name=None):\r\n return max(len(individual), 1707)", "def _get_total_fitness(self, populations, function):\n total_fit = []\n for population in populations:\n total_fit.append(sum([function.fit(*arg) for arg in population]))\n\n return total_fit", "def source_cut(env, \r\n number, \r\n counter,\r\n generation,\r\n generation_list_come,\r\n generation_list_wait,\r\n generation_list_begin,\r\n generation_list_finish,\r\n df_simtime,\r\n generation_list_name,\r\n sum_cut_number_list):\r\n sum_cut_number = 0\r\n for i in range(number):\r\n sample_j = np.random.choice(df_caltocut_distr['time'])\r\n sum_cut_number += sample_j\r\n for j in range(sample_j):\r\n if j == 0:\r\n if i == 0:\r\n t = generation_list_come[i]#到达时间服从指数分布,此处的t为间隔时间\r\n else:\r\n t = generation_list_come[i] - generation_list_come[i-1]\r\n else:\r\n t = 0\r\n \r\n yield env.timeout(t)\r\n serve_time = np.random.choice(df_simtime['sim_time'])#得到模拟数据\r\n # print(serve_time)\r\n c = document(env, \r\n 'Doc%02d_%02d' %(i,j), \r\n generation,\r\n counter, \r\n time_in_fac,\r\n generation_list_begin,\r\n generation_list_wait,\r\n generation_list_finish,\r\n serve_time,\r\n generation_list_name)\r\n env.process(c)\r\n sum_cut_number_list.append(sum_cut_number)", "def outlierCleaner(predictions, ages, net_worths):\n residual_errors = [prediction - net_worth for prediction,net_worth in zip(predictions, net_worths)]\n # get a list of tuples, each formed from each element of the 3 arrays; works somehow even though ages is a 2D array (but with second dimension equal to 1)\n cleaned_data = zip(ages, net_worths, residual_errors)\n # sort cleaned_data by the 3rd 
element in the tuple, i.e. residual_errors\n cleaned_data.sort(key=lambda tup: tup[2])\n num_elements = len(cleaned_data) - int(0.1 * len(cleaned_data))\n # get the first \"num_elements\" elements from the sorted cleaned_data\n cleaned_data = cleaned_data[:num_elements]\n\n ### your code goes here\n print(num_elements, len(ages))\n\n return cleaned_data", "def get_discrete_split_value ( feature_array: np.ndarray, target_array: np.ndarray, evaluate_function: Callable ):\n\n # First element is the weighted average evaluate_function of the split\n # Second term is the intrinsic value to penalize many splits.\n return (\n sum (\n [\n evaluate_function ( target_array [ feature_array == value ] ) * np.sum ( feature_array == value ) / len ( target_array )\n for value in set ( feature_array )\n ]\n ),\n -1\n * sum (\n [\n pipe (\n np.sum ( feature_array == value ) / len ( target_array ),\n lambda ratio: ratio * np.log ( ratio ),\n )\n for value in set ( feature_array )\n ]\n ),\n ) # End get_discrete_split_value()", "def array_part_L2_loops_pruning(loops, config):\n pruned_loops = []\n tuning = config['tuning']\n loop_stop = 0\n for c in tuning['array_part_L2']['coincident']:\n if not c:\n break\n loop_stop += 1\n ubs = tuning['array_part_L2']['tilable_loops'][loop_stop:]\n for loop in loops:\n # Examine [loop_stop:-1], only leave those that equal the upper bound\n loop_cut = loop[loop_stop:]\n if loop_cut != ubs:\n continue\n pruned_loops.append(loop)\n\n return pruned_loops", "def __call__(self, g, n_partitions):\n\n def _iterative_cutting(g, p):\n \"\"\"helper function (iterative version)\"\"\"\n\n to_be_processed = [g]\n K = math.ceil(len(g.nodes()) / p)\n\n res = []\n while len(to_be_processed) > 0:\n\n g = to_be_processed.pop()\n g_l, g_r = kernighan_lin_bisection(g, weight=\"rate\")\n\n for partition in g_l, g_r:\n if len(partition) > K:\n to_be_processed.append(g.subgraph(partition))\n else:\n res.append(partition)\n return res\n\n def _recursive_cutting(g, p, res=[]):\n \"\"\"helper function (recursive version)\"\"\"\n k = math.ceil(len(g.nodes()) / p)\n g_l, g_r = kernighan_lin_bisection(g, weight=\"rate\")\n\n for partition in g_l, g_r:\n if len(partition) > k:\n _recursive_cutting(g.subgraph(partition), p / 2, res)\n else:\n res.append(partition)\n\n return res\n\n # when computing a partitioning for the graph nodes,\n # if result is known for a smaller value of n_partitions\n # don't restart from scratch but use it as an initial value\n if g not in self._cache or len(self._cache[g]) < n_partitions:\n self._cache.clear()\n partitions = _recursive_cutting(g, p=n_partitions)\n self._cache[g] = partitions[:]\n else:\n partitions = self._cache[g][:]\n\n # merge small partitions to return the required number of partitions\n while len(partitions) > n_partitions:\n partitions.sort(key=len, reverse=True)\n e1 = partitions.pop()\n e2 = partitions.pop()\n partitions.append(e1.union(e2))\n return partitions", "def suppression(y_subs, buckets, its, windows):\n\n for i in range(its):\n w0 = windows[i]\n\n for j in range(1, buckets):\n v = min(j, w0, buckets-j)\n a = np.mean(y_subs[j-v:j+v+1])\n y_subs[j] = min(a, y_subs[j])\n\n for j in range(buckets-1, 0, -1):\n v = min(j, w0, buckets-j)\n a = np.mean(y_subs[j-v:j+v+1])\n y_subs[j] = min(a, y_subs[j])\n\n return y_subs", "def fasper(x, y, ofac, hifac, n_threads, MACC=4):\n # Check dimensions of input arrays\n n = int(len(x))\n if n != len(y):\n print('Incompatible arrays.')\n return\n \n # print x, y, hifac, ofac\n \n nout = 
int(0.5*ofac*hifac*n)\n nfreqt = int(ofac*hifac*n*MACC) #Size the FFT as next power\n nfreq = 64 # of 2 above nfreqt.\n\n while nfreq < nfreqt:\n nfreq = 2*nfreq\n\n ndim = int(2*nfreq)\n \n # Compute the mean, variance\n ave = y.mean()\n # sample variance because the divisor is N-1\n var = ((y - y.mean())**2).sum()/(len(y) - 1) \n # and range of the data.\n xmin = x.min()\n xmax = x.max()\n xdif = xmax - xmin\n\n # extrapolate the data into the workspaces\n if is_pyfftw:\n wk1 = pyfftw.n_byte_align_empty(int(ndim), 16, 'complex') * 0.\n wk2 = pyfftw.n_byte_align_empty(int(ndim), 16, 'complex') * 0.\n else:\n wk1 = zeros(ndim, dtype='complex')\n wk2 = zeros(ndim, dtype='complex')\n\n fac = ndim/(xdif*ofac)\n fndim = ndim\n ck = ((x - xmin)*fac) % fndim\n ckk = (2.0*ck) % fndim\n \n for j in range(0, n):\n __spread__(y[j] - ave, wk1, ndim, ck[j], MACC)\n __spread__(1.0, wk2, ndim, ckk[j], MACC)\n\n # Take the Fast Fourier Transforms.\n if is_pyfftw:\n fft_wk1 = pyfftw.builders.ifft(wk1, planner_effort='FFTW_ESTIMATE',\n threads=n_threads)\n wk1 = fft_wk1() * len(wk1)\n fft_wk2 = pyfftw.builders.ifft(wk2, planner_effort='FFTW_ESTIMATE',\n threads=n_threads)\n wk2 = fft_wk2() * len(wk2)\n else:\n wk1 = ifft(wk1)*len(wk1)\n wk2 = ifft(wk2)*len(wk1)\n\n wk1 = wk1[1:nout + 1]\n wk2 = wk2[1:nout + 1]\n rwk1 = wk1.real\n iwk1 = wk1.imag\n rwk2 = wk2.real\n iwk2 = wk2.imag\n \n df = 1.0/(xdif*ofac)\n \n # Compute the Lomb value for each frequency\n hypo2 = 2.0*abs(wk2)\n hc2wt = rwk2/hypo2\n hs2wt = iwk2/hypo2\n\n cwt = sqrt(0.5 + hc2wt)\n swt = sign(hs2wt)*(sqrt(0.5 - hc2wt))\n den = 0.5*n + hc2wt*rwk2 + hs2wt*iwk2\n cterm = (cwt*rwk1 + swt*iwk1)**2./den\n sterm = (cwt*iwk1 - swt*rwk1)**2./(n - den)\n\n wk1 = df*(arange(nout, dtype='float') + 1.)\n wk2 = (cterm + sterm)/(2.0*var)\n pmax = wk2.max()\n jmax = wk2.argmax()\n\n # Estimate significance of largest peak value\n expy = exp(-pmax) \n effm = 2.0*(nout)/ofac\n prob = effm*expy\n\n if prob > 0.01: \n prob = 1.0 - (1.0 - expy)**effm\n\n return wk1, wk2, nout, jmax, prob", "def calc_statistics(self):\n\n self._ydata = np.zeros([4, len(self._depths)], dtype=float)\n\n exp_vals = np.zeros(self._ntrials, dtype=float)\n ideal_vals = np.zeros(self._ntrials, dtype=float)\n\n for depthidx, depth in enumerate(self._depths):\n\n exp_shots = 0\n\n for trialidx in range(self._ntrials):\n cname = 'qv_depth_%d_trial_%d' % (depth, trialidx)\n exp_vals[trialidx] = self._heavy_output_counts[cname]\n exp_shots += self._circ_shots[cname]\n ideal_vals[trialidx] = self._heavy_output_prob_ideal[cname]\n\n # Calculate mean and error for experimental data\n self._ydata[0][depthidx] = np.sum(exp_vals)/np.sum(exp_shots)\n self._ydata[1][depthidx] = (self._ydata[0][depthidx] *\n (1.0-self._ydata[0][depthidx])\n / self._ntrials)**0.5\n\n # Calculate mean and error for ideal data\n self._ydata[2][depthidx] = np.mean(ideal_vals)\n self._ydata[3][depthidx] = (self._ydata[2][depthidx] *\n (1.0-self._ydata[2][depthidx])\n / self._ntrials)**0.5", "def convolve_and_sum_slow(loadings, unit_response_functions=None):\n\n loadings = loadings.T\n print(loadings.shape)\n print(\"Convolving\")\n if (\n unit_response_functions is None\n ): # this logic is temporary, but have a safeguard so it's not accidentally used in production\n if settings.DEBUG:\n unit_response_functions = numpy.ones(\n [loadings.shape[0], loadings.shape[1], loadings.shape[2]],\n dtype=numpy.float64,\n )\n else:\n raise ValueError(\"Must provide Unit Response Functions!\")\n\n time_span = loadings.shape[0]\n 
output_matrix = numpy.zeros(\n [loadings.shape[0], loadings.shape[1], loadings.shape[2]], dtype=numpy.float64\n )\n\n for year in range(time_span):\n print(year)\n URF_length = time_span - year\n\n print(\"Subset\")\n subset_start = arrow.utcnow()\n current_year_loadings = loadings[\n year,\n :,\n :,\n ]\n subset_end = arrow.utcnow()\n print(subset_end - subset_start)\n\n # print(\"Reshape\")\n # reshape_start = arrow.utcnow()\n # reshaped_loadings = current_year_loadings.reshape(current_year_loadings.shape)\n # print(reshaped_loadings.shape)\n # repeated_loadings = numpy.repeat(reshaped_loadings, URF_length, 2)\n # reshape_end = arrow.utcnow()\n # print(reshape_end - reshape_start)\n\n print(\"Multiply\")\n multiply_start = arrow.utcnow()\n new_loadings = numpy.multiply(\n current_year_loadings, unit_response_functions[:URF_length, :, :]\n )\n multiply_end = arrow.utcnow()\n print(multiply_end - multiply_start)\n\n print(\"Add and Insert Back in\")\n add_start = arrow.utcnow()\n numpy.add(output_matrix[year:, :, :], new_loadings, output_matrix[year:, :, :])\n add_end = arrow.utcnow()\n print(add_end - add_start)\n # multiply this year's matrix * URFs matrix sliced to represent size of future\n # then add result to output_matrix\n\n results = numpy.sum(output_matrix, [1, 2]) # sum in 2D space", "def reduce_dataset(years, values,flux_floor=0,max_tm_error=0,min_reduction_steps=200):\n non_zero_ind, min_retained_zero_years = remove_begin_end_zero_flux(years,values,flux_floor,min_reduction_steps)\n\n years_mod = years[non_zero_ind]\n values_mod = values[non_zero_ind]\n\n if years_mod.size <3:\n years_mod = years\n values_mod = values\n values_mod = 0\n else:\n #makes ure you have not removed more than 1% of the mass when removing 0 or flux floor rates\n o_mass = TimeSeries(years,values,None,None).integrate().values[-1]\n r_mass = TimeSeries(years_mod, values_mod, None, None).integrate().values[-1]\n if abs((o_mass-r_mass)/o_mass)*100 > 1:\n years_mod = years\n values_mod = values\n timeseries = TimeSeries(years_mod, values_mod, None, None)\n mass = timeseries.integrate()\n\n #normalize Values\n maxval = np.max(values_mod)\n values_mod = values_mod/maxval\n o_timeseries = TimeSeries(years,values/maxval,None,None)\n o_mass = o_timeseries.integrate()\n timeseries = TimeSeries(years_mod, values_mod, None, None)\n mass = timeseries.integrate()\n\n mx = np.argmax(timeseries.values)\n points = [0, mx, len(timeseries)]\n x = timeseries.times\n\n ythresh = 100*np.mean(timeseries.values)\n out_error = 1\n out_error_last = out_error\n OUT_ERROR_THRESHOLD = 1e-2\n\n UPPER_N = 200\n LOWER_N = 50\n last_result = None\n MAX_ITERATIONS = 80\n\n solve_type = SMOOTH\n simple_peaks = False\n last_result,ix = reduct_iter(timeseries,flux_floor,ythresh,out_error,out_error_last,OUT_ERROR_THRESHOLD,UPPER_N,LOWER_N,last_result,MAX_ITERATIONS)\n last_result = retain_min_years(last_result.reduced_flux,o_timeseries,o_mass,min_retained_zero_years)\n #if there are less points than the min_reduction_steps then use the remaining\n #points to rebalance the segments with the largest mass errors.\n play_points = min_reduction_steps - last_result.num_reduced_points\n bef = last_result.reduced_flux.times.size\n if play_points > 0:\n last_result = red_flux.rebalance_extra_points(last_result,play_points)\n\n rr = last_result\n\n #find peaks for data rebalance and reporting\n peaks, _ = sig.find_peaks(rr.reduced_flux.values,width=3,rel_height=1)\n if peaks.size == 0 :\n peaks, _ = 
sig.find_peaks(rr.reduced_flux.values,width=2,rel_height=1)\n if peaks.size == 0:\n peaks, _ = sig.find_peaks(rr.reduced_flux.values,width=1,rel_height=1)\n pneg, _ = sig.find_peaks(-rr.reduced_flux.values,width=3,rel_height=1)\n if pneg.size == 0:\n pneg, _ = sig.find_peaks(-rr.reduced_flux.values,width=2,rel_height=1)\n if pneg.size == 0:\n pneg, _ = sig.find_peaks(-rr.reduced_flux.values,width=1,rel_height=1)\n\n peaks = rr.reduced_flux.times[peaks]\n pneg = rr.reduced_flux.times[pneg]\n\n peaks = np.isin(o_timeseries.times,peaks)\n pneg = np.isin(o_timeseries.times,pneg)\n peaks = np.where(peaks)\n pneg = np.where(pneg)\n\n peaks = peaks[0]\n pneg = pneg[0]\n iter = 0\n while iter < 100 and (abs(last_result.total_mass_error*maxval) > max_tm_error or abs(last_result.total_mass_error/last_result.mass.values[-1])*100 > .001) :\n rr = red_flux.rebalance_valleys(rr,peaks,pneg)\n #keep the lowest total_mass_error\n if abs(rr.total_mass_error) < abs(last_result.total_mass_error):\n last_result = rr\n else:\n break\n iter += 1\n\n out_times = last_result.reduced_flux.times\n out_values = last_result.reduced_flux.values\n #return the reduced data, undo normalize of the values (*maxval)\n return out_times, out_values*maxval,-(last_result.total_mass_error * maxval),peaks.size,iter", "def _run_alg(data, agg_col, cat_cols, model, null_responses):\n agg_units = sorted(set(data[agg_col]), key=lambda x: (str(type(x)), x))\n outlier_scores = collections.defaultdict(dict)\n agg_to_data = {}\n agg_col_to_data = {}\n for agg_unit in agg_units:\n # TODO: could this be smarter and remove data each time? maybe no savings.\n # TODO: support numpy only again\n agg_to_data[agg_unit] = data[data[agg_col] == agg_unit]\n agg_col_to_data[agg_unit] = {}\n \n for col in cat_cols:\n col_vals = sorted(set(data[col]), key=lambda x: (str(type(x)), x))\n col_vals = [c for c in col_vals if c not in null_responses]\n frequencies = {}\n for agg_unit in agg_units:\n frequencies[agg_unit],grouped = _get_frequencies(data, col, col_vals, agg_col, agg_unit, agg_to_data)\n agg_col_to_data[agg_unit][col] = grouped\n outlier_scores_for_col, expected_frequencies_for_col, p_values_for_col = model.compute_outlier_scores(frequencies)\n for agg_unit in agg_units:\n outlier_scores[agg_unit][col] = {'score': outlier_scores_for_col[agg_unit],\n 'observed_freq': frequencies[agg_unit],\n 'expected_freq': expected_frequencies_for_col[agg_unit],\n 'p_value': p_values_for_col[agg_unit]}\n return outlier_scores, agg_col_to_data", "def n0derivative_clte(cl_array,bins,n0bins,clpp,norms,cls,cltt,clee,clbb,clte,NOISE_LEVEL,polnoise,lmin,LMAXOUT,LMAX_TT,Lstep,Lmin_out):\n bins=bins-2\n array1001=perturbe_clist(cl_array,bins,1.001)\n array999=perturbe_clist(cl_array,bins,0.999)\n \n N1001=[[],[],[],[],[]] #list of lists containing tt,ee,eb,te,tb\n N0999=[[],[],[],[],[]]\n \n for i in range(len(array1001)):\n print(i)\n\n a=compute_n0_py(clpp,cls,cltt,clee,clbb,array1001[i],NOISE_LEVEL,polnoise,lmin,LMAXOUT,LMAX_TT,Lmin_out,Lstep)\n b=compute_n0_py(clpp,cls,cltt,clee,clbb,array999[i],NOISE_LEVEL,polnoise,lmin,LMAXOUT,LMAX_TT,Lmin_out,Lstep)\n for j in range(len(N1001)):\n N1001[j].append(a[j])\n N0999[j].append(b[j])\n\n delta=diff_cl(cl_array,bins)\n \n keys=['TT','EE','EB','TE','TB']\n\n derlist=[]\n for k in range(len(keys)):\n diff=[n1bins]\n for i in range(len(N1001[1])):\n der=((N1001[k][i][:len(n0bins)]-N0999[k][i][:len(n0bins)])*(n0bins*(n0bins+1))**2*0.25)/delta[i]\n diff.append(der)\n 
der=np.insert(np.transpose(diff),0,np.insert(bins,0,0),axis=0)\n derlist.append(der)\n np.savetxt('../data/n0{}dclte.txt'.format(keys[k]),der)\n return derlist", "def score_cut_vectors(self, cut_vectors: np.array) -> List[np.float64]:\n\n ##Needs to be replaced by a better, vectorized (or parallel) method\n scores = []\n for i in range(cut_vectors.shape[0]):\n cut_0 = np.argwhere(cut_vectors[i,:] == 0).reshape((1,-1))[0]\n cut_1 = np.argwhere(cut_vectors[i,:] == 1).reshape((1,-1))[0]\n\n pairs = np.array(np.meshgrid(cut_0, cut_1)).T.reshape(-1, 2)\n\n score_i = sum([self.matrix[x, y] for x, y in pairs])\n scores.append(score_i)\n\n return scores", "def get_plotting_data(each_misfit_windows_collection, iterations_list, snr_threshold, event_depth_dict):\n result = {}\n phases_zr = [\"P\", \"pP\", \"sP\", \"PP\", \"S\", \"sS\", \"SS\"]\n phases_t = [\"ScS\", \"S\", \"sS\", \"SS\"]\n conditions = {\n \"P\": {\n \"exclude_p\": False,\n \"exclude_s\": True\n },\n \"pP\": {\n \"exclude_p\": True,\n \"exclude_s\": True\n },\n \"sP\": {\n \"exclude_p\": True,\n \"exclude_s\": True\n },\n \"PP\": {\n \"exclude_p\": True,\n \"exclude_s\": True\n },\n \"S\": {\n \"exclude_p\": False,\n \"exclude_s\": False\n },\n \"sS\": {\n \"exclude_p\": True,\n \"exclude_s\": True\n },\n \"SS\": {\n \"exclude_p\": True,\n \"exclude_s\": True\n },\n \"ScS\": {\n \"exclude_p\": True,\n \"exclude_s\": True\n },\n \"surface_z\": {\n \"exclude_p\": False,\n \"exclude_s\": False\n },\n \"surface_r\": {\n \"exclude_p\": False,\n \"exclude_s\": False\n },\n \"surface_t\": {\n \"exclude_p\": False,\n \"exclude_s\": False\n },\n }\n # we can exrtact the information from the misfit_windows in the order of the pdf output.\n # order will be z,r,t[,surface_z,surface_r,surface_t]\n rep_net_sta = sorted(event_depth_dict.keys())[0]\n event_depth_this_event = event_depth_dict[rep_net_sta]\n if (event_depth_this_event > SURFACE_THRESHOLD):\n category_list = [\"z\", \"r\", \"t\"]\n category_phases = [phases_zr, phases_zr, phases_t]\n else:\n category_list = [\"z\", \"r\", \"t\", \"surface_z\", \"surface_r\", \"surface_t\"]\n category_phases = [phases_zr, phases_zr, phases_t,\n [\"surface_z\"], [\"surface_r\"], [\"surface_t\"]]\n for each_iteration in iterations_list:\n result[each_iteration] = {}\n for each_category, each_category_phases in zip(category_list, category_phases):\n result[each_iteration][each_category] = []\n for each_category_phase in each_category_phases:\n phase_condition = conditions[each_category_phase]\n cc = get_windows_cc(\n each_misfit_windows_collection[each_iteration], phase_condition[\n \"exclude_p\"], phase_condition[\"exclude_s\"],\n each_category, snr_threshold, each_category_phase)\n cc = cc[cc >= 0]\n deltat = get_windows_deltat(\n each_misfit_windows_collection[each_iteration], phase_condition[\n \"exclude_p\"], phase_condition[\"exclude_s\"],\n each_category, snr_threshold, each_category_phase)\n deltat = deltat[np.abs(deltat) <= 10]\n similarity = get_windows_similarity(\n each_misfit_windows_collection[each_iteration], phase_condition[\n \"exclude_p\"], phase_condition[\"exclude_s\"],\n each_category, snr_threshold, each_category_phase)\n similarity = similarity[similarity >= 0]\n result[each_iteration][each_category].append(\n {\"net_sta\": get_windows_net_sta(\n each_misfit_windows_collection[each_iteration], phase_condition[\n \"exclude_p\"], phase_condition[\"exclude_s\"],\n each_category, snr_threshold, each_category_phase),\n \"cc\": cc,\n \"deltat\": deltat,\n \"similarity\": similarity,\n }\n 
)\n # result:dict->each_iteration:dict->each_category:list as the dict showed before, we should return the category_phases\n # we should combine the surface wave phases to one page\n if (len(category_phases) == 6):\n for each_iteration in iterations_list:\n category_phases = [phases_zr, phases_zr, phases_t,\n [\"surface_z\", \"surface_r\", \"surface_t\"]]\n category_list = [\"z\", \"r\", \"t\", \"surface\"]\n result[each_iteration][\"surface\"] = []\n result[each_iteration][\"surface\"].append(\n result[each_iteration][\"surface_z\"][0])\n result[each_iteration][\"surface\"].append(\n result[each_iteration][\"surface_r\"][0])\n result[each_iteration][\"surface\"].append(\n result[each_iteration][\"surface_t\"][0])\n del result[each_iteration][\"surface_z\"]\n del result[each_iteration][\"surface_r\"]\n del result[each_iteration][\"surface_t\"]\n\n return result, category_phases, category_list", "def averaged_1d_array(arr, partitions):\n def f(p):\n start, stop = p\n sub_arr = arr[start : stop]\n #from IPython import embed; embed()\n return sub_arr.sum(axis=0)/(stop - start)\n\n return np.array([f(p) for p in partitions])", "def calc_flux_array(self):\n \n # First determine the associated spectrum\n self.compute_template_spectrum()\n\n # Calculate baseline counts to normalise fluxes we scan over\n # Go from 10**(bin_min)*mean up to 10**(bin_max)*mean in nbins steps\n b = self.setup_b_instance(0,add_ps_mask=True)\n mean = np.sum(b.CTB_masked_compressed[0])/len(b.CTB_masked_compressed[0])\n A_array = mean*10**np.linspace(self.bin_min,self.bin_max,self.nbins)\n\n # Array to get LLs when no profile likelihood run\n norun = np.array([1.0, 1.0, 1.0, 1.0])\n\n # Now setup and compute the arrays\n LL_array = np.array([]) \n A_array_short = np.array([])\n spect_array = np.array([])\n\n for i in range(len(A_array)):\n print \"on i =\",i\n # Calculate LL\n if i == 0:\n b1 = self.setup_b_instance(A_array[i],add_ps_mask=True)\n else:\n for key in b1.fixed_template_dict_nested.keys():\n b1.fixed_template_dict_nested[key] = b1.fixed_template_dict_nested[key]*A_array[i]/A_array[i-1]\n ll_val = b1.ll(norun,4,4)\n # Make triangle\n\n # Append to arrays\n LL_array = np.append(LL_array,ll_val)\n A_array_short = np.append(A_array_short,A_array[i])\n spect_array = self.spectrum*np.array(A_array_short)\n\n # Save output\n np.save(work_dir+'ScanOutput/'+self.tag+'/En_array-'+str(self.flux_array_ebin)+'.npy',self.En_center)\n np.save(work_dir+'ScanOutput/'+self.tag+'/LL_array-'+str(self.flux_array_ebin)+'.npy',LL_array)\n np.save(work_dir+'ScanOutput/'+self.tag+'/Flux_array-'+str(self.flux_array_ebin)+'.npy',spect_array)", "def n1derivative_clte(cl_array,bins,n1bins,clpp,norms,cls,cltt,clee,clbb,clte,NOISE_LEVEL,polnoise,lmin,LMAXOUT,LMAX_TT,Lstep,Lmin_out):\n bins=bins-2\n array1001=perturbe_clist(cl_array,bins,1.001)\n array999=perturbe_clist(cl_array,bins,0.999)\n \n N1001=[[],[],[],[],[]] #list of lists containing tt,ee,eb,te,tb\n N0999=[[],[],[],[],[]]\n \n for i in range(len(array1001)):\n print(i)\n a=compute_n1_py(clpp,norms,cls,cltt,clee,clbb,array1001[i],NOISE_LEVEL,polnoise,lmin,LMAXOUT,LMAX_TT,Lstep,Lmin_out)\n b=compute_n1_py(clpp,norms,cls,cltt,clee,clbb,array999[i],NOISE_LEVEL,polnoise,lmin,LMAXOUT,LMAX_TT,Lstep,Lmin_out)\n for j in range(len(N1001)):\n N1001[j].append(a[j])\n N0999[j].append(b[j])\n\n delta=diff_cl(cl_array,bins)\n \n keys=['TT','EE','EB','TE','TB']\n\n derlist=[]\n for k in range(len(keys)):\n diff=[n1bins]\n for i in range(len(N1001[1])):\n 
der=((N1001[k][i][:len(n1bins)]-N0999[k][i][:len(n1bins)])*(n1bins*(n1bins+1))**2*0.25)/delta[i]\n diff.append(der)\n der=np.insert(np.transpose(diff),0,np.insert(bins,0,0),axis=0)\n derlist.append(der)\n np.savetxt('../data/n1{}dclte.txt'.format(keys[k]),der)\n return derlist", "def step_func_cat(X, args, Y, info, Ytarget, err, tols, iter, maxIter):\n [XF, ZF, L, EA, W, CB, WL, WEA, L_EA, CB_EA] = args[\"cat\"]\n\n # if abs( err[1] + ZF ) < 0.0001:\n # breakpoint()\n\n [alpha_min, alpha0, alphaR] = args[\n \"step\"\n ] # get minimum alpha, initial alpha, and alpha reduction rate from passed arguments\n\n # J = info['jacobian']\n # dX = -np.matmul(np.linalg.inv(J), err)\n dX = -np.matmul(info[\"stiffnessB\"], err)\n\n # ! Reduce dHF by factor (between 1 at I = 1 and 0 at I = MaxIter) that reduces linearly with iteration count\n # to ensure that we converge on a solution even in the case were we obtain a nonconvergent cycle about the\n # correct solution (this happens, for example, if we jump to quickly between a taut and slack catenary)\n\n alpha = 1.0 # M<<<<<<<< np.max([alpha_min, alpha0*(1.0 - alphaR*iter/maxIter)])\n\n # exponential approach alpha = alpha0 * np.exp( iter/maxIter * np.log(alpha_min/alpha0 ) )\n\n dX[0] = dX[0] * alpha # dHF*( 1.0 - Tol*I )\n dX[1] = dX[1] * alpha # dVF*( 1.0 - Tol*I )\n\n # To avoid an ill-conditioned situation, make sure HF does not go less than or equal to zero by having a lower limit of Tol*HF\n # [NOTE: the value of dHF = ( Tol - 1.0 )*HF comes from: HF = HF + dHF = Tol*HF when dHF = ( Tol - 1.0 )*HF]\n # dX[0] = max( dX[0], ( tol - 1.0 )*info['HF']);\n\n # To avoid an ill-conditioned situation, make sure HF does not get too close to zero, by forcing HF >= tols[0]\n # if info['HF'] + dX[0] <= tol*abs(info['VF']+dX[1]):\n # if info['HF'] + dX[0] <= tols[0]\n if X[0] + dX[0] <= tols[0]:\n # dX[0] = tol*abs(info['VF']+dX[1]) - info['HF']\n # dX[0] = tols[0] - info['HF']\n dX[0] = tols[0] - X[0]\n\n # To avoid an ill-conditioned situation where the line is nearly all on the seabed but the solver gets stuck,\n # if np.abs(err[1] + ZF)/ZF < tol:\n # breakpoint()\n # deltaHFVF = info['HF'] - info['VF']\n # dX[0] = dX[0] - 0.5*deltaHFVF\n # dX[1] = dX[1] + 0.5*deltaHFVF\n\n # prevent silly situation where a line with weight and positive ZF considers a negative VF\n if info[\"ProfileType\"] == 2:\n if X[1] + dX[1] <= tols[1]: # if vertical force is within tolerance of being zero/negative\n VFtarget = (L - info[\"LBot\"]) * W # set next VF value to be the weight of portion of line that's suspended\n dX[1] = VFtarget - X[1]\n\n return dX # returns dX (step to make)", "def _partial_dependence_calculation(pipeline, grid, features, X):\n predictions = []\n averaged_predictions = []\n\n if is_regression(pipeline.problem_type):\n prediction_method = pipeline.predict\n else:\n prediction_method = pipeline.predict_proba\n\n X_eval = X.ww.copy()\n for _, new_values in grid.iterrows():\n for i, variable in enumerate(features):\n part_dep_column = pd.Series(\n [new_values[i]] * X_eval.shape[0], index=X_eval.index\n )\n X_eval.ww[variable] = ww.init_series(\n part_dep_column, logical_type=X_eval.ww.logical_types[variable]\n )\n\n pred = prediction_method(X_eval)\n\n predictions.append(pred)\n # average over samples\n averaged_predictions.append(np.mean(pred, axis=0))\n\n n_samples = X.shape[0]\n\n # reshape to (n_instances, n_points) for binary/regression\n # reshape to (n_classes, n_instances, n_points) for multiclass\n predictions = np.array(predictions).T\n if 
is_regression(pipeline.problem_type) and predictions.ndim == 2:\n predictions = predictions.reshape(n_samples, -1)\n elif predictions.shape[0] == 2:\n predictions = predictions[1]\n predictions = predictions.reshape(n_samples, -1)\n\n # reshape averaged_predictions to (1, n_points) for binary/regression\n # reshape averaged_predictions to (n_classes, n_points) for multiclass.\n averaged_predictions = np.array(averaged_predictions).T\n if is_regression(pipeline.problem_type) and averaged_predictions.ndim == 1:\n averaged_predictions = averaged_predictions.reshape(1, -1)\n elif averaged_predictions.shape[0] == 2:\n averaged_predictions = averaged_predictions[1]\n averaged_predictions = averaged_predictions.reshape(1, -1)\n\n return averaged_predictions, predictions", "def geneffcut(energy, array, cutvals=hads, bins=BINS):\n binning = np.digitize(energy, bins) - 1\n binning[binning < 0] = 0.\n binning[binning >= len(bins)-1] = 0.\n hadeffcut = np.zeros(len(energy), dtype=bool)\n for i, cutval in enumerate(cutvals):\n binmask = binning == i\n hadeffcut[binmask] = array[binmask] < cutval\n binning = np.digitize(energy, bins) - 1\n binning[binning < 0] = -1\n binning[binning >= len(bins)-1] = -1\n hadeffcut[binning == -1] = 0\n\n return hadeffcut", "def greedy_filter_rule(np_array, filter_rule, money):\n i = 1\n stock_count = 0\n peaks = findPeakAndValley(np_array)\n while (i < len(np_array)):\n if (getSignal(np_array, i, filter_rule[FILTER_PREVIOUS_INDEX], peaks, filter_rule[FILTER_RATE_INDEX]) == 1):\n stock_count = int(money/np_array[i][STOCK_VALUE_INDEX])\n money = money - stock_count * np_array[i][STOCK_VALUE_INDEX]\n i = i + filter_rule[FILTER_HOLD_INDEX]\n while(i < len(np_array)):\n if(getSignal(np_array, i, filter_rule[FILTER_PREVIOUS_INDEX], peaks, filter_rule[FILTER_RATE_INDEX]) == -1):\n money = money + stock_count * np_array[i][STOCK_VALUE_INDEX]\n break\n i = i + filter_rule[FILTER_DELAY_INDEX]\n i = i + 1\n \n return money + stock_count * np_array[len(np_array) - 1][STOCK_VALUE_INDEX]", "def array_part_loops_pruning(loops, config):\n pruned_loops = []\n\n PE_lb = config['setting'][config['mode']\n ]['pruning']['array_part']['PE_num'][0]\n for loop in loops:\n if PE_lb == -1:\n pruned_loops.append(loop)\n else:\n prod = 1\n for l in loop:\n if l > 1:\n prod *= l\n if prod < PE_lb:\n continue\n pruned_loops.append(loop)\n\n return pruned_loops", "def compute(i, tas):\n packed_values = input_pack([elem_ta.read(i) for elem_ta in elems_ta])\n packed_fn_values = fn(packed_values)\n nest.assert_same_structure(dtype or elems, packed_fn_values)\n flat_fn_values = output_flatten(packed_fn_values)\n tas = [ta.write(i, value) for (ta, value) in zip(tas, flat_fn_values)]\n return (i + 1, tas)", "def sliced_fun(f, n_slices):\n\n def sliced_f(sliced_inputs, non_sliced_inputs=None):\n if non_sliced_inputs is None:\n non_sliced_inputs = []\n if isinstance(non_sliced_inputs, tuple):\n non_sliced_inputs = list(non_sliced_inputs)\n n_paths = len(sliced_inputs[0])\n slice_size = max(1, n_paths // n_slices)\n ret_vals = None\n for start in range(0, n_paths, slice_size):\n inputs_slice = [v[start:start + slice_size] for v in sliced_inputs]\n slice_ret_vals = f(*(inputs_slice + non_sliced_inputs))\n if not isinstance(slice_ret_vals, (tuple, list)):\n slice_ret_vals_as_list = [slice_ret_vals]\n else:\n slice_ret_vals_as_list = slice_ret_vals\n scaled_ret_vals = [\n np.asarray(v) * len(inputs_slice[0])\n for v in slice_ret_vals_as_list\n ]\n if ret_vals is None:\n ret_vals = scaled_ret_vals\n else:\n 
ret_vals = [x + y for x, y in zip(ret_vals, scaled_ret_vals)]\n ret_vals = [v / n_paths for v in ret_vals]\n if not isinstance(slice_ret_vals, (tuple, list)):\n ret_vals = ret_vals[0]\n elif isinstance(slice_ret_vals, tuple):\n ret_vals = tuple(ret_vals)\n return ret_vals\n\n return sliced_f", "def dataset_handling_with_standardisation(init_data):\n #\n ##Maximum number of points = 72 , keep around 80 values for even number\n max_len = 80\n ##Fluxes, Standardisation is done over 1 type of feature\n data = init_data.loc[:, [u'fluxes_0', u'fluxes_1', u'fluxes_2', u'fluxes_3', u'fluxes_4', u'fluxes_5']].values\n zp_array_flux = []\n for dat in data:\n n_data = []\n for ii in range(len(dat)):\n n_data = np.append(n_data, np.pad(dat[ii], (0, max_len * 5 - len(dat[ii])), 'constant', constant_values=0))\n n_data = QuantileTransformer(output_distribution='uniform').fit_transform(n_data.reshape(-1, 1)).flatten()\n zp_array_flux.append(n_data)\n zp_array_flux = np.array(zp_array_flux)\n print(zp_array_flux.shape)\n\n ##Fluxerrors, Standardisation is done over 1 type of feature\n data = init_data.loc[:,\n [u'fluxerrs_0', u'fluxerrs_1', u'fluxerrs_2', u'fluxerrs_3', u'fluxerrs_4', u'fluxerrs_5']].values\n zp_array_flux_error = []\n for dat in data:\n n_data = []\n for ii in range(len(dat)):\n n_data = np.append(n_data, np.pad(dat[ii], (0, max_len * 5 - len(dat[ii])), 'constant', constant_values=0))\n n_data = QuantileTransformer(output_distribution='uniform').fit_transform(n_data.reshape(-1, 1)).flatten()\n zp_array_flux_error.append(n_data)\n zp_array_flux_error = np.array(zp_array_flux_error)\n print(zp_array_flux_error.shape)\n\n ##Time, Standardisation is done over 1 type of feature\n data = init_data.loc[:, [u'mjds_0', u'mjds_1', u'mjds_2', u'mjds_3', u'mjds_4', u'mjds_5']].values\n zp_array_mjds = []\n for dat in data:\n n_data = []\n for ii in range(len(dat)):\n n_data = np.append(n_data, np.pad(dat[ii], (0, max_len * 5 - len(dat[ii])), 'constant', constant_values=0))\n n_data = QuantileTransformer(output_distribution='uniform').fit_transform(n_data.reshape(-1, 1)).flatten()\n zp_array_mjds.append(n_data)\n zp_array_mjds = np.array(zp_array_mjds)\n print(zp_array_mjds.shape)\n\n ##Concatenating everything\n zp_data = np.c_[zp_array_flux, zp_array_flux_error, zp_array_mjds]\n\n ##Adding redshift info// Gal pos info might be necessary to remove\n zp_data = np.c_[\n zp_data, init_data.loc[:, [u'gal_b', u'gal_l', u'hostgal_photoz', u'hostgal_photoz_err', u'hostgal_specz', u'mwebv']].values]\n print(zp_data.shape)\n\n ##Load labels and convert to integer\n labels = init_data.loc[:, [u'target']].values\n labels = labels.flatten()\n labels_name = np.array([6, 15, 16, 42, 52, 53, 62, 64, 65, 67, 88, 90, 92, 95, 99])\n [np.place(labels, labels == labels_name[i], [i]) for i in range(len(labels_name))]\n\n return [zp_data, labels]", "def eval(self, gt_paths, pred_paths):\n assert self.num_worker is not None, \"Parameter 'num_worker' is not assigned\"\n assert len(gt_paths) == len(pred_paths), \"Size must equal!\"\n \n dists = list()\n iterable = [(gt_paths[i], pred_paths[i] ) for i in range(len(gt_paths))]\n for result in tqdm.tqdm(self.pool.istarmap(self.eval_method.eval_fn, iterable), total=len(iterable)):\n dists.append(result)\n\n return dists", "def compute_empirical_indel_priors(\n allele_table: pd.DataFrame,\n grouping_variables: List[str] = [\"intBC\"],\n cut_sites: Optional[List[str]] = None,\n) -> pd.DataFrame:\n\n if cut_sites is None:\n cut_sites = 
get_default_cut_site_columns(allele_table)\n\n agg_recipe = dict(\n zip([cut_site for cut_site in cut_sites], [\"unique\"] * len(cut_sites))\n )\n groups = allele_table.groupby(grouping_variables).agg(agg_recipe)\n\n indel_count = defaultdict(int)\n\n for g in groups.index:\n\n alleles = np.unique(np.concatenate(groups.loc[g].values))\n for a in alleles:\n if \"none\" not in a.lower():\n indel_count[a] += 1\n\n tot = len(groups.index)\n\n indel_freqs = dict(\n zip(list(indel_count.keys()), [v / tot for v in indel_count.values()])\n )\n\n indel_priors = pd.DataFrame([indel_count, indel_freqs]).T\n indel_priors.columns = [\"count\", \"freq\"]\n indel_priors.index.name = \"indel\"\n\n return indel_priors", "def __call__(self, n_bins, segment, elements):\n\n # n_bins\n assert type(n_bins) is int\n assert n_bins > 0\n\n # segment\n assert type(segment) is list or type(segment) is tuple\n assert len(segment) == 2\n assert np.isscalar(segment[0]) and np.isscalar(segment[1])\n assert segment[0] < segment[1]\n\n # elements\n assert type(elements) is np.ndarray, f\"elements should be an np.ndarray, instead of {type(elements)}\"\n assert elements.dtype == np.number\n\n raise NotImplemented", "def piecewise_extraction(function, to_split, block_size=1000, axis=0, **params):\n assert type(to_split) == str and to_split in params\n assert axis in [0, 1]\n\n # Won't want progress bars for each subset\n params['verbose'] = False\n\n # Need some other params for post processing\n sparse_output = params.get('return_sparse', False)\n sparse_df = params.get('sparse_df', True)\n\n # Retain a copy of the original parameters\n full_params = deepcopy(params)\n total = len(params[to_split])\n\n # Determine the number of iterations needed\n num_iter = total // block_size\n if total % block_size != 0:\n num_iter += 1\n\n all_results = []\n meta_results = []\n for i in tqdm(range(num_iter)):\n\n # Get the start and end indices\n start = i * block_size\n end = (i + 1) * block_size\n\n # End can't be larger than the total number items\n if end > total:\n end = total\n\n # Subset the parameter of interest\n params[to_split] = full_params[to_split][start: end]\n\n # Get the function results\n current_result = function(**params)\n if type(current_result) == pd.DataFrame:\n all_results.append(current_result)\n elif type(current_result) == tuple:\n # The feature matrix is always the last element of the tuple\n all_results.append(current_result[-1])\n # Other metadata can be stored in other parts of the tuple\n meta_results.append(current_result[:-1])\n\n # This combination results in sparse matrices returned\n if sparse_output and not sparse_df:\n if axis == 0:\n # vstack works best on csr matrcies\n if type(results[0]) != csr_matrix:\n all_results = [r.to_coo().to_csr() for r in all_results]\n out = vstack(all_results)\n else:\n # hstack works best on csc matrcies\n if type(all_results[0]) != csc_matrix:\n all_results = [r.to_coo().to_csc() for r in all_results]\n out = hstack(all_results)\n\n # Dataframe result, use concat to join\n else:\n out = pd.concat(all_results, sort=False, axis=axis)\n\n # right now meta will either be None, (DataFrame, ) or ((DataFrame, List), )\n # Pair Counts, datframe result, or sparse mat result respectively\n # But will try to write this as specifically as possible so other options can be added in future\n out_meta = []\n if meta_results:\n for mr in meta_results:\n # Second case, only 1 dataframe so return\n if len(mr) == 1 and type(mr[0]) == pd.DataFrame:\n return mr[0], out\n\n # Third 
case, dataframe and list... need to join the list\n elif len(mr) == 1 and type(mr[0] == tuple):\n for elem in mr[0]:\n if type(elem) == pd.DataFrame:\n out_m_df = elem\n elif type(elem) == list:\n out_meta.append(elem)\n return (out_m_df, list(chain(*out_meta))), out", "def outlierCleaner(predictions, ages, net_worths):\n \n cleaned_data = []\n temp=[]\n ### your code goes here\n for x in xrange(len(predictions)):\n cleaned_data.append((ages[x],net_worths[x],abs(net_worths[x]-predictions[x])))\n \n cleaned_data.sort(key= lambda tup : tup[2], reverse= False)\n cleaned_data=cleaned_data[:81]\n print(len(cleaned_data))\n return cleaned_data", "def fitness(image, delta_x, length, individ):\n \n summa = 0\n sum_vrt = 0\n for i in range(length): \n sum_ = np.sum(image[individ[i], i*delta_x:i*delta_x+delta_x])\n if i>0:\n if individ[i]>individ[i-1]:\n sum_vrt = np.sum(image[individ[i-1]:individ[i], i*delta_x])\n else:\n sum_vrt = np.sum(image[individ[i]:individ[i-1], i*delta_x])\n summa=summa + sum_ + sum_vrt \n return summa", "def compute(self,filter_name):\n self.result = []\n for img in self.imgs:\n r = filters_dict[filter_name](img)\n if \"threshold\" in filter_name:\n r = img>r\n r = 1.0*r.copy()\n self.result.append(r)", "def get_mean_in_time(trajectories, nb_bins=15, freq_range=[0.4, 0.6]):\n # Create bins and select trajectories going through the freq_range\n time_bins = np.linspace(-950, 2000, nb_bins)\n trajectories = [traj for traj in trajectories if np.sum(np.logical_and(\n traj.frequencies >= freq_range[0], traj.frequencies < freq_range[1]), dtype=bool)]\n\n # Offset trajectories to set t=0 at the point they are seen in the freq_range and adds all the frequencies / times\n # to arrays for later computation of mean\n t_traj = np.array([])\n f_traj = np.array([])\n for traj in trajectories:\n idx = np.where(np.logical_and(traj.frequencies >=\n freq_range[0], traj.frequencies < freq_range[1]))[0][0]\n traj.t = traj.t - traj.t[idx]\n t_traj = np.concatenate((t_traj, traj.t))\n f_traj = np.concatenate((f_traj, traj.frequencies))\n\n # Binning of all the data in the time bins\n filtered_fixed = [traj for traj in trajectories if traj.fixation == \"fixed\"]\n filtered_lost = [traj for traj in trajectories if traj.fixation == \"lost\"]\n freqs, fixed, lost = [], [], []\n for ii in range(len(time_bins) - 1):\n freqs = freqs + [f_traj[np.logical_and(t_traj >= time_bins[ii], t_traj < time_bins[ii + 1])]]\n fixed = fixed + [len([traj for traj in filtered_fixed if traj.t[-1] < time_bins[ii]])]\n lost = lost + [len([traj for traj in filtered_lost if traj.t[-1] < time_bins[ii]])]\n\n # Computation of the mean in each bin, active trajectories contribute their current frequency,\n # fixed contribute1 and lost contribute 0\n mean = []\n for ii in range(len(freqs)):\n mean = mean + [np.sum(freqs[ii]) + fixed[ii]]\n mean[-1] /= (len(freqs[ii]) + fixed[ii] + lost[ii])\n\n nb_active = [len(freq) for freq in freqs]\n nb_dead = [fixed[ii] + lost[ii] for ii in range(len(fixed))]\n\n return 0.5 * (time_bins[1:] + time_bins[:-1]), mean, nb_active, nb_dead", "def iterations_test(start=700, end=1500, step=10, smoothing=3):\n global obj_func, pop_size\n\n iter_counts = range(start, end, step)\n results_pso = []\n results_de = []\n for iterations in iter_counts:\n result_pso = 0\n result_de = 0\n\n stopping_criterion = problem.StoppingCriterion('nr_steps', iterations)\n\n for i in xrange(smoothing):\n result_pso += obj_func.func(solver.minimize(obj_func,\n method='pso',\n stopping_criterion=stopping_criterion,\n 
population_size=pop_size))\n\n result_de += obj_func.func(solver.minimize(obj_func,\n method='de',\n stopping_criterion=stopping_criterion,\n population_size=pop_size))\n result_pso /= float(smoothing)\n result_de /= float(smoothing)\n\n results_pso.append(result_pso)\n results_de.append(result_de)\n\n # scale iter counts:\n iter_counts = [pop_size * ic for ic in iter_counts]\n\n plt.plot(iter_counts, results_pso, 'bo-', label='pso')\n plt.plot(iter_counts, results_de, 'r^-', label='de')\n plt.legend()\n plt.title('Comparison of pso and de methods using a fixed amount of iterations.')\n plt.xlabel('Iteration counts')\n plt.ylabel('Best fitness acheived.')\n plt.show()", "def _pool_outputs_from_function(self, outputs):\n # TODO: simplify after cleaning gs.squeeze\n all_arrays = gs.all([gs.is_array(factor_output) for factor_output in outputs])\n if (\n all_arrays\n and _all_equal([factor_output.shape for factor_output in outputs])\n and gs.all([gs.is_bool(factor_output) for factor_output in outputs])\n or (not all_arrays)\n ):\n outputs = gs.stack([gs.array(factor_output) for factor_output in outputs])\n outputs = gs.all(outputs, axis=0)\n return outputs\n\n try:\n return self.embed_to_product(outputs)\n except geomstats.errors.ShapeError:\n raise RuntimeError(\n \"Could not combine outputs - they are not points of the individual\"\n \" factors.\"\n )\n except ValueError:\n raise RuntimeError(\n \"Could not combine outputs, probably because they could\"\n \" not be concatenated or stacked.\"\n )", "def cut(self, array_obj):\n\n pass", "def apply_filters(self):\n hurst_cut = 0\n coint_cut = 0\n half_life_cut = 0\n mean_cross_cut = 0\n\n # Create an empty list for pairs that pass the filter tests\n validated_pairs = []\n\n # Create all the pairs combination\n self.create_pair_differences()\n\n # Print the number of potential pairs\n print(f\"Number of potential pairs in before filter: {len(self.__pairs_data)}\")\n\n for pair in self.__pairs_data:\n # Select the stocks from the pair\n stock1 = pair[0]\n stock2 = pair[1]\n\n # Test the hurst filter\n if self.hurst_filter(self, stock1=stock1, stock2=stock2):\n hurst_cut += 1\n if self.engel_filter(self, stock1=stock1, stock2=stock2):\n coint_cut += 1\n if self.half_life_filter(self, stock1=stock1, stock2=stock2):\n half_life_cut += 1\n if self.mean_cross_filter(self, stock1=stock1, stock2=stock2):\n mean_cross_cut += 1\n validated_pairs.append([stock1, stock2])\n\n print(f\"Hurst filter pass: {hurst_cut}\")\n print(f\"Co-integration filter pass: {coint_cut}\")\n print(f\"Half-life filter pass: {half_life_cut}\")\n print(f\"Mean-cross filter pass: {mean_cross_cut}\")\n print(f\"Final Number of validated pairs: {len(validated_pairs)}\")\n print(\"The final validated pairs are: \")\n print(validated_pairs)\n\n # Save it to the attribute\n self.__validated_pairs = validated_pairs\n self.__validated_pairs_diff = self.__pair_diff[self.symbolize_pairs(self.__validated_pairs)]", "def _calculate_percentile_cutoff(run_numbers):\n mcp_values = []\n andor_values = []\n for run_number in run_numbers:\n current_data_path = ''.join([DATA_PATH, 'run', str(run_number), 'allevts.h5'])\n f = h5py.File(current_data_path, 'r')\n current_phot = _get_photon_energy(f, run_number)\n current_mcp = np.array(f['Acqiris2']['acq'])\n current_mcp = current_mcp[(current_phot > 781) & (current_phot < 782)]\n mcp_values.extend(current_mcp)\n current_andor = np.array(f['Andor']['signal'])\n current_andor = current_andor[(current_phot > 781) & (current_phot < 782)]\n 
andor_values.extend(current_andor)\n #plt.figure()\n #plt.scatter(mcp_values, andor_values)\n mcp_percentile_cutoff = min([percentileofscore(andor_values, 4000), 99.9])\n return mcp_percentile_cutoff", "def n0derivative_cltt(cl_array,bins,n0bins,clpp,norms,cls,cltt,clee,clbb,clte,NOISE_LEVEL,polnoise,lmin,LMAXOUT,LMAX_TT,Lstep,Lmin_out):\n bins=bins-2\n array1001=perturbe_clist(cl_array,bins,1.001)\n array999=perturbe_clist(cl_array,bins,0.999)\n N1001=[[],[],[],[],[]] #list of lists containing tt,ee,eb,te,tb\n N0999=[[],[],[],[],[]]\n delta=diff_cl(cl_array,bins)\n\n for i in range(len(array1001)):\n print(i)\n a=compute_n0_py(clpp,cls,array1001[i],clee,clbb,clte,NOISE_LEVEL,polnoise,lmin,LMAXOUT,LMAX_TT,Lmin_out,Lstep)\n b=compute_n0_py(clpp,cls,array999[i],clee,clbb,clte,NOISE_LEVEL,polnoise,lmin,LMAXOUT,LMAX_TT,Lmin_out,Lstep)\n for j in range(len(N1001)):\n N1001[j].append(a[j])\n N0999[j].append(b[j])\n \n keys=['TT','EE','EB','TE','TB']\n \n derlist=[]\n for k in range(len(keys)):\n diff=[n0bins]\n for i in range(len(N1001[1])):\n der=((N1001[k][i][:len(n0bins)]-N0999[k][i][:len(n0bins)])*(n0bins*(n0bins+1))**2*0.25)/delta[i]\n diff.append(der)\n der=np.insert(np.transpose(diff),0,np.insert(bins,0,0),axis=0)\n derlist.append(der)\n np.savetxt('../data/n0{}dcltt.txt'.format(keys[k]),der)\n return derlist", "def calc_data(self):\n\n circ_counts = {}\n for trialidx in range(self._ntrials):\n for _, depth in enumerate(self._depths):\n circ_name = 'qv_depth_%d_trial_%d' % (depth, trialidx)\n\n # get the counts form ALL executed circuits\n count_list = []\n for result in self._result_list:\n try:\n count_list.append(result.get_counts(circ_name))\n except (QiskitError, KeyError):\n pass\n\n circ_counts[circ_name] = \\\n build_counts_dict_from_list(count_list)\n\n self._circ_shots[circ_name] = \\\n sum(circ_counts[circ_name].values())\n\n # calculate the heavy output probability\n self._heavy_output_counts[circ_name] = \\\n self._subset_probability(\n self._heavy_outputs[circ_name],\n circ_counts[circ_name])", "def diff_events_after_cut(events, rates, obstime, feature, cut, gamma_efficiency):\n\n total_events = np.sum(rates) * obstime\n\n if feature == \"gammaness\":\n events_after_cut = np.sum(rates[events[feature] > cut]) * obstime\n\n else:\n events_after_cut = np.sum(rates[events[feature] < cut]) * obstime\n\n return gamma_efficiency * total_events - events_after_cut", "def compute_fold_regulation(fc_data_list):\n # if isinstance(fc_data_list, list):\n return [{TEST_SAMPLE: fc_elem[TEST_SAMPLE], CONTROL_SAMPLE: fc_elem[CONTROL_SAMPLE],\n FOLD_REGULATION: fc_elem[FOLD_CHANGE].applymap(lambda x: -1.0/x if x < 1.0 else x)}\n for fc_elem in fc_data_list]\n # return fc_data_list.applymap(lambda x: -1.0/x if x < 1.0 else x)", "def dim_reduction(data_set, components):\n transformed = []\n index = -1\n transformed = data_set @ components\n return transformed", "def sampling_optimization(drive_contour: np.ndarray, driven_contour: np.ndarray, k: int, sampling_count: (int, int),\n keep_count: int, resampling_accuracy: int, comparing_accuracy: int, debugger: Reporter,\n max_sample_depth: int = 5, max_iteration: int = 1, smoothing: Tuple[int, int] = (0, 0),\n visualization: Union[Dict, None] = None, draw_tar_functions: bool = False) \\\n -> List[Tuple[float, float, float, float, float, np.ndarray, np.ndarray]]:\n drive_contour = counterclockwise_orientation(drive_contour)\n driven_contour = counterclockwise_orientation(driven_contour)\n drive_polygon = Polygon(drive_contour)\n driven_polygon = 
Polygon(driven_contour)\n drive_polar = toExteriorPolarCoord(drive_polygon.centroid, drive_contour, resampling_accuracy)\n driven_polar = toExteriorPolarCoord(driven_polygon.centroid, driven_contour, resampling_accuracy)\n drive_smoothing, driven_smoothing = smoothing\n drive_contour = getUniformContourSampledShape(drive_contour, resampling_accuracy, drive_smoothing > 0)\n driven_contour = getUniformContourSampledShape(driven_contour, resampling_accuracy, driven_smoothing > 0)\n visualize_config = {\n 'fig_size': (16, 9),\n }\n subplots = None\n if visualization is not None:\n visualize_config.update(visualization)\n plt.ion()\n fig, subplots = plt.subplots(3, 2)\n fig.set_size_inches(*visualize_config['fig_size'])\n update_polygon_subplots(drive_contour, driven_contour, subplots[0])\n\n debugging_root_directory = debugger.get_root_debug_dir_name()\n results = []\n # following two variables change during iteration\n drive = drive_contour\n driven = driven_contour\n for iteration_count in range(max_iteration):\n debug_directory = os.path.join(debugging_root_directory, f'iteration_{iteration_count}')\n os.makedirs(debug_directory, exist_ok=True)\n drive = counterclockwise_orientation(drive)\n new_res = sample_drive_gear(drive, driven_contour, k, sampling_count, keep_count, comparing_accuracy,\n max_sample_depth, debug_directory, subplots[1] if subplots is not None else None)\n results += [(None, score, *center, center_distance, drive, driven)\n for score, *center, center_distance, driven in new_res]\n for index, result in enumerate(results):\n total_score, score, *center, center_distance, this_drive, driven = result\n if subplots is not None:\n update_polygon_subplots(drive_contour, driven_contour,\n subplots[0]) # so that the two subplots can iterate\n update_polygon_subplots(this_drive, driven, subplots[1])\n subplots[1][0].scatter(center[0], center[1], 3)\n subplots[1][0].text(0, 0, str(center))\n subplots[1][1].text(0, 0, str(score))\n subplots[1][1].scatter(0, 0, 3)\n if draw_tar_functions:\n tars = [triangle_area_representation(contour, comparing_accuracy)\n for contour in (this_drive, driven)]\n for subplot, tar in zip(subplots[2], tars):\n tar = tar[:, 0]\n subplot.clear()\n subplot.plot(range(len(tar)), tar, color='blue')\n if total_score is None:\n total_score = score + shape_difference_rating(this_drive, drive_contour, comparing_accuracy,\n distance_function=trivial_distance)\n results[index] = (total_score, *result[1:])\n score_str = \"%.8f\" % total_score\n plt.savefig(os.path.join(debug_directory, f'final_result_{index}_{score_str}.png'))\n save_contour(os.path.join(debug_directory, f'final_result_{index}_drive.dat'), this_drive)\n save_contour(os.path.join(debug_directory, f'final_result_{index}_driven.dat'), driven)\n *_, drive, driven = results[-1] # get the last result\n drive_contour, driven_contour = driven_contour, drive_contour\n drive_polygon, driven_polygon = driven_polygon, drive_polygon\n drive_polar, driven_polar = driven_polar, drive_polar\n drive, driven = driven, drive\n drive_smoothing, driven_smoothing = driven_smoothing, drive_smoothing\n # drive_poly = Polygon(drive)\n # drive = shape_average(drive_polar, toExteriorPolarCoord(Polygon(drive).centroid, drive, resampling_accuracy),\n # drive_polygon.area, drive_poly.area)\n drive = phi_average.shape_average(drive_polar,\n toExteriorPolarCoord(Polygon(drive).centroid, drive, resampling_accuracy))\n drive = toCartesianCoordAsNp(drive, 0, 0)\n drive = getUniformContourSampledShape(drive, resampling_accuracy, 
drive_smoothing > 0)\n for subplot in subplots[2]:\n subplot.clear()\n return results", "def _get_f50_worker(self, ra, dec, wave, sncut, \n direct_sigmas = False, linewidth = None):\n\n logger = logging.getLogger(name=\"ShotSensitivity\")\n\n try:\n [x for x in ra]\n except TypeError:\n ra = array([ra]) \n dec = array([dec]) \n wave = array([wave]) \n\n coords = SkyCoord(ra=ra, dec=dec, unit=\"deg\")\n wave_rect = self.extractor.get_wave()\n pixsize_aa = wave_rect[1] - wave_rect[0]\n\n # This will give 999 once the noise is scaled suitably\n badval = 999*1e17/pixsize_aa\n\n # Size of window in wave elements\n filter_len = 2*self.wavenpix + 1\n\n if type(wave) != type(None):\n wave_passed = True\n else:\n wave_passed = False\n convolution_filter = ones(filter_len) \n mask = True*ones(len(coords), dtype=int)\n \n noise = []\n \n info_results = self.extractor.get_fiberinfo_for_coords(\n coords,\n radius=self.rad,\n ffsky=self.ffsky,\n return_fiber_info=True,\n fiber_lower_limit=2, \n verbose=False\n )\n\n id_, aseps, aifux, aifuy, axc, ayc, ara, adec, adata, aerror, afmask, afiberid, \\\n amultiframe = info_results\n \n \n I = None\n fac = None\n norm_all = []\n amp = [] \n nan_fib_mask = []\n\n for i, c in enumerate(coords):\n \n sel = (id_ == i)\n\n if type(wave) != type(None):\n logger.debug(\"Running on source {:f} {:f} {:f}\".format(ra[i], dec[i], wave[i]))\n else:\n logger.debug(\"Running on position {:f} {:f}\".format(ra[i], dec[i]))\n\n logger.debug(\"Found {:d} fibers\".format(sum(sel)))\n\n if sum(sel) > 0:\n \n # fiber properties \n xc = axc[sel][0]\n yc = ayc[sel][0]\n ifux = aifux[sel]\n ifuy = aifuy[sel]\n data = adata[sel]\n error = aerror[sel]\n fmask = afmask[sel]\n fiberid = afiberid[sel]\n multiframe = amultiframe[sel]\n seps = aseps[sel]\n\n # Flag the zero elements as bad\n fmask[(abs(data) < 1e-30) | (abs(error) < 1e-30)] = False\n\n iclosest = argmin(seps)\n\n amp.append(fiberid[iclosest])\n\n if len(self.bad_amps) > 0:\n amp_flag = amp_flag_from_fiberid(fiberid[iclosest], \n self.bad_amps)\n else:\n amp_flag = True\n \n # XXX Could be faster - reloads the file every run\n meteor_flag = meteor_flag_from_coords(c, self.shotid)\n\n if not (amp_flag and meteor_flag):\n logger.debug(\"The data here are bad, position is masked\")\n if wave_passed:\n noise.append(badval)\n norm_all.append(1.0)\n # value doesn't matter as in amp flag\n nan_fib_mask.append(True)\n continue\n else:\n mask[i] = False\n \n weights, I, fac = self.extractor.build_weights(xc, yc, ifux, ifuy, self.moffat, \n I=I, fac=fac, return_I_fac = True)\n \n # (See Greg Zeimann's Remedy code)\n # normalized in the fiber direction\n norm = sum(weights, axis=0) \n weights = weights/norm\n\n\n result = self.extractor.get_spectrum(data, error, fmask, weights,\n remove_low_weights = False,\n sclean_bad = self.sclean_bad,\n return_scleaned_mask = True)\n \n spectrum_aper, spectrum_aper_error, scleaned = [res for res in result] \n \n if wave_passed:\n \n index = where(wave_rect >= wave[i])[0][0]\n ilo = index - self.wavenpix\n ihi = index + self.wavenpix + 1\n\n # If lower index less than zero, truncate\n if ilo < 0:\n ilo = 0\n \n if ihi < 0:\n ihi = 0\n \n # Output lots of information for very detailed debugging\n if logger.getEffectiveLevel() == logging.DEBUG: \n logger.debug(\"Table of fibers:\")\n logger.debug(\"# fiberid wave_index ifux ifuy weight noise mask\")\n for fibidx, fid in enumerate(fiberid):\n for wi, (tw, tnoise, tmask) in enumerate(zip((weights*norm)[fibidx, ilo:ihi], \n error[fibidx, ilo:ihi], 
fmask[fibidx, ilo:ihi]), \n ilo): \n logger.debug(\"{:s} {:d} {:f} {:f} {:f} {:f} {:s}\".format(fid, wi, ifux[fibidx], \n ifuy[fibidx], tw, tnoise, \n str(tmask)))\n\n\n # Mask source if bad values within the central 3 wavebins\n nan_fib = bad_central_mask(weights*norm, logical_not(fmask), \n index) \n nan_fib_mask.append(nan_fib)\n\n # Account for NaN and masked spectral bins\n bad = isnan(spectrum_aper_error[ilo:ihi])\n goodfrac = 1.0 - sum(bad)/len(bad)\n\n\n if all(isnan(spectrum_aper_error[ilo:ihi])):\n sum_sq = badval\n else:\n sum_sq = \\\n sqrt(nansum(square(spectrum_aper_error[ilo:ihi])/goodfrac))\n\n norm_all.append(mean(norm[ilo:ihi]))\n noise.append(sum_sq)\n else:\n logger.debug(\"Convolving with window to get flux limits versus wave\")\n\n\n # Use astropy convolution so NaNs are ignored\n convolved_variance = convolve(square(spectrum_aper_error),\n convolution_filter, \n normalize_kernel=False)\n std = sqrt(convolved_variance)\n\n # Also need to convolve aperture corrections to get\n # a total apcor across the wavelength window\n convolved_norm = convolve(norm,\n convolution_filter, \n normalize_kernel=True)\n\n # To get mean account for the edges in\n # the convolution\n for iend in range(self.wavenpix):\n edge_fac = filter_len/(filter_len + iend - self.wavenpix)\n convolved_norm[iend] *= edge_fac\n convolved_norm[-iend - 1] *= edge_fac\n\n\n # Mask wavelengths with too many bad pixels\n # equivalent to nan_fib in the wave != None mode\n wunorm = weights*norm\n for index in range(len(convolved_variance)):\n if not bad_central_mask(wunorm, logical_not(fmask), index):\n std[index] = badval\n\n noise.append(std)\n norm_all.append(convolved_norm)\n \n else:\n if wave_passed:\n noise.append(badval)\n norm_all.append(1.0)\n amp.append(\"000\")\n nan_fib_mask.append(True)\n else:\n noise.append(badval*ones(len(wave_rect)))\n norm_all.append(ones(len(wave_rect)))\n amp.append(\"000\")\n mask[i] = False\n\n\n \n # Apply the galaxy mask \n gal_mask = ones(len(coords), dtype=int)\n for gal_region in self.gal_regions:\n dummy_wcs = create_dummy_wcs(gal_region.center,\n imsize=2*gal_region.height)\n # zero if near galaxy\n gal_mask = gal_mask & invert(gal_region.contains(coords, dummy_wcs))\n\n noise = array(noise)\n snoise = pixsize_aa*1e-17*noise\n\n if wave_passed:\n\n bad = (gal_mask < 0.5) | (snoise > 998) | isnan(snoise) | invert(nan_fib_mask)\n\n normnoise = snoise/norm_all\n\n if not direct_sigmas:\n normnoise = self.f50_from_noise(normnoise, wave, sncut,\n linewidth = linewidth)\n\n \n normnoise[bad] = 999.\n\n return normnoise, amp, norm_all\n\n else:\n mask[gal_mask < 0.5] = False\n \n if self.badshot:\n mask[:] = False\n\n bad = (snoise > 998) | logical_not(isfinite(snoise))\n normnoise = snoise/norm_all\n\n if not direct_sigmas:\n normnoise = self.f50_from_noise(normnoise, wave, sncut,\n linewidth = linewidth)\n\n normnoise[bad] = 999\n\n return normnoise, mask, amp, norm_all", "def cutflow(self, *names):\n for cut in names:\n if not isinstance(cut, str) or cut not in self._names:\n raise ValueError(\n \"All arguments must be strings that refer to the names of existing selections\"\n )\n\n masksonecut, maskscutflow = [], []\n for i, cut in enumerate(names):\n mask1 = self.any(cut)\n mask2 = self.all(*(names[: i + 1]))\n masksonecut.append(mask1)\n maskscutflow.append(mask2)\n\n if not self.delayed_mode:\n nevonecut = [len(self._data)]\n nevcutflow = [len(self._data)]\n nevonecut.extend(numpy.sum(masksonecut, axis=1))\n nevcutflow.extend(numpy.sum(maskscutflow, axis=1))\n\n 
else:\n nevonecut = [dask_awkward.count(self._data, axis=0)]\n nevcutflow = [dask_awkward.count(self._data, axis=0)]\n nevonecut.extend([dask_awkward.sum(mask1) for mask1 in masksonecut])\n nevcutflow.extend([dask_awkward.sum(mask2) for mask2 in maskscutflow])\n\n return Cutflow(\n names, nevonecut, nevcutflow, masksonecut, maskscutflow, self.delayed_mode\n )", "def _optimization_closure(self, iteration, step):\n aug = self._get_augmentation(iteration)\n if iteration == self.num_iter_per_step - 1:\n reg_noise_std = 0\n aug = 0\n else:\n reg_noise_std = (1 / 1000.) * (iteration // 400)\n # creates left_net_inputs and right_net_inputs by adding small noise\n clean_nets_inputs = [clean_net_input[aug] + (clean_net_input[aug].clone().normal_() * reg_noise_std)\n for clean_net_input in self.clean_nets_inputs]\n watermark_net_input = self.watermark_net_input[aug] # + (self.watermark_net_input[aug].clone().normal_() * reg_noise_std)\n mask_net_input = self.mask_net_input[aug]\n # applies the nets\n self.clean_nets_outputs = [clean_net(clean_net_input) for clean_net, clean_net_input\n in zip(self.clean_nets, clean_nets_inputs)]\n self.watermark_net_output = self.watermark_net(watermark_net_input)\n self.mask_net_output = self.mask_net(mask_net_input)\n self.total_loss = 0\n self.blur = 0\n\n self.total_loss += sum(self.l1_loss(self.watermark_net_output * self.mask_net_output +\n clean_net_output * (1 - self.mask_net_output), image_torch[aug])\n for clean_net_output, image_torch in zip(self.clean_nets_outputs, self.images_torch))\n self.total_loss.backward(retain_graph=True)", "def n1derivative_clee(cl_array,bins,n1bins,clpp,norms,cls,cltt,clee,clbb,clte,NOISE_LEVEL,polnoise,lmin,LMAXOUT,LMAX_TT,Lstep,Lmin_out):\n \n bins=bins-2\n array1001=perturbe_clist(cl_array,bins,1.001)\n array999=perturbe_clist(cl_array,bins,0.999)\n \n N1001=[[],[],[],[],[]] #list of lists containing tt,ee,eb,te,tb\n N0999=[[],[],[],[],[]]\n \n for i in range(len(array1001)):\n print(i)\n \n a=compute_n1_py(clpp,norms,cls,cltt,array1001[i],clbb,clte,NOISE_LEVEL,polnoise,lmin,LMAXOUT,LMAX_TT,Lstep,Lmin_out)\n b=compute_n1_py(clpp,norms,cls,cltt,array999[i],clbb,clte,NOISE_LEVEL,polnoise,lmin,LMAXOUT,LMAX_TT,Lstep,Lmin_out)\n for j in range(len(N1001)):\n N1001[j].append(a[j])\n N0999[j].append(b[j])\n\n delta=diff_cl(cl_array,bins)\n\n \n keys=['TT','EE','EB','TE','TB']\n \n derlist=[]\n for k in range(len(keys)):\n diff=[n1bins]\n for i in range(len(N1001[1])):\n der=((N1001[k][i][:len(n1bins)]-N0999[k][i][:len(n1bins)])*(n1bins*(n1bins+1))**2*0.25)/delta[i]\n diff.append(der)\n der=np.insert(np.transpose(diff),0,np.insert(bins,0,0),axis=0)\n derlist.append(der)\n np.savetxt('../data/n1{}dclee.txt'.format(keys[k]),der)\n return derlist", "def get_kernel_functions(self, *args, **kwargs):\n probe = self._measurement_manager.probe_at_single_point\n return (LinearMixture([(component, probe(component, *args, **kwargs))\n for component in self._field_components])\n / self.number_of_basis)", "def _loop_over_entries(x_bins, y_bins, used_eff, n_out_bins):\n sys.stdout.write('finding eff at rejection... 
')\n sys.stdout.flush()\n\n valid_x = (x_bins >= 0) & (x_bins < n_out_bins) \n valid_y = (y_bins >= 0) & (y_bins < n_out_bins)\n\n valid_indices = np.flatnonzero(valid_x & valid_y)\n\n x_bins = x_bins[valid_indices]\n y_bins = y_bins[valid_indices]\n used_eff = used_eff[valid_indices]\n\n eff_array = np.ones((n_out_bins,n_out_bins)) * -1\n for x_bin, y_bin, z in zip(x_bins, y_bins, used_eff): \n # y_bin comes first because that's what imshow wants... \n eff_array[y_bin,x_bin] = max(z, eff_array[y_bin,x_bin])\n\n sys.stdout.write('done\\n')\n return eff_array", "def get_statistics(pred, gt, num_cls=2):\n h,w = gt.shape\n statistics = []\n for i in range(num_cls):\n tp = np.sum((pred==i)&(gt==i))\n fp = np.sum((pred==i)&(gt!=i))\n fn = np.sum((pred!=i)&(gt==i)) \n statistics.append([tp, fp, fn])\n return statistics", "def evaluateSet( self, numberOfSamples=10000, kernels_to_test=[], threshold=10e-12 ):\n\n if( len(kernels_to_test)==0 ):\n kernels_to_test = self.kernel_sizes\n\n trainSet_likelihoods = []\n testSet_likelihoods = []\n\n _,indexes = self.drawSamples(numberOfSamples)\n likelihoods = {}\n\n for j in range(0, int(100/(100-self.trainSize)) ):\n\n print(\"Process Fold #\" + str(j))\n stepsize = int(np.ceil(len(indexes)*(100-self.trainSize)/100))\n\n self.testSet = indexes[j*stepsize:(j+1)*stepsize]\n self.trainingSet = indexes[:(j)*stepsize] + indexes[(j+1)*stepsize:]\n\n emptyMatrix = np.zeros( self.blurred_image.shape )\n for k in self.trainingSet:\n emptyMatrix[ k ] += 1\n\n for i,kernelSize in enumerate(kernels_to_test):\n estimated_pdf = self.estimateDensity( emptyMatrix, kernelSize )\n estimated_pdf = estimated_pdf + np.ones( estimated_pdf.shape )*threshold\n likelihoods.setdefault(kernelSize, []).append( np.sum( np.log( estimated_pdf.T[ self.testSet ] ) ) )\n\n print(likelihoods)\n averageLikelihoods = []\n\n for kernelSize in kernels_to_test:\n averageLikelihoods.append( np.sum(likelihoods[kernelSize])/len(likelihoods[kernelSize]) )\n\n print(kernels_to_test)\n print(averageLikelihoods)\n\n maxIndex = np.argmax( averageLikelihoods )\n maxLikelihood = np.max(averageLikelihoods)\n print( \"maxLikelihood: \" + str(maxLikelihood) )\n print( \"optimum Kernel: \" + str(kernels_to_test[maxIndex]))", "def find_backstats(f_arr, sigma, niter):\n ave = f_arr.mean()\n std = f_arr.std()\n for i in range(niter):\n mask = (abs(f_arr - ave) < sigma * std)\n ave = f_arr[mask].mean()\n std = f_arr[mask].std()\n return ave, std", "def evaluate_batch(self, pipelines):", "def outlierCleaner(predictions, ages, net_worths):\n \n cleaned_data = []\n error = []\n error2 = []\n for i in range(len(predictions)):\n error.append(abs(net_worths[i] - predictions[i]))\n error2.append((net_worths[i] - predictions[i]))\n error.sort()\n split = int(len(error)*0.1)\n errcut = error[len(error)-split]\n checker = error[len(error)-split : len(error)]\n\n ### your code goes here\n for i in range(len(error2)):\n if abs(error2[i]) in checker:\n continue\n else:\n cleaned_data.append((ages[i], net_worths[i], error2[i]))\n return cleaned_data", "def evaluate_fitness(program, task):\n score = 0\n\n # For each sample\n for sample in task:\n i = np.array(sample['input'])\n o = np.array(sample['output'])\n\n # For each fitness function\n images = evaluate(program, i)\n score += total_fitness(images[0], o)\n\n return score", "def evaluate_polynomials(polynomials: List[Poly]):\n fft = MultiDimNonBinaryFFT(field, root_of_unity, width)\n values = fft.multi_fft(polynomials)\n return values", "def getstats(img, thresholds):\n 
number = np.zeros(img.shape, np.float64)\n ev = np.zeros(img.shape, np.float64)\n scatter = np.zeros(img.shape, np.float64)\n for n, s, low, high, evs in thresholds:\n for i in numba.prange(img.shape[0]):\n for j in numba.prange(img.shape[1]):\n if (low < img[i, j]) and (img[i, j] < high):\n scatter[i, j] = s\n number[i, j] = n\n ev[i, j] = img[i, j] - evs\n return ev, number, scatter", "def num_44():\n def block_array(a, rows=3, cols=4, col_first=True, nodata=-1):\n \"\"\" a variant on array_split\n requires a N*m array\n \"\"\"\n s = np.array(a.shape)\n w = np.array([rows, cols])\n m = divmod(s, w)\n new_shape = w*m[0] + w*(m[1]!=0)\n ypad, xpad = new_shape - a.shape \n b = np.pad(a, pad_width=((0, ypad),(0, xpad)), \n mode='constant', \n constant_values=((nodata, nodata),(nodata, nodata)))\n rn, cn = new_shape\n x_s = np.arange(0, cn+cols, cols)[1:] #.tolist()\n y_s = np.arange(0, rn+rows, rows)[1:] #.tolist()\n print(\"x_s {}\\ny_s {}\".format(x_s, y_s))\n #c = np.array([i for i in np.hsplit(b, x_s) if len(i) > 0])\n c = np.array([i for i in np.split(b, x_s, axis=1) if len(i) > 0])\n d = np.array([i for i in np.split(c, y_s, axis=1) if len(i) > 0])\n e = d.swapaxes(0, 1)\n ix = np.in1d(e.ravel(), nodata).reshape(e.shape)\n f = np.ma.array(e, mask=ix, fill_value=-1)\n return b, c, d, e, f\n y, x = 9, 11\n a = np.arange(x*y).reshape(y,x)\n b, c, d, e, f = block_array(a)\n print(\"\\n{}\".format(num_44.__doc__)) \n for i in [a, b, c, d, e, f]:\n _f(i)\n return a, b, c, d, e, f", "def custom_filter(some_func, iterator_list):\n\n local_iterator = from_input_to_list(iterator_list)\n func_map = [some_func(i) for i in local_iterator]\n true_list = [j for j in func_map if j > 100] # here we can hardcode any condition\n\n return true_list", "def outlierCleaner(predictions, ages, net_worths):\n\n\n cleaned_data = []\n ### your code goes here\n # print predictions\n # print ages\n # print net_worths\n\n # errors = predictions - net_worths\n # errors = map(lambda x: x ** 2, errors)\n # idx = [sorted(range(len(errors)), key=lambda k: errors[k])]\n # errors = sorted(errors)\n # errors = errors[0:(int(len(errors) * 0.9))]\n # ages = ages[idx][0:(int(len(ages) * 0.9))]\n # net_worths = net_worths[idx][0:(int(len(net_worths) * 0.9))]\n # errors = [e for inner_list in errors for e in inner_list]\n # ages = [e for inner_list in ages for e in inner_list]\n # net_worths = [e for inner_list in net_worths for e in inner_list]\n # cleaned_data = [tuple(ages), tuple(net_worths)]\n # print cleaned_data\n\n for i in range(0, len(predictions)):\n cleaned_data.append((ages[i], net_worths[i], abs(net_worths[i] - predictions[i])))\n cleaned_data = sorted(cleaned_data, key=lambda x:x[2])\n\n return cleaned_data[:int(len(cleaned_data) * 0.9)]", "def n0derivative_clee(cl_array,bins,n0bins,clpp,norms,cls,cltt,clee,clbb,clte,NOISE_LEVEL,polnoise,lmin,LMAXOUT,LMAX_TT,Lstep,Lmin_out):\n \n bins=bins-2\n array1001=perturbe_clist(cl_array,bins,1.001)\n array999=perturbe_clist(cl_array,bins,0.999)\n \n N1001=[[],[],[],[],[]] #list of lists containing tt,ee,eb,te,tb\n N0999=[[],[],[],[],[]]\n \n for i in range(len(array1001)):\n print(i)\n a=compute_n0_py(clpp,cls,cltt,array1001[i],clbb,clte,NOISE_LEVEL,polnoise,lmin,LMAXOUT,LMAX_TT,Lmin_out,Lstep)\n b=compute_n0_py(clpp,cls,cltt,array999[i],clbb,clte,NOISE_LEVEL,polnoise,lmin,LMAXOUT,LMAX_TT,Lmin_out,Lstep)\n for j in range(len(N1001)):\n N1001[j].append(a[j])\n N0999[j].append(b[j])\n\n delta=diff_cl(cl_array,bins)\n\n \n keys=['TT','EE','EB','TE','TB']\n \n derlist=[]\n for 
k in range(len(keys)):\n diff=[n0bins]\n for i in range(len(N1001[1])):\n der=((N1001[k][i][:len(n0bins)]-N0999[k][i][:len(n0bins)])*(n0bins*(n0bins+1))**2*0.25)/delta[i]\n diff.append(der)\n der=np.insert(np.transpose(diff),0,np.insert(bins,0,0),axis=0)\n derlist.append(der)\n np.savetxt('../data/n0{}dclee.txt'.format(keys[k]),der)\n print(derlist)\n return derlist", "def cfilter(func,iterable):\n result = []\n\n for i in iterable:\n\n if func(i) == True:\n result.append(i)\n\n return result", "def outlierCleaner(predictions, ages, net_worths):\n from itertools import izip \n \n #print'Pass data good'\n cleaned_data = []\n\n ### your code goes here\n counter=0\n error= [abs(x-y) for x, y in izip(predictions, net_worths)]\n for counter in range(0,4):\n\t\tmax_pos=0\n\t\tfor max_error in error:\n\t\t\tif max_error==max(error):\n\t\t\t\tmax_pos=error.index(max_error)\n\t\t\t\tprint max_error,max_pos\n\t\t\t\tcounter=counter+1\n\t\t\t\tprint counter\n\t\t\t\terror.remove(error[max_pos])\n\t\t\t\tpredictions.remove(predictions[max_pos])\n\t\t\t\tages.remove(ages[max_pos])\n\t\t\t\tnet_worths.remove(net_worths[max_pos])\n\t\t\t\n \n cleaned_data=[list(x) for x in izip(ages, net_worths, error)]\n print len(predictions)\n print len(ages)\n print len(net_worths)\n return cleaned_data", "def mp_calc_cov_accum_stats(self, args):\n # initalises data array classes\n nlat = len(args[\"grid_lat\"])\n nlon = len(args[\"grid_lon\"])\n nbin = len(args[\"bins\"])\n depth_range = args[\"depth_range\"]\n source_types = args[\"source_types\"]\n cov_stats = arrays.CovSumStats((nlat, nlon, nbin))\n grid_stats = arrays.GridSumStats((nlat, nlon))\n\n # loop over files and calculate the accumulated stats\n for infile in args[\"list_of_files\"]:\n if depth_range:\n print(\"Processing file: {} | Depths: {} to {}\".format(infile, \n str(depth_range[0]), str(depth_range[1])))\n else:\n print(\"Processing file: {} | Depths: Surface variable\".format(infile))\n \n # Read fdbk variables \n fdbk_var_array, depths = IO.ncread_fdbk_vars(infile, \n args['obs_type'], args['source_types'])\n \n # For profiles pick a single observation at each depth range/latitude/longitude\n # This is done to avoid profiles being correlated with themselves\n if depth_range:\n fdbk_var_array = ObsProfiles.random_subsample_profiles(depths, \n depth_range, fdbk_var_array)\n \n if len(fdbk_var_array.lats)>0:\n if (np.min(args[\"grid_lon\"]) >= 0.):\n fdbk_var_array.lons[np.where(fdbk_var_array.lons < 0.)] = fdbk_var_array.lons + 360.\n \n # Need to squash obs and model arrays to be 1D array\n fdbk_var_array.mod_vals = fdbk_var_array.mod_vals.flatten()\n fdbk_var_array.obs_vals = fdbk_var_array.obs_vals.flatten()\n \n # Update stats with summed quantities for each call\n cov_stats, grid_stats = self.calc_cov_stats(args[\"grid_lat\"], args[\"grid_lon\"], \n args[\"bins\"], fdbk_var_array, cov_stats, grid_stats)\n \n return cov_stats, grid_stats", "def monitor(data_feeder):\n _total_time = 0.\n _costs = []\n _data_feeder = data_feeder(BATCH_SIZE,\n SEQ_LEN,\n OVERLAP,\n Q_LEVELS,\n Q_ZERO,\n Q_TYPE)\n\n for _seqs, _reset, _mask in _data_feeder:\n _start_time = time.time()\n _cost = test_fn(_seqs, _mask)\n _total_time += time.time() - _start_time\n\n _costs.append(_cost)\n\n return numpy.mean(_costs), _total_time" ]
[ "0.56539094", "0.52569467", "0.5236992", "0.5231127", "0.5104325", "0.5093013", "0.5085143", "0.5064352", "0.4961732", "0.49305794", "0.49301794", "0.4917983", "0.48857465", "0.4866057", "0.48591626", "0.48007303", "0.47892055", "0.47815204", "0.4772867", "0.47728154", "0.4765887", "0.47539708", "0.47531417", "0.47531208", "0.47527468", "0.47428644", "0.47381568", "0.47340143", "0.47275987", "0.47232923", "0.4705761", "0.47041965", "0.46904787", "0.4690453", "0.4680938", "0.4680071", "0.46650267", "0.46397677", "0.46165216", "0.45970604", "0.45958024", "0.45854744", "0.4579831", "0.4578013", "0.45772433", "0.45741013", "0.45696878", "0.45686442", "0.45671093", "0.4564886", "0.45643124", "0.45595396", "0.45574853", "0.45467007", "0.4541812", "0.45416346", "0.45412022", "0.4538582", "0.45295307", "0.45257917", "0.45217666", "0.45214403", "0.45199051", "0.45043203", "0.45022407", "0.4497297", "0.44875363", "0.4478523", "0.44714984", "0.44682863", "0.44670004", "0.44639727", "0.4463298", "0.44596487", "0.44574043", "0.44569972", "0.44565716", "0.44455698", "0.44439736", "0.44398335", "0.44393867", "0.44374645", "0.44370446", "0.4436005", "0.44304615", "0.44282803", "0.44193637", "0.44148123", "0.44144842", "0.4410041", "0.44095874", "0.4405763", "0.4404039", "0.44040072", "0.4403912", "0.44030222", "0.4401963", "0.43992597", "0.43977574", "0.43939534" ]
0.77393055
0
Expects a list of signals and a list of bkgs (Dataset objects), and a cut_function and cut_values.
def roccurve(signals, bkgs, cut_function, cut_values):
    eff_sig, n_pass_sig, n_total_sig = get_eff(svjflatanalysis.iterate(signals), cut_function, cut_values)
    eff_bkg, n_pass_bkg, n_total_bkg = get_eff(svjflatanalysis.iterate(bkgs), cut_function, cut_values)
    return eff_sig, eff_bkg, n_pass_sig, n_pass_bkg, n_total_sig, n_total_bkg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_cuts(data, args, verbose):\n\n if args['experiment']['cut_finding'] == CutFinding.features:\n\n values = (data.xs == True).T\n return Cuts(values=values)\n\n if args['experiment']['cut_finding'] == CutFinding.binning:\n\n values, names = binning(xs=data.xs,\n range_answers=args['cut_finding']['range_answers'],\n n_bins=args['cut_finding']['n_bins'])\n return Cuts(values=values, names=names)\n\n if args['experiment']['cut_finding'] == CutFinding.Kernighan_Lin:\n\n values = kernighan_lin(A=data.A,\n nb_cuts=args['cut_finding']['nb_cuts'],\n lb_f=args['cut_finding']['lb_f'],\n seed=args['experiment']['seed'],\n verbose=verbose)\n values = np.unique(values, axis=0)\n return Cuts(values=values)\n\n if args['experiment']['cut_finding'] == CutFinding.kmodes:\n\n values = find_kmodes_cuts(xs=data.xs,\n max_nb_clusters=args['cut_finding']['max_nb_clusters'])\n values = np.unique(values, axis=0)\n return Cuts(values=values)\n\n if args['experiment']['cut_finding'] == CutFinding.Fiduccia_Mattheyses:\n\n values = fid_mat(xs=data.A,\n nb_cuts=args['cut_finding']['nb_cuts'],\n lb_f=args['cut_finding']['lb_f'],\n seed=args['experiment']['seed'],\n verbose=verbose)\n values = np.unique(values, axis=0)\n return Cuts(values=values)\n\n if args['experiment']['cut_finding'] == CutFinding.linear:\n\n values, equations = linear_cuts(xs=data.xs,\n equations=args['cut_finding']['equations'],\n verbose=verbose)\n\n return Cuts(values=values, equations=equations)\n\n raise ValueError('Wrong name for a cut finding function')", "def get_events_passing_cuts(bolo_name, WIMP_mass, d_cut, analysis_type, MVA_tag, bin_X, min_X, max_X, list_variables, **kwargs): \n\n try:\n kwargs[\"weight_dir\"]\n except KeyError:\n sys.exit()\n\n #Get heat _fraction\n heat_fraction = kwargs[\"classifier_name\"][13:]\n\n #Get scaling dict to set the weights\n d_scaling = BDT_fh.open_MVA_scaling_file(bolo_name, analysis_type, MVA_tag)\n\n d_event_dir = {\"S1Pb\":\"Beta_and_Pb\", \"S2Pb\":\"Beta_and_Pb\", \"S1Beta\":\"Beta_and_Pb\", \"S2Beta\":\"Beta_and_Pb\",\n \"S1Gamma\":\"Gamma\", \"S2Gamma\":\"Gamma\", \"FidGamma\":\"Gamma\", \n \"heatonly_heat_fraction\" + heat_fraction: \"Heatonly\", \"WIMP_mass_\" + str(WIMP_mass): \"WIMP\"}\n key_heat = \"heatonly_heat_fraction\" + heat_fraction\n\n #Load data\n d_test = dp.get_data_array(bolo_name, 1, analysis_type, MVA_tag, d_event_dir.keys(), 1, list_variables, datasplit = 1)\n\n # Get classifier\n model_dir = script_utils.create_directory(\"../../Classifier_files/\" + bolo_name + \"/\" + analysis_type + \"/\"+ kwargs[\"weight_dir\"] + \"/\") \n if kwargs.has_key(\"classifier_name\"):\n modelfile = model_dir + \"xgboost_classifier_mass_\" + str(WIMP_mass) + \"_\" + kwargs[\"classifier_name\"] + \".model\"\n bst = xgb.Booster({'nthread':16}, model_file = modelfile)\n\n #Get predictions on test sample\n d_pred = {}\n d_hist = {}\n d_color = {\"S1Pb\":kOrange-8, \"S2Pb\":kOrange-9, \"S1Beta\":kGreen+2, \"S2Beta\":kGreen-3,\n \"S1Gamma\":kBlue-7, \"S2Gamma\":kBlue, \"FidGamma\":kAzure+10, key_heat: kRed, \"WIMP_mass_\" + str(WIMP_mass):kGray, \"neutron\":kMagenta}\n\n #ROOT out_dir \n root_dir = script_utils.create_directory(\"./ROOT_files/\" + bolo_name + \"/\" + analysis_type + \"/\")\n file_root = TFile(root_dir + bolo_name + \"_sensi_eff_curves_heat_fraction\" + heat_fraction + \"_mass_\" + str(WIMP_mass) + \".root\", \"read\")\n\n #Write events that pass cut to a file \n txt_dir = script_utils.create_directory(\"./Text_files/Simulated_sensitivity/\")\n with open(txt_dir + 
\"/simulated_events_passing_cut_heat_fraction\" + heat_fraction + \"_mass_\" + str(WIMP_mass) + \".txt\", \"w\") as fout:\n\n fout.write(\"heat_fraction,exposure,num_events_passing_cut\\n\")\n\n #Loop over possible exposure values\n for exposure in [10, 50, 100, 500]:\n script_utils.print_utility(\"Getting events passing cut for exposure of \" + str(exposure) + \" mass of \" + str(WIMP_mass))\n for event_type in d_test.keys():\n d_pred[event_type] = bst.predict( xgb.DMatrix(d_test[event_type].iloc[:,:-3].values) )\n d_hist[event_type] = TH1F(\"h\" + event_type + str(exposure), \"h\" + event_type + str(exposure), bin_X, min_X, max_X)\n PyRPl.fill_TH1(d_hist[event_type], d_pred[event_type])\n PyRPl.process_TH1(d_hist[event_type], use_fill_bool = True, color = d_color[event_type] )\n if \"WIMP\" not in event_type:\n d_hist[event_type].Scale(float(d_scaling[\"prop_\" + event_type])*float(d_scaling[\"exp_per_day\"])*exposure/float(d_hist[event_type].Integral()))\n else:\n d_hist[\"WIMP_mass_\" + str(WIMP_mass)].Scale(1./d_hist[\"WIMP_mass_\" + str(WIMP_mass)].Integral())\n\n list_hist_bckg =[d_hist[\"S1Pb\"], d_hist[\"S2Pb\"], d_hist[\"S1Beta\"], d_hist[\"S2Beta\"], d_hist[\"S1Gamma\"], d_hist[\"S2Gamma\"], d_hist[\"FidGamma\"], d_hist[key_heat]]\n\n hsum_bckg=TH1F(\"hsum_bckg\" + str(exposure),\"hsum_bckg\" + str(exposure), bin_X, min_X, max_X)\n for i in range(1,bin_X+1):\n sumcontent = sum([h.GetBinContent(i) for h in list_hist_bckg])\n hsum_bckg.SetBinContent(i, sumcontent)\n\n fsensi = file_root.Get(\"sensitivity_expo_\" + str(exposure))\n cut_val = fsensi.GetMinimumX(2,10)\n\n #Run Poisson simulations\n list_event_pass_cut=[]\n for nsimu in range(100):\n hdatasimu = TH1F(\"hdatasimu\",\"hdatasimu\", bin_X, min_X, max_X)\n for i in range(1,bin_X+1):\n hdatasimu.SetBinContent(i, np.random.poisson(hsum_bckg.GetBinContent(i)))\n bin_cut = hdatasimu.FindBin(cut_val)\n num_entry_cut = int(hdatasimu.Integral(bin_cut, max_X))\n list_event_pass_cut.append(str(num_entry_cut))\n del hdatasimu\n fout.write(heat_fraction[1:] + \",\" + str(exposure) + \",\" + \",\".join(list_event_pass_cut) + \"\\n\")", "def cut_eval(self, hits, *args):\n end = self.start_offset + self.train_window + self.predict_window\n return self.cut(hits, self.start_offset, end) + args", "def plot_roccurves_per_bkg(signals, bkgs, cut_function, cut_values, ax=None):\n # Get a default ax if none is given\n if ax is None:\n import matplotlib.pyplot as plt\n fig = plt.figure(figsize=(8,8))\n ax = fig.gca()\n # Get signal efficieny once\n eff_sig, n_pass_sig, n_total_sig = get_eff(svjflatanalysis.iterate(signals), cut_function, cut_values)\n # Perform some basic plotting setup\n ax.plot([0.0,1.0], [0.0,1.0], linestyle='--', color='xkcd:gray')\n ax.set_xlim(0.0, 1.05)\n ax.set_ylim(0.0, 1.05)\n ax.set_ylabel('Signal eff.', fontsize=DEFAULT_FONTSIZE)\n ax.set_xlabel('Bkg eff.', fontsize=DEFAULT_FONTSIZE)\n ax.legend(fontsize=DEFAULT_FONTSIZE)\n # Then efficiencies per bkg category (ttjets, qcd, ...)\n bkg_categories = list(set([ b.get_category() for b in bkgs ]))\n bkg_categories.sort()\n lines = {}\n for bkg_cat in bkg_categories:\n # Get Datasets that have this category\n bkgs_this_cat = [ b for b in bkgs if b.get_category() == bkg_cat ]\n # Compute efficiency in this category\n eff_bkg, n_pass_bkg, n_total_bkg = get_eff(svjflatanalysis.iterate(bkgs_this_cat), cut_function, cut_values)\n # Draw roccurve for this category\n line = _draw_roccurve(eff_sig, eff_bkg, cut_values, ax)\n line.set_label(bkg_cat)\n # Save this line in a dict for 
potential outputting/modifying\n lines[bkg_cat] = line\n return ax", "def get_fidcuts():\n return combine_cuts([fid_cuts('muN_pt', 'muN_eta'),\n fid_cuts('muP_pt', 'muP_eta')])", "def bessel_bandpass_filter(data, lowcut, highcut, fs, order=2):\n\n nyq = 0.5 * fs\n low = lowcut / nyq\n high = highcut / nyq\n\n # bessel() and lfilter() are from scipy.signal\n\n b, a = bessel(order, [low, high], btype='band')\n y = lfilter(b, a, data)\n return y", "def cutflow(self, *names):\n for cut in names:\n if not isinstance(cut, str) or cut not in self._names:\n raise ValueError(\n \"All arguments must be strings that refer to the names of existing selections\"\n )\n\n masksonecut, maskscutflow = [], []\n for i, cut in enumerate(names):\n mask1 = self.any(cut)\n mask2 = self.all(*(names[: i + 1]))\n masksonecut.append(mask1)\n maskscutflow.append(mask2)\n\n if not self.delayed_mode:\n nevonecut = [len(self._data)]\n nevcutflow = [len(self._data)]\n nevonecut.extend(numpy.sum(masksonecut, axis=1))\n nevcutflow.extend(numpy.sum(maskscutflow, axis=1))\n\n else:\n nevonecut = [dask_awkward.count(self._data, axis=0)]\n nevcutflow = [dask_awkward.count(self._data, axis=0)]\n nevonecut.extend([dask_awkward.sum(mask1) for mask1 in masksonecut])\n nevcutflow.extend([dask_awkward.sum(mask2) for mask2 in maskscutflow])\n\n return Cutflow(\n names, nevonecut, nevcutflow, masksonecut, maskscutflow, self.delayed_mode\n )", "def apply_cuts(objects):\n #- Check if objects is a filename instead of the actual data\n if isinstance(objects, (str, unicode)):\n objects = io.read_tractor(objects)\n \n #- undo Milky Way extinction\n flux = unextinct_fluxes(objects)\n gflux = flux['GFLUX']\n rflux = flux['RFLUX']\n zflux = flux['ZFLUX']\n w1flux = flux['W1FLUX']\n wflux = flux['WFLUX']\n \n #- DR1 has targets off the edge of the brick; trim to just this brick\n if 'BRICK_PRIMARY' in objects.dtype.names:\n primary = objects['BRICK_PRIMARY']\n else:\n primary = np.ones(len(objects), dtype=bool)\n \n #----- LRG\n lrg = primary.copy()\n lrg &= rflux > 10**((22.5-23.0)/2.5)\n lrg &= zflux > 10**((22.5-20.56)/2.5)\n lrg &= w1flux > 10**((22.5-19.35)/2.5)\n lrg &= zflux > rflux * 10**(1.6/2.5)\n #- clip to avoid warnings from negative numbers raised to fractional powers\n lrg &= w1flux * rflux.clip(0)**(1.33-1) > zflux.clip(0)**1.33 * 10**(-0.33/2.5)\n\n #----- ELG\n elg = primary.copy()\n elg &= rflux > 10**((22.5-23.4)/2.5)\n elg &= zflux > rflux * 10**(0.3/2.5)\n elg &= zflux < rflux * 10**(1.5/2.5)\n elg &= rflux**2 < gflux * zflux * 10**(-0.2/2.5)\n elg &= zflux < gflux * 10**(1.2/2.5)\n\n #----- Quasars\n psflike = ((objects['TYPE'] == 'PSF') | (objects['TYPE'] == 'PSF ')) \n qso = primary.copy()\n qso &= psflike\n qso &= rflux > 10**((22.5-23.0)/2.5)\n qso &= rflux < gflux * 10**(1.0/2.5)\n qso &= zflux > rflux * 10**(-0.3/2.5)\n qso &= zflux < rflux * 10**(1.1/2.5)\n #- clip to avoid warnings from negative numbers raised to fractional powers\n qso &= wflux * gflux.clip(0)**1.2 > rflux.clip(0)**(1+1.2) * 10**(-0.4/2.5)\n ### qso &= wflux * gflux**1.2 > rflux**(1+1.2) * 10**(2/2.5)\n\n #------ Bright Galaxy Survey\n #- 'PSF' for astropy.io.fits; 'PSF ' for fitsio (sigh)\n bgs = primary.copy()\n bgs &= ~psflike\n bgs &= rflux > 10**((22.5-19.35)/2.5)\n\n #----- Standard stars\n fstd = primary.copy()\n fstd &= psflike\n fracflux = objects['DECAM_FRACFLUX'].T \n signal2noise = objects['DECAM_FLUX'] * np.sqrt(objects['DECAM_FLUX_IVAR'])\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n for j in (1,2,4): #- g, r, 
z\n fstd &= fracflux[j] < 0.04\n fstd &= signal2noise[:, j] > 10\n\n #- observed flux; no Milky Way extinction\n obs_rflux = objects['DECAM_FLUX'][:, 2]\n fstd &= obs_rflux < 10**((22.5-16.0)/2.5)\n fstd &= obs_rflux > 10**((22.5-19.0)/2.5)\n #- colors near BD+17; ignore warnings about flux<=0\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n grcolor = 2.5 * np.log10(rflux / gflux)\n rzcolor = 2.5 * np.log10(zflux / rflux)\n fstd &= (grcolor - 0.32)**2 + (rzcolor - 0.13)**2 < 0.06**2\n\n #-----\n #- construct the targetflag bits\n #- Currently our only cuts are DECam based (i.e. South)\n desi_target = lrg * desi_mask.LRG_SOUTH\n desi_target |= elg * desi_mask.ELG_SOUTH\n desi_target |= qso * desi_mask.QSO_SOUTH\n\n desi_target |= lrg * desi_mask.LRG\n desi_target |= elg * desi_mask.ELG\n desi_target |= qso * desi_mask.QSO\n\n desi_target |= fstd * desi_mask.STD_FSTAR\n \n bgs_target = bgs * bgs_mask.BGS_BRIGHT\n bgs_target |= bgs * bgs_mask.BGS_BRIGHT_SOUTH\n\n #- nothing for MWS yet; will be GAIA-based\n mws_target = np.zeros_like(bgs_target)\n\n #- Are any BGS or MWS bit set? Tell desi_target too.\n desi_target |= (bgs_target != 0) * desi_mask.BGS_ANY\n desi_target |= (mws_target != 0) * desi_mask.MWS_ANY\n\n return desi_target, bgs_target, mws_target", "def filt_bp(sig: np.ndarray, Ss: int, Cfs0: int, Cfs1: None,\n order=5) -> np.ndarray:\n nyq = 0.5 * Ss\n normal_cutoff1 = Cfs0 / nyq\n normal_cutoff2 = Cfs1 / nyq\n b, a = butter(order, (normal_cutoff1, normal_cutoff2),\n btype='band',\n analog=False)\n return lfilter(b, a, sig)", "def butter_bp_coe(lowcut, highcut, fs, order=1):\n nyq = 0.5 * fs\n low = lowcut / nyq\n high = highcut / nyq\n b, a = butter(order, [low, high], btype='band')\n return b, a", "def ANN_binned_tagged_jets_hist(datalist, model, discriminant_cuts, CSV_cuts, bins, nbins, mode=\"pT_jet\",Save=False,addFeature=False):\n title = \"binned_tagged_jets_vs_\"+mode\n\tdiscriminant = \"ANN\"\n AllJetsHistlist = []\n CSVHistlist = []\n DiscriminantHistlist = []\n if mode == \"pT_hadron\":\n feature = 2\n elif mode == \"pT_jet\":\n feature = 3\n elif mode == \"decay_vx\":\n feature = 4\n for n,data in enumerate(datalist):\n\t\tdatatitle = data[3]\n print \"working on\",datatitle\n ran = data[4]\n\t\tCSV = data[2]\n\t\tpT = data[1]\n\t\tx_data = data[0]\n AllJetsHistlist.append(rt.TH1D(datatitle+\"_AllJets\",datatitle+\"_\"+title,nbins,ran[0],ran[1]))\n AllJetsHistlist[n].SetLineColor(4)\n CSVHistlist.append(rt.TH1D(datatitle+\"_CSV\",datatitle+\"_\"+title,nbins,ran[0],ran[1]))\n CSVHistlist[n].SetLineColor(3)\n DiscriminantHistlist.append(rt.TH1D(datatitle+\"_Discriminant\",datatitle+\"_\"+title,nbins,ran[0],ran[1]))\n DiscriminantHistlist[n].SetLineColor(2)\n\t\n\t\tif addFeature == False:\n\t\t\tpred_y = model.predict(ANN_functional_shape(x_data))\n\t\telif addFeature == \"pT\":\n\t\t\tpred_y = model.predict(ANN_functional_shape(x_data)+[pT/200])\n\t\telif addFeature == \"PV\":\n\t\t\tassert x_data.shape[1] == 21, \"wrong x_data format: PV cannot be found\"\n\t\t\tpred_y = model.predict(ANN_functional_shape(x_data)+[x_data[:,-1]/10.])\n\t\telse:\n\t\t\tprint \"invalid feature input\"\n\t\t\treturn None\n\t\tbin_numbers = ANN_bin_selection(pT,bins)\n\n\t for i,pT_value in enumerate(pT):\n\t if bin_numbers[i] == -100: continue\n\t\t\tAllJetsHistlist[n].Fill(pT_value)\n\t if pred_y[i] >= discriminant_cuts[bin_numbers[i]]: DiscriminantHistlist[n].Fill(pT_value)\n\t if CSV[i] >= CSV_cuts[bin_numbers[i]]: CSVHistlist[n].Fill(pT_value)\n\n canvaslist = []\n 
legendlist = []\n Tfilelist = []\n for n,data in enumerate(datalist):\n\t\tdatatitle = data[3]\n canvaslist.append(rt.TCanvas(datatitle+\"_canvas\",\"canvas\",600,600))\n canvaslist[n].SetTitle(datatitle+\"_\"+title)\n rt.gStyle.SetOptStat(0)\n legendlist.append(rt.TLegend(0.9,0.9,0.65,0.75))\n legendlist[n].AddEntry(AllJetsHistlist[n], \"All jets\")\n legendlist[n].AddEntry(CSVHistlist[n], \"CSV\")\n legendlist[n].AddEntry(DiscriminantHistlist[n], discriminant)\n AllJetsHistlist[n].GetXaxis().SetTitle(mode)\n AllJetsHistlist[n].GetYaxis().SetTitle('# jets')\n AllJetsHistlist[n].GetYaxis().SetTitleOffset(1.5)\n #AllJetsHistlist[n].Draw()\n #CSVHistlist[n].Draw(\"SAME\")\n #DiscriminantHistlist[n].Draw(\"SAME\")\n #legendlist[n].Draw()\n if Save:\n #canvaslist[n].SaveAs(title+\"_\"+datatitle+discriminant+\".png\")\n Tfilelist.append(rt.TFile(\"Thesis_Plots/root_files/\"+title+\"_\"+datatitle+discriminant+\".root\",\"recreate\"))\n print \"saved histogram as Thesis_Plots/root_files/\"+title+\"_\"+datatitle+discriminant+\".root\"\n AllJetsHistlist[n].Write()\n CSVHistlist[n].Write()\n DiscriminantHistlist[n].Write()", "def targetFromSignals(obars, nbands=3, amount=1, targetprofit=15., stoploss=45.):\n # bandsg, yband, ask, bid, day, amount, targetprofit, stoploss\n bars = obars.copy()\n for j in range(nbands): # for each band traverse it\n ibandsg = bars.columns.get_loc('bandsg'+str(j))\n # being pessimistic ... right\n ybandsell = traverseSellBand(bars.iloc[:, ibandsg].values.astype(int),\n bars.H.values, bars.L.values, bars.date.values,\n amount, targetprofit, stoploss)\n ybandbuy = traverseBuyBand(bars.iloc[:, ibandsg].values.astype(int),\n bars.H.values, bars.L.values, bars.date.values,\n amount, targetprofit, stoploss)\n bars['y'+str(j)] = mergebandsignals(ybandsell, ybandbuy)\n\n return bars", "def runCutVals(df, eVal=0., windowSize = 2):\n\n dfg = df.groupby(['cpd1'])\n\n eMin = round(eVal - windowSize/2, 2)\n eMax = round(eMin + windowSize, 2)\n dFullPeakE, dFullBkgE = 0, 0\n dCutPeakE, dCutBkgE = 0, 0\n dFullPeakN, dFullBkgN = 0, 0\n dCutPeakN, dCutBkgN = 0, 0\n\n for name, g in dfg:\n valsFull = g['trapENFCal1'].loc[(g['trapENFCal1']>eMin) & (g['trapENFCal1']<eMax)].values + g['trapENFCal2'].loc[(g['trapENFCal1']>eMin) & (g['trapENFCal1']<eMax)].values\n\n valsCut = g['trapENFCal1'].loc[(g['Pass1']==True) & (g['Pass2']==True) & (g['trapENFCal1']>eMin) & (g['trapENFCal1']<eMax)].values + g['trapENFCal2'].loc[(g['Pass1']==True) & (g['Pass2']==True) & (g['trapENFCal1']>=eMin) & (g['trapENFCal1']<=eMax)].values\n if name in enrDetList:\n dFullPeakE += len(valsFull[(valsFull > 237.28) & (valsFull < 239.46)])\n dCutPeakE += len(valsCut[(valsCut > 237.28) & (valsCut < 239.46)])\n dFullBkgE += len(valsFull[(valsFull > 235) & (valsFull < 237.18)])\n dCutBkgE += len(valsCut[(valsCut > 235) & (valsCut < 237.18)])\n elif name in natDetList:\n dFullPeakN += len(valsFull[(valsFull > 237.28) & (valsFull < 239.46)])\n dCutPeakN += len(valsCut[(valsCut > 237.28) & (valsCut < 239.46)])\n dFullBkgN += len(valsFull[(valsFull > 235) & (valsFull < 237.18)])\n dCutBkgN += len(valsCut[(valsCut > 235) & (valsCut < 237.18)])\n\n return dFullPeakE, dCutPeakE, dFullBkgE, dCutBkgE, dFullPeakN, dCutPeakN, dFullBkgN, dCutBkgN", "def bandstop_filter(data, lowcut, highcut, fs=2000, numtaps=255):\n nyq = fs / 2\n\n # design filter\n fe1 = lowcut / nyq\n fe2 = highcut / nyq\n b = firwin(numtaps, (fe1, fe2), pass_zero=True)\n\n filtered = lfilter(b, 1, data)\n\n return filtered", "def __init__(self, *args):\n 
_BRepAlgo.BRepAlgo_Cut_swiginit(self,_BRepAlgo.new_BRepAlgo_Cut(*args))", "def efficient_binned_tagged_jets_hist(datalist,discriminant, discriminant_cuts, CSV_cuts, bins, nbins, Difference=False, mode=\"pT_jet\",Save=False):\n title = \"binned_tagged_jets_vs_\"+mode\n AllJetsHistlist = []\n CSVHistlist = []\n DiscriminantHistlist = []\n if mode == \"pT_hadron\":\n feature = 2\n elif mode == \"pT_jet\":\n feature = 3\n elif mode == \"decay_vx\":\n feature = 4\n for n,data in enumerate(datalist):\n print \"working on\",data[1]\n ran = data[2]\n AllJetsHistlist.append(rt.TH1D(data[1]+\"_AllJets\",data[1]+\"_\"+title,nbins,ran[0],ran[1]))\n AllJetsHistlist[n].SetLineColor(4)\n CSVHistlist.append(rt.TH1D(data[1]+\"_CSV\",data[1]+\"_\"+title,nbins,ran[0],ran[1]))\n CSVHistlist[n].SetLineColor(3)\n DiscriminantHistlist.append(rt.TH1D(data[1]+\"_Discriminant\",data[1]+\"_\"+title,nbins,ran[0],ran[1]))\n DiscriminantHistlist[n].SetLineColor(2)\n for particle in data[0]:\n bin_number = FCM.bin_selection(particle,bins)\n if bin_number == -100: continue\n AllJetsHistlist[n].Fill(particle[feature])\n if particle[1] >= CSV_cuts[bin_number]: CSVHistlist[n].Fill(particle[feature])\n if Difference:\n L = particle[8]-particle[5]\n else:\n if particle[17] != 0:\n L = particle[20]/float(particle[17])\n else:\n continue\n if L >= discriminant_cuts[bin_number]: DiscriminantHistlist[n].Fill(particle[feature])\n canvaslist = []\n legendlist = []\n Tfilelist = []\n for n,data in enumerate(datalist):\n canvaslist.append(rt.TCanvas(data[1]+\"_canvas\",\"canvas\",600,600))\n canvaslist[n].SetTitle(data[1]+\"_\"+title)\n rt.gStyle.SetOptStat(0)\n legendlist.append(rt.TLegend(0.9,0.9,0.65,0.75))\n legendlist[n].AddEntry(AllJetsHistlist[n], \"All jets\")\n legendlist[n].AddEntry(CSVHistlist[n], \"CSV\")\n legendlist[n].AddEntry(DiscriminantHistlist[n], discriminant)\n AllJetsHistlist[n].GetXaxis().SetTitle(\"jet p_{T} (GeV)\")\n AllJetsHistlist[n].GetYaxis().SetTitle('# jets')\n AllJetsHistlist[n].GetYaxis().SetTitleOffset(1.5)\n #AllJetsHistlist[n].Draw()\n #CSVHistlist[n].Draw(\"SAME\")\n #DiscriminantHistlist[n].Draw(\"SAME\")\n #legendlist[n].Draw()\n if Save:\n #canvaslist[n].SaveAs(title+\"_\"+data[1]+discriminant+\".png\")\n Tfilelist.append(rt.TFile(\"Thesis_Plots/root_files/\"+title+\"_\"+data[1]+discriminant+\".root\",\"recreate\"))\n print \"saved histogram as Thesis_Plots/root_files/\"+title+\"_\"+data[1]+discriminant+\".root\"\n AllJetsHistlist[n].Write()\n CSVHistlist[n].Write()\n DiscriminantHistlist[n].Write()", "def cut(S, T, graph):\n ###TODO\n pass", "def butter_bandstop_filter(data, lowcut, highcut, fs, order):\n\n nyq = 0.5 * fs\n low = lowcut / nyq\n high = highcut / nyq\n\n i, u = sg.butter(order, (low, high), btype='bandstop')\n y = sg.filtfilt(i, u, data)\n return y", "def get_data_and_cuts(args):\n\n if args['verbose'] >= 2:\n print(\"Load data\\n\", flush=True)\n data = get_dataset(args)\n\n if args['verbose'] >= 2:\n print(\"Find cuts\", flush=True)\n cuts = get_cuts(data, args, verbose=args['verbose'])\n if args['verbose'] >= 2:\n print(f'\\tI found {len(cuts.values)} cuts\\n')\n\n print(\"Compute cost\", flush=True)\n cost_function = get_cost_function(data, args)\n cuts = compute_cost_and_order_cuts(cuts, cost_function)\n\n cuts = pick_cuts_up_to_order(cuts,\n percentile=args['experiment']['percentile_orders'])\n if args['verbose'] >= 2:\n max_considered_order = cuts.costs[-1]\n print(f\"\\tI will stop at order: {max_considered_order}\")\n print(f'\\tI will use {len(cuts.values)} cuts\\n', 
flush=True)\n\n if args['plot']['cuts']:\n if args['verbose'] >= 2:\n print(f\"\\tPlotting cuts\")\n\n plot_cuts(data, cuts,\n nb_cuts_to_plot=args['plot']['nb_cuts'],\n path=args['plot_dir'])\n\n return data, cuts", "def place(self, sig, bg_x, bg_y, cut_1_range, cut_2_range):\n assert bg_x.shape == bg_y.shape\n npts_1, npts_2 = bg_x.shape\n\n c1_bin_bounds = np.linspace(*cut_1_range, num=(npts_1 + 1))\n c1_bin = np.digitize([self._cut_1], c1_bin_bounds) - 1\n\n c2_bin_bounds = np.linspace(*cut_2_range, num=(npts_2 + 1))\n c2_bin = np.digitize([self._cut_2], c2_bin_bounds) - 1\n\n if any(b < 0 for b in [c1_bin, c2_bin]): \n raise ValueError(\"can't put a cut in the underflow bin\")\n \n eff = float(sig[c1_bin, c2_bin] / sig.max())\n\n def get_rej(bkg_array): \n array_val = bkg_array.max() / bkg_array[c1_bin, c2_bin]\n return float(array_val)\n rej_x, rej_y = [get_rej(ar) for ar in [bg_x, bg_y]]\n\n self._xyz = rej_x, rej_y, eff\n self._cut_ranges = (cut_1_range, cut_2_range)", "def bandpass_filter(data, lowcut, highcut, fs=2000, numtaps=255):\n nyq = fs / 2\n\n # design filter\n fe1 = lowcut / nyq\n fe2 = highcut / nyq\n b = firwin(numtaps, (fe1, fe2), pass_zero=False)\n\n filtered = lfilter(b, 1, data)\n\n return filtered", "def cut(\n self,\n bins,\n **kwargs,\n ):\n\n def squeeze_and_cut(df, *args, **kwargs):\n # We need this function to ensure we squeeze our internal\n # representation (a dataframe) to a Series.\n series = df.squeeze(axis=1)\n return pandas.cut(series, *args, **kwargs)\n\n # We use `default_to_pandas` here since the type and number of\n # results can change depending on the input arguments.\n return self.default_to_pandas(squeeze_and_cut, bins, **kwargs)", "def callback_freq_cut(val):\n global plot_mode\n global idx_freq\n last_plot_mode = plot_mode\n plot_mode = 'freq_cut'\n# print( 'scale_freq', scale_freq)\n idx_freq = freq_to_idx( val, scale_freq )\n val_freq = idx_freq * scale_freq\n# print( 'val idx_freq val_freq', val, idx_freq, val_freq )\n update_num_shadow(int(sld['neighbors'].val))\n #plot 121\n lcutfreq.set_ydata( [val_freq, val_freq])\n lcuttime.set_alpha( 0.0 )\n lcutfreq.set_alpha( alpha_hm )\n #plot 122\n if plot_mode == last_plot_mode:\n replot_flags = get_replot_flag( idx_freq )\n replot_shadow( replot_flags )\n update_shadow( ~replot_flags )\n update_light()\n else:\n replot_shadow( [True, True])\n replot_light()\n reform_axis()\n \n fig.canvas.draw_idle()", "def get_buckets(self, first, last, num_buckets, hertz_cutoff=float(5)):\n # Pensar en la posibilidad de no aplicar PCA, permitir utilizar fft sobre una feature diferente, por ejemplo raiz-cuadrada(x2 + y2 + z2)\n if self.pca == True:\n pca = PCA(n_components=1, copy=True, whiten=True)\n numpy_data = array(self.data)\n transformed_dataset = PCA.fit_transform(pca, numpy_data)\n slice=transformed_dataset[first:last]\n else:\n slice = self.data[first:last]\n slice = [column[0] for column in slice]\n \n transformed = fft.fft(slice)\n absolute = [abs(complex) for complex in transformed]\n\n frequencies = self.get_frequencies()\n\n buckets = [0 for i in range(num_buckets)]\n width = hertz_cutoff / num_buckets\n sum_of_buckets = 0.0000001\n for i in range(1, len(absolute)):\n index = int(frequencies[i] / width)\n if index >= num_buckets:\n break\n buckets[index] += absolute[i]\n sum_of_buckets += absolute[i]\n\n #if args.normalize == 't':\n # buckets = map(lambda x: x/sum_of_buckets, buckets)\n\n return buckets", "def reformat_cuts(input_cuts):\n output_cuts = []\n for cut in input_cuts:\n cut = 
list(cut)\n if cut[1]==None:\n cut[1]=float(\"-inf\")\n if cut[2]==None:\n cut[2]=float(\"inf\")\n cut = tuple(cut)\n output_cuts.append(cut)\n return output_cuts", "def rawSignals(obars, window=21, nbands=3, inc=0.5, save=True):\n bars = obars.copy() # avoid warnings\n bars['OHLC'] = np.nan # typical price\n bars.OHLC.values[:] = np.mean(bars.values[:,0:4], axis=1) # 1000x faster\n price = bars.OHLC.values\n for i in range(nbands):\n upband, sma, lwband = ta.BBANDS(price, window*inc)\n if save: # for plotting stuff\n bars['bandlw'+str(i)] = lwband\n bars['bandup'+str(i)] = upband\n bars['bandsg'+str(i)] = 0 # signal for this band\n signals = fastbollingerSignal(price, upband, lwband)\n bars.loc[:, 'bandsg'+str(i)] = signals.astype(int) # signal for this band\n inc += 0.5\n bars.dropna(inplace=True)\n return bars", "def compute_cost_and_order_cuts(cuts, cost_function):\n\n cost_cuts = np.zeros(len(cuts.values), dtype=float)\n for i_cut, cut in enumerate(cuts.values):\n cost_cuts[i_cut] = cost_function(cut)\n idx = np.argsort(cost_cuts)\n\n cuts.values = cuts.values[idx]\n cuts.costs = cost_cuts[idx]\n if cuts.names is not None:\n cuts.names = cuts.names[idx]\n if cuts.equations is not None:\n cuts.equations = cuts.equations[idx]\n\n return cuts", "def get_sensi_eff_curves_various_exp(bolo_name, WIMP_mass, d_cut, analysis_type, MVA_tag, bin_X, min_X, max_X, list_variables, **kwargs): \n\n try:\n kwargs[\"weight_dir\"]\n except KeyError:\n sys.exit()\n\n #Get heat _fraction\n heat_fraction = kwargs[\"classifier_name\"][13:]\n\n #Get scaling dict to set the weights\n d_scaling = BDT_fh.open_MVA_scaling_file(bolo_name, analysis_type, MVA_tag)\n # print d_scaling\n\n d_event_dir = {\"S1Pb\":\"Beta_and_Pb\", \"S2Pb\":\"Beta_and_Pb\", \"S1Beta\":\"Beta_and_Pb\", \"S2Beta\":\"Beta_and_Pb\",\n \"S1Gamma\":\"Gamma\", \"S2Gamma\":\"Gamma\", \"FidGamma\":\"Gamma\", \n \"heatonly_heat_fraction\" + heat_fraction: \"Heatonly\", \"WIMP_mass_\" + str(WIMP_mass): \"WIMP\"}\n key_heat = \"heatonly_heat_fraction\" + heat_fraction\n\n #Load data\n d_test = dp.get_data_array(bolo_name, 1, analysis_type, MVA_tag, d_event_dir.keys(), 1, list_variables, datasplit = 1)\n\n # Get classifier\n model_dir = script_utils.create_directory(\"../../Classifier_files/\" + bolo_name + \"/\" + analysis_type + \"/\"+ kwargs[\"weight_dir\"] + \"/\") \n if kwargs.has_key(\"classifier_name\"):\n modelfile = model_dir + \"xgboost_classifier_mass_\" + str(WIMP_mass) + \"_\" + kwargs[\"classifier_name\"] + \".model\"\n bst = xgb.Booster({'nthread':16}, model_file = modelfile)\n\n #Get predictions on test sample\n d_pred = {}\n d_hist = {}\n d_color = {\"S1Pb\":kOrange-8, \"S2Pb\":kOrange-9, \"S1Beta\":kGreen+2, \"S2Beta\":kGreen-3,\n \"S1Gamma\":kBlue-7, \"S2Gamma\":kBlue, \"FidGamma\":kAzure+10, key_heat: kRed, \"WIMP_mass_\" + str(WIMP_mass):kGray, \"neutron\":kMagenta}\n\n #ROOT out_dir \n root_dir = script_utils.create_directory(\"./ROOT_files/\" + bolo_name + \"/\" + analysis_type + \"/\")\n file_root = TFile(root_dir + bolo_name + \"_sensi_eff_curves_heat_fraction\" + heat_fraction + \"_mass_\" + str(WIMP_mass) + \".root\", \"recreate\")\n\n #Loop over possible exposure values\n # for exposure in [10, 50, 100, 500]:\n for exposure in [66]:\n script_utils.print_utility(\"Getting sensi + eff for exposure of \" + str(exposure) + \" mass of \" + str(WIMP_mass))\n for event_type in d_test.keys():\n d_pred[event_type] = bst.predict( xgb.DMatrix(d_test[event_type].iloc[:,:-3].values) )\n d_hist[event_type] = TH1F(\"h\" + event_type + 
str(exposure), \"h\" + event_type + str(exposure), bin_X, min_X, max_X)\n PyRPl.fill_TH1(d_hist[event_type], d_pred[event_type])\n PyRPl.process_TH1(d_hist[event_type], use_fill_bool = True, color = d_color[event_type] )\n if \"WIMP\" not in event_type:\n d_hist[event_type].Scale(float(d_scaling[\"prop_\" + event_type])*float(d_scaling[\"exp_per_day\"])*exposure/float(d_hist[event_type].Integral()))\n else:\n d_hist[\"WIMP_mass_\" + str(WIMP_mass)].Scale(8000./d_hist[\"WIMP_mass_\" + str(WIMP_mass)].Integral())\n\n list_hist_bckg =[d_hist[\"S1Pb\"], d_hist[\"S2Pb\"], d_hist[\"S1Beta\"], d_hist[\"S2Beta\"], d_hist[\"S1Gamma\"], d_hist[\"S2Gamma\"], d_hist[\"FidGamma\"], d_hist[key_heat]]\n\n hsum_bckg=TH1F(\"hsum_bckg\" + str(exposure),\"hsum_bckg\" + str(exposure), bin_X, min_X, max_X)\n for i in range(1,bin_X+1):\n sumcontent = sum([h.GetBinContent(i) for h in list_hist_bckg])\n hsum_bckg.SetBinContent(i, sumcontent)\n\n # print hsum_bckg.Integral(hsum_bckg.FindBin(3.5), bin_X)\n # print d_hist[\"WIMP_mass_\" + str(WIMP_mass)].Integral(d_hist[\"WIMP_mass_\" + str(WIMP_mass)].FindBin(3.5), bin_X)/d_hist[\"WIMP_mass_\" + str(WIMP_mass)].Integral()\n\n hs=THStack(\"hs\", \"hs\")\n for hist in list_hist_bckg + [d_hist[\"WIMP_mass_\" + str(WIMP_mass)]]:\n hs.Add(hist)\n\n # cc = TCanvas(\"cc\", \"cc\")\n # h1=TH1F(\"h1\",\"h1\", bin_X, min_X, max_X)\n # PyRPl.process_TH1(h1, X_title=\"BDT ouput\", min_Y = 1E-1, max_Y = 20000)\n \n # gPad.SetLogy()\n # h1.Draw()\n # hs.Draw(\"same\")\n # raw_input()\n\n class Sensitivity:\n def __call__( self, x, par ):\n\n bin_number_sig = d_hist[\"WIMP_mass_\" + str(WIMP_mass)].FindBin(x[0])\n bin_number_bckg = hsum_bckg.FindBin(x[0])\n eff_sig = float(d_hist[\"WIMP_mass_\" + str(WIMP_mass)].Integral(bin_number_sig, bin_X))\n exp_bckg = hsum_bckg.Integral(bin_number_bckg, bin_X)\n\n vec_proba = [TMath.PoissonI(i, exp_bckg) for i in range(500)] \n lim_Poisson_bckg = np.sum(np.array([PoissonCL.compute_90CL_limit(i)*vec_proba[i] for i in range(500)]))\n\n if eff_sig<=0:\n return 1E10\n else:\n return lim_Poisson_bckg/eff_sig + par[0]\n\n class Signal_eff:\n def __call__( self, x, par ):\n\n bin_number = d_hist[\"WIMP_mass_\" + str(WIMP_mass)].FindBin(x[0])\n integ = float(d_hist[\"WIMP_mass_\" + str(WIMP_mass)].Integral(bin_number, bin_X))/float(d_hist[\"WIMP_mass_\" + str(WIMP_mass)].Integral())\n return par[0] + integ\n\n h = TH1F(\"h\", \"h\",100, 0, 10)\n PyRPl.process_TH1(h, X_title = \"BDT cut\", Y_title = \"Sensitivity (a.u.)\")\n h.SetMinimum(1)\n h.SetMaximum(1E3)\n # h.Draw()\n\n fopt = TF1(\"sensitivity_expo_\" + str(exposure), Sensitivity(), 0,10, 1)\n fopt.SetParameter(0,0)\n fopt.SetNpx(100)\n # fopt.Draw(\"same\")\n\n fsig_eff = TF1(\"signal_eff_expo_\" + str(exposure), Signal_eff(), 0,10, 1)\n fsig_eff.SetParameter(0,0)\n fsig_eff.SetNpx(500)\n\n min_X = fopt.GetMinimumX(2,10)\n print \"signal eff\", fsig_eff.Eval(min_X)\n print \"bckg_exp\", hsum_bckg.Integral(hsum_bckg.FindBin(min_X), bin_X)\n\n # fopt.Write()\n # fsig_eff.Write()\n\n # gPad.SetLogy()\n # raw_input()\n # del h \n\n # file_root.Close()", "def geneffcut(energy, array, cutvals=hads, bins=BINS):\n binning = np.digitize(energy, bins) - 1\n binning[binning < 0] = 0.\n binning[binning >= len(bins)-1] = 0.\n hadeffcut = np.zeros(len(energy), dtype=bool)\n for i, cutval in enumerate(cutvals):\n binmask = binning == i\n hadeffcut[binmask] = array[binmask] < cutval\n binning = np.digitize(energy, bins) - 1\n binning[binning < 0] = -1\n binning[binning >= len(bins)-1] = -1\n 
hadeffcut[binning == -1] = 0\n\n return hadeffcut", "def butter_bandpass_filter(data, lowcut, highcut, fs, order=5, axis=0): \n omega = 0.5 * fs\n low = lowcut / omega\n high = highcut / omega\n b, a = signal.butter(order, [low, high], btype='band')\n y = signal.lfilter(b, a, data, axis=0)\n return y", "def cut_train(self, hits, *args):\n n_days = self.predict_window + self.train_window\n # How much free space we have to choose starting day\n free_space = self.inp.data_days - n_days - self.back_offset - self.start_offset\n if self.verbose:\n lower_train_start = self.inp.data_start + pd.Timedelta(self.start_offset, 'D')\n lower_test_end = lower_train_start + pd.Timedelta(n_days, 'D')\n lower_test_start = lower_test_end - pd.Timedelta(self.predict_window, 'D')\n upper_train_start = self.inp.data_start + pd.Timedelta(free_space - 1, 'D')\n upper_test_end = upper_train_start + pd.Timedelta(n_days, 'D')\n upper_test_start = upper_test_end - pd.Timedelta(self.predict_window, 'D')\n print(f\"Free space for training: {free_space} days.\")\n print(f\" Lower train {lower_train_start}, prediction {lower_test_start}..{lower_test_end}\")\n print(f\" Upper train {upper_train_start}, prediction {upper_test_start}..{upper_test_end}\")\n # Random starting point\n offset = tf.random_uniform((), self.start_offset,self.start_offset + free_space+1, dtype=tf.int32, seed=self.rand_seed)\n end = offset + n_days\n # Cut all the things\n return self.cut(hits, offset, end) + args", "def __init__(self, predict_lowerbound: float, first_season: int, aug_num_cuts: int, aug_min_cuts_on: int,\n cdf_cutoff: float):\n super().__init__(CutLayer(MultiplyAggregateLayer(InnerAppearanceLayer(first_season, aug_num_cuts,\n aug_min_cuts_on, cdf_cutoff)), mean, 1.0, predict_lowerbound))", "def efficient_tagged_jets_hist(datalist,discriminant, discriminant_cut, CSV_cut, bins, Difference=False, mode=\"pT_jet\",Save=False):\n title = \"tagged_jets_vs_\"+mode\n AllJetsHistlist = []\n CSVHistlist = []\n DiscriminantHistlist = []\n if mode == \"pT_hadron\":\n feature = 2\n elif mode == \"pT_jet\":\n feature = 3\n elif mode == \"decay_vx\":\n feature = 4\n for n,data in enumerate(datalist):\n print \"working on\",data[1]\n ran = data[2]\n AllJetsHistlist.append(rt.TH1D(data[1]+\"_AllJets\",data[1]+\"_\"+title,bins,ran[0],ran[1]))\n AllJetsHistlist[n].SetLineColor(4)\n CSVHistlist.append(rt.TH1D(data[1]+\"_CSV\",data[1]+\"_\"+title,bins,ran[0],ran[1]))\n CSVHistlist[n].SetLineColor(3)\n DiscriminantHistlist.append(rt.TH1D(data[1]+\"_Discriminant\",data[1]+\"_\"+title,bins,ran[0],ran[1]))\n DiscriminantHistlist[n].SetLineColor(2)\n for particle in data[0]:\n AllJetsHistlist[n].Fill(particle[feature])\n if particle[1] >= CSV_cut: CSVHistlist[n].Fill(particle[feature])\n if Difference:\n L = particle[8]-particle[5]\n else:\n if particle[13] != 0:\n L = particle[16]/float(particle[13])\n else:\n continue\n if L >= discriminant_cut: DiscriminantHistlist[n].Fill(particle[feature])\n canvaslist = []\n legendlist = []\n Tfilelist = []\n for n,data in enumerate(datalist):\n canvaslist.append(rt.TCanvas(data[1]+\"_canvas\",\"canvas\",600,600))\n canvaslist[n].SetTitle(data[1]+\"_\"+title)\n rt.gStyle.SetOptStat(0)\n legendlist.append(rt.TLegend(0.9,0.9,0.65,0.75))\n legendlist[n].AddEntry(AllJetsHistlist[n], \"All jets\")\n legendlist[n].AddEntry(CSVHistlist[n], \"CSV\")\n legendlist[n].AddEntry(DiscriminantHistlist[n], discriminant)\n AllJetsHistlist[n].GetXaxis().SetTitle(mode)\n AllJetsHistlist[n].GetYaxis().SetTitle('# jets')\n 
AllJetsHistlist[n].GetYaxis().SetTitleOffset(1.5)\n #AllJetsHistlist[n].Draw()\n #CSVHistlist[n].Draw(\"SAME\")\n #DiscriminantHistlist[n].Draw(\"SAME\")\n #legendlist[n].Draw()\n if Save:\n #canvaslist[n].SaveAs(\"Thesis_Plots/\"+title+\"_\"+data[1]+discriminant+\".png\")\n Tfilelist.append(rt.TFile(\"Thesis_Plots/root_files/\"+title+\"_\"+data[1]+discriminant+\".root\",\"recreate\"))\n AllJetsHistlist[n].Write()\n CSVHistlist[n].Write()\n DiscriminantHistlist[n].Write()", "def group_causality(sig_list, condition, freqs, ROI_labels=None,\n out_path=None, submount=10):\n print 'Running group causality...'\n set_directory(out_path)\n sig_caus = []\n\n for f in sig_list:\n sig_cau = np.load(f)\n print sig_cau.shape[-1]\n sig_caus.append(sig_cau)\n\n sig_caus = np.array(sig_caus)\n sig_group = sig_caus.sum(axis=0)\n plt.close()\n for i in xrange(len(sig_group)):\n fmin, fmax = freqs[i][0], freqs[i][1]\n cau_band = sig_group[i]\n # cau_band[cau_band < submount] = 0\n cau_band[cau_band < submount] = 0\n # fig, ax = pl.subplots()\n cmap = plt.get_cmap('hot', cau_band.max()+1-submount)\n cmap.set_under('gray')\n plt.matshow(cau_band, interpolation='nearest', vmin=submount, cmap=cmap)\n if ROI_labels == None:\n ROI_labels = np.arange(cau_band.shape[0]) + 1\n pl.xticks(np.arange(cau_band.shape[0]), ROI_labels, fontsize=9, rotation='vertical')\n pl.yticks(np.arange(cau_band.shape[0]), ROI_labels, fontsize=9)\n # pl.imshow(cau_band, interpolation='nearest')\n # pl.set_cmap('BlueRedAlpha')\n np.save(out_path + '/%s_%s_%sHz.npy' %\n (condition, str(fmin), str(fmax)), cau_band)\n v = np.arange(submount, cau_band.max()+1, 1)\n\n # cax = ax.scatter(x, y, c=z, s=100, cmap=cmap, vmin=10, vmax=z.max())\n # fig.colorbar(extend='min')\n\n plt.colorbar(ticks=v, extend='min')\n # pl.show()\n plt.savefig(out_path + '/%s_%s_%sHz.png' %\n (condition, str(fmin), str(fmax)), dpi=300)\n plt.close()\n return", "def Make_Binned_ANN_ROC_Curves(title,Signal_title,Background_title,bins,log=False):\n #hsv = plt.get_cmap('hsv')\n #color = hsv(np.linspace(0,1.0,len(bins)-1))\n #color = ['b', 'g', 'r', 'c', 'm', 'y']\n if len(bins)<=6:\n color = ['red','green','blue','orange','brown']\n else:\n color = ['deepskyblue','rosybrown','olivedrab','royalblue','firebrick','chartreuse','navy','red','darkorchid','lightseagreen','mediumvioletred','blue']\n nbins = 60\n\tdis_string = \"ANN_\"\n\n Signal_file = rt.TFile(\"Thesis_Plots/root_files/{}_ANN_histograms.root\".format(Signal_title),\"READ\")\n Background_file = rt.TFile(\"Thesis_Plots/root_files/{}_ANN_histograms.root\".format(Background_title),\"READ\")\n\n plt.figure(\"ROC\")\n plt.clf()\n\n for bin_ in range(len(bins)-1):\n Dis_Signal_Eff = FCM.Get_ROC_Efficiencies(Signal_file.Get(dis_string+str(bins[bin_])+\"_\"+str(bins[bin_+1])),(0,1),nbins,0)\n Dis_BG_Eff = FCM.Get_ROC_Efficiencies(Background_file.Get(dis_string+str(bins[bin_])+\"_\"+str(bins[bin_+1])),(0,1),nbins,0)\n CSV_Signal_Eff = FCM.Get_ROC_Efficiencies(Signal_file.Get(\"CSV_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1])),(0,1),ratio_bins,0)\n CSV_BG_Eff = FCM.Get_ROC_Efficiencies(Background_file.Get(\"CSV_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1])),(0,1),ratio_bins,0)\n if log:\n plt.semilogy(Dis_Signal_Eff,Dis_BG_Eff, color = color[bin_], linestyle = '-',label=str(bins[bin_])+\"_\"+str(bins[bin_+1]))\n plt.semilogy(CSV_Signal_Eff,CSV_BG_Eff, color = color[bin_],linestyle = '--',)\n\n else:\n plt.plot(Dis_Signal_Eff,1-Dis_BG_Eff, color = color[bin_], linestyle = '-',label=str(bins[bin_])+\"_\"+str(bins[bin_+1]))\n 
plt.plot(CSV_Signal_Eff,1-CSV_BG_Eff, color = color[bin_],linestyle = '--',)\n\n if log:\n\t\tif diff:\n\t\t\tplt.semilogy([0,0],[0,0],'k-',label = 'L4-L1')\n\t\telse:\n \tplt.semilogy([0,0],[0,0],'k-',label = 'L4/L1')\n plt.semilogy([0,0],[0,0],'k-.',label = 'CSV')\n plt.semilogy([0,1],[0.1,0.1],'k:')\n plt.xlabel(r\"$\\epsilon$_signal\")\n plt.ylabel(r\"$\\epsilon$_background\")\n plt.legend(loc=4)\n else:\n\t\tif diff:\n\t\t\tplt.plot([0,0],[0,0],'k-',label = 'L4-L1')\n\t\telse:\n \tplt.plot([0,0],[0,0],'k-',label = 'L4/L1')\n plt.plot([0,0],[0,0],'k-.',label = 'CSV')\n #plt.plot([0,1],[0.9,0.9],'k:',label=\"10% mistag\")\n plt.plot([0,1],[0.9,0.9],'k:')\n plt.xlabel(r\"$\\epsilon$_signal\")\n plt.ylabel(r\"1-$\\epsilon$_background\")\n plt.legend(loc=3)\n #plt.title(title+\"_ROC-Curves\")\n\n plt.savefig(\"Thesis_Plots/{}_ROC_Curves.png\".format(title))\n print \"saved as Thesis_Plots/{}_ROC_Curves.png\".format(title)", "def cut_sig(self):\n c = TCut(self.cut_both)\n c += TCut(self._return_if('_cut_sig'))\n return c", "def get_pop_list_for_scaling(bolo_name, d_cut, data_dir, tree_name = \"t_merged\"):\n \n file_tree = TFile(data_dir+bolo_name+\"_lowmass_fond.root\")\n tree = file_tree.Get(tree_name)\n\n #Create background hist directory\n pop_path_name = script_utils.create_directory(\"../Analyse_\" + bolo_name + \"/Populations/Pop_for_scaling/\")\n\n #Load the estimator\n d_est = BDT_fh.open_estimator_file(bolo_name)\n d_std_true_events = BDT_fh.open_true_event_FWHM_file(bolo_name)\n\n #Best estimator for heat: coefficients\n coeff_EC1, coeff_EC2 = str(d_est[\"HEAT\"][:5]), str(1- float(d_est[\"HEAT\"][:5]))\n coeff_EIA, coeff_EIB = str(d_est[\"S1\"][:5]), str(1-float(d_est[\"S1\"][:5]))\n coeff_EIC, coeff_EID = str(d_est[\"S2\"][:5]), str(1-float(d_est[\"S2\"][:5]))\n \n sigma_IA = str(d_std_true_events[\"FWIA\"])\n sigma_IC = str(d_std_true_events[\"FWIC\"])\n sigma_IB = str(d_std_true_events[\"FWIB\"])\n sigma_ID = str(d_std_true_events[\"FWID\"])\n\n #Load standard cuts\n TCut_path_name = script_utils.create_directory(\"../Cut_files/\") \n TCut_file_name =\"TCuts.txt\" \n file_TCut =\"\" \n #Add an exception if the file does not exist\n try:\n file_TCut = script_utils.open_text_file(TCut_path_name, TCut_file_name , \"r\")\n except IOError: \n script_utils.print_utility(script_utils.COL(\"No such file, use get_standard_cuts.py first\",\"fail\"))\n sys.exit()\n \n # Load the cut values. 
\n list_file_TCut_lines =[line.rstrip().split(\",\") for line in file_TCut.readlines()]\n standard_cuts =\"\"\n # Add a boolean flag to check if the bolo has its cuts in the file\n is_bolo_in_file =False\n for line in list_file_TCut_lines:\n if bolo_name == line[0]:\n standard_cuts = line[1]\n is_bolo_in_file = True\n assert(is_bolo_in_file)\n\n \n l_all = TEventList(\"l_all\")\n l_heatonly = TEventList(\"l_heatonly\")\n \n l_FidGamma = TEventList(\"l_FidGamma\")\n l_S1Gamma = TEventList(\"l_S1Gamma\")\n l_S2Gamma = TEventList(\"l_S2Gamma\")\n \n l_S1Beta = TEventList(\"l_S1Beta\")\n l_S2Beta = TEventList(\"l_S2Beta\")\n \n l_S1Pb = TEventList(\"l_S1Pb\")\n l_S2Pb = TEventList(\"l_S2Pb\")\n\n\n string_EC = coeff_EC1 + \"*EC1_ERA+\" + coeff_EC2 + \"*EC2_ERA\"\n string_EI = coeff_EIB + \"*EIB+\" + coeff_EID + \"*EID\"\n\n standard_cuts = standard_cuts + \"&&KTH<1&&KTH>0\"\n heat_cut = str(d_cut[\"ECinf\"]) + \"<\" + string_EC + \"&&\" + str(d_cut[\"ECsup\"]) + \">\" + string_EC + \"&& abs(EC1_ERA-EC2_ERA)<1\"\n ion_cut = str(d_cut[\"EIinf\"]) + \"<\" + string_EI + \"&&\" + str(d_cut[\"EIsup\"]) + \">\" + string_EI\n veto_cut = \"EIA<\" + str(d_cut[\"sigma_vet\"]) + \"*\" + sigma_IA + \"&&\" + \"EIC<\" + str(d_cut[\"sigma_vet\"]) + \"*\" + sigma_IC\n \n # all_cuts = \"&&\".join([standard_cuts, heat_cut, ion_cut, veto_cut])\n all_cuts = \"&&\".join([standard_cuts, heat_cut, ion_cut, veto_cut])\n\n # print tree\n # print all_cuts.split(\"&&\")\n # raw_input()\n\n ###############################\n # All\n ###############################\n tree.Draw(\">>l_all\", all_cuts )\n pop_len = l_all.GetN()\n pop_file_name = bolo_name + \"_all_KTH_cut_full_info.txt\"\n pop_file = script_utils.open_text_file(pop_path_name, pop_file_name, \"w\")\n for k in range(pop_len):\n counter = l_all.GetEntry(k)\n tree.GetEntry(counter)\n pop_file.write(str(tree.EC1_ERA) + \",\" + str(tree.EC2_ERA) + \",\" + str(tree.EIA) + \",\" + str(tree.EIB) + \",\" + str(tree.EIC) + \",\" + str(tree.EID) + \"\\n\")\n pop_file.close()\n\n ###############################\n # Heatonly\n ###############################\n tree.Draw(\">>l_heatonly\",all_cuts + \" && EIA<2.7*\" + sigma_IA +\" && EIB<2.7*\" + sigma_IB +\"&& EIC<2.7*\" + sigma_IC +\"&& EID<2.7*\" + sigma_ID)\n pop_len = l_heatonly.GetN()\n pop_file_name = bolo_name + \"_heatonly_KTH_cut_full_info.txt\"\n pop_file = script_utils.open_text_file(pop_path_name, pop_file_name, \"w\")\n for k in range(pop_len):\n counter = l_heatonly.GetEntry(k)\n tree.GetEntry(counter)\n pop_file.write(str(tree.EC1_ERA) + \",\" + str(tree.EC2_ERA) + \",\" + str(tree.EIA) + \",\" + str(tree.EIB) + \",\" + str(tree.EIC) + \",\" + str(tree.EID) + \"\\n\")\n pop_file.close()\n\n\n ##################################\n # G A M M A E V E N T S\n ##################################\n #Fiducial gammas\n tree.Draw(\">>l_FidGamma\",all_cuts + \" && EIA<2.7*\" + sigma_IA +\" && EIB>2.7*\" + sigma_IB +\"&& EIC<2.7*\" + sigma_IC +\"&& EID>2.7*\" + sigma_ID + \" &&\" + d_est[\"Q_FID\"] + \">0.7\")\n pop_len = l_FidGamma.GetN()\n pop_file_name = bolo_name + \"_FidGamma_KTH_cut_full_info.txt\"\n pop_file = script_utils.open_text_file(pop_path_name, pop_file_name, \"w\")\n for k in range(pop_len):\n counter = l_FidGamma.GetEntry(k)\n tree.GetEntry(counter)\n pop_file.write(str(tree.EC1_ERA) + \",\" + str(tree.EC2_ERA) + \",\" + str(tree.EIA) + \",\" + str(tree.EIB) + \",\" + str(tree.EIC) + \",\" + str(tree.EID) + \"\\n\")\n pop_file.close() \n #S1 gammas\n tree.Draw(\">>l_S1Gamma\",all_cuts + \" && EIA>2.7*\" + 
sigma_IA +\" && EIB>2.7*\" + sigma_IB +\"&& EIC<2.7*\" + sigma_IC +\"&& EID<2.7*\" + sigma_ID + \" &&\" + d_est[\"Q_S1\"] + \">0.65\")\n pop_len = l_S1Gamma.GetN()\n pop_file_name = bolo_name + \"_S1Gamma_KTH_cut_full_info.txt\"\n pop_file = script_utils.open_text_file(pop_path_name, pop_file_name, \"w\")\n for k in range(pop_len):\n counter = l_S1Gamma.GetEntry(k)\n tree.GetEntry(counter)\n pop_file.write(str(tree.EC1_ERA) + \",\" + str(tree.EC2_ERA) + \",\" + str(tree.EIA) + \",\" + str(tree.EIB) + \",\" + str(tree.EIC) + \",\" + str(tree.EID) + \"\\n\")\n pop_file.close() \n #S2 gammas\n tree.Draw(\">>l_S2Gamma\",all_cuts + \" && EIA<2.7*\" + sigma_IA +\" && EIB<2.7*\" + sigma_IB +\"&& EIC>2.7*\" + sigma_IC +\"&& EID>2.7*\" + sigma_ID + \" &&\" + d_est[\"Q_S2\"] + \">0.65\")\n pop_len = l_S2Gamma.GetN()\n pop_file_name = bolo_name + \"_S2Gamma_KTH_cut_full_info.txt\"\n pop_file = script_utils.open_text_file(pop_path_name, pop_file_name, \"w\")\n for k in range(pop_len):\n counter = l_S2Gamma.GetEntry(k)\n tree.GetEntry(counter)\n pop_file.write(str(tree.EC1_ERA) + \",\" + str(tree.EC2_ERA) + \",\" + str(tree.EIA) + \",\" + str(tree.EIB) + \",\" + str(tree.EIC) + \",\" + str(tree.EID) + \"\\n\")\n pop_file.close() \n\n ##################################\n # B E T A E V E N T S\n ##################################\n #S1 beta\n tree.Draw(\">>l_S1Beta\",all_cuts + \" && EIA>2.7*\" + sigma_IA +\" && EIB>2.7*\" + sigma_IB +\"&& EIC<2.7*\" + sigma_IC +\"&& EID<2.7*\" + sigma_ID + \" &&\" + d_est[\"Q_S1\"] + \"<0.65 && \" + d_est[\"Q_S1\"] + \">0.2\")\n pop_len = l_S1Beta.GetN()\n pop_file_name = bolo_name + \"_S1Beta_KTH_cut_full_info.txt\"\n pop_file = script_utils.open_text_file(pop_path_name, pop_file_name, \"w\")\n for k in range(pop_len):\n counter = l_S1Beta.GetEntry(k)\n tree.GetEntry(counter)\n pop_file.write(str(tree.EC1_ERA) + \",\" + str(tree.EC2_ERA) + \",\" + str(tree.EIA) + \",\" + str(tree.EIB) + \",\" + str(tree.EIC) + \",\" + str(tree.EID) + \"\\n\")\n pop_file.close() \n #S2 beta\n tree.Draw(\">>l_S2Beta\",all_cuts + \" && EIA<2.7*\" + sigma_IA +\" && EIB<2.7*\" + sigma_IB +\"&& EIC>2.7*\" + sigma_IC +\"&& EID>2.7*\" + sigma_ID + \" &&\" + d_est[\"Q_S2\"] + \"<0.65 && \" + d_est[\"Q_S2\"] + \">0.2\")\n pop_len = l_S2Beta.GetN()\n pop_file_name = bolo_name + \"_S2Beta_KTH_cut_full_info.txt\"\n pop_file = script_utils.open_text_file(pop_path_name, pop_file_name, \"w\")\n for k in range(pop_len):\n counter = l_S2Beta.GetEntry(k)\n tree.GetEntry(counter)\n pop_file.write(str(tree.EC1_ERA) + \",\" + str(tree.EC2_ERA) + \",\" + str(tree.EIA) + \",\" + str(tree.EIB) + \",\" + str(tree.EIC) + \",\" + str(tree.EID) + \"\\n\")\n pop_file.close() \n\n ##################################\n # P b E V E N T S\n ##################################\n # S1 Pb\n tree.Draw(\">>l_S1Pb\",all_cuts + \" && EIA>2.7*\" + sigma_IA +\" && EIB>2.7*\" + sigma_IB +\"&& EIC<2.7*\" + sigma_IC +\"&& EID<2.7*\" + sigma_ID + \" &&\" + d_est[\"Q_S1\"] + \"<0.15 &&\" + d_est[\"Q_S1\"] + \">0.04\")\n print \n pop_len = l_S1Pb.GetN()\n pop_file_name = bolo_name + \"_S1Pb_KTH_cut_full_info.txt\"\n pop_file = script_utils.open_text_file(pop_path_name, pop_file_name, \"w\")\n for k in range(pop_len):\n counter = l_S1Pb.GetEntry(k)\n tree.GetEntry(counter)\n pop_file.write(str(tree.EC1_ERA) + \",\" + str(tree.EC2_ERA) + \",\" + str(tree.EIA) + \",\" + str(tree.EIB) + \",\" + str(tree.EIC) + \",\" + str(tree.EID) + \"\\n\")\n pop_file.close() \n # S2 Pb\n tree.Draw(\">>l_S2Pb\",all_cuts + \" && EIA<2.7*\" + sigma_IA +\" 
&& EIB<2.7*\" + sigma_IB +\"&& EIC>2.7*\" + sigma_IC +\"&& EID>2.7*\" + sigma_ID + \" &&\" + d_est[\"Q_S2\"] + \"<0.15 &&\" + d_est[\"Q_S2\"] + \">0.04\")\n pop_len = l_S2Pb.GetN()\n pop_file_name = bolo_name + \"_S2Pb_KTH_cut_full_info.txt\"\n pop_file = script_utils.open_text_file(pop_path_name, pop_file_name, \"w\")\n for k in range(pop_len):\n counter = l_S2Pb.GetEntry(k)\n tree.GetEntry(counter)\n pop_file.write(str(tree.EC1_ERA) + \",\" + str(tree.EC2_ERA) + \",\" + str(tree.EIA) + \",\" + str(tree.EIB) + \",\" + str(tree.EIC) + \",\" + str(tree.EID) + \"\\n\")\n pop_file.close() \n\n list_list = [l_heatonly, l_FidGamma, l_S1Gamma, l_S2Gamma, l_S1Beta, l_S2Beta, l_S1Pb, l_S2Pb]\n list_num = [l.GetN() for l in list_list]\n list_ev = [\"heatonly\", \"FidGamma\", \"S1Gamma\", \"S2Gamma\", \"S1Beta\", \"S2Beta\", \"S1Pb\", \"S2Pb\"]\n for ev, num in zip(list_ev, list_num):\n print ev, num\n\n print \"all known\", sum(list_num)\n print \"all\", l_all.GetN()\n\n del l_all\n del l_heatonly\n\n del l_FidGamma\n del l_S1Gamma\n del l_S2Gamma \n\n del l_S1Beta \n del l_S2Beta \n\n del l_S1Pb \n del l_S2Pb", "def extract_waveforms(signal, fs, spikes_idx, pre, post):\n cutouts = []\n pre_idx = int(pre * fs)\n post_idx = int(post * fs)\n for index in spikes_idx:\n if index-pre_idx >= 0 and index+post_idx <= signal.shape[0]:\n cutout = signal[(index-pre_idx):(index+post_idx)]\n cutouts.append(cutout)\n return np.stack(cutouts)", "def seed_plots(self, bcut=5, subset=None, title=None):\n z = self.seeds if subset is None else self.seeds[subset]\n fig,axx= plt.subplots(1,3, figsize=(12,4))\n plt.subplots_adjust(left=0.1)\n bc = np.abs(z.b)<bcut\n histkw=dict(histtype='step', lw=2)\n def all_plot(ax, q, dom, label):\n ax.hist(q.clip(dom[0],dom[-1]),dom, **histkw)\n ax.hist(q[bc].values.clip(dom[0],dom[-1]),dom, color='orange', label='|b|<%d'%bcut, **histkw)\n plt.setp(ax, xlabel=label, xlim=(None,dom[-1]))\n ax.grid()\n ax.legend(prop=dict(size=10))\n all_plot(axx[0], z['size'], np.linspace(0.5,10.5,11), 'cluster size')\n all_plot(axx[1], z.ts, np.linspace(0,50,26), 'TS')\n all_plot(axx[2], np.sin(np.radians(z.b)), np.linspace(-1,1,41), 'sin(b)')\n axx[2].axvline(0, color='k')\n fig.suptitle('{} {} seeds from model {}'.format( len(z), self.tsname, self.input_model,)\n if title is None else title)\n fig.set_facecolor('white')\n return fig", "def get_eff(arrays_iterator, cut_function, cut_values):\n n_cuts = len(cut_values)\n n_total = np.zeros(n_cuts)\n n_pass = np.zeros(n_cuts)\n for arrays, dataset in arrays_iterator:\n weight = dataset.get_weight()\n for i_cut, cut in enumerate(cut_values):\n this_n_pass, this_n_total = cut_function(arrays, cut)\n n_total[i_cut] += weight * this_n_total\n n_pass[i_cut] += weight * this_n_pass\n # Basically n_pass / n_total, but returns 0 if n_total has a 0 somewhere\n eff = np.divide(n_pass, n_total, out=np.zeros_like(n_pass), where=n_total!=0)\n return eff, n_pass, n_total", "def generate_signals(symbol, period=default_period, std=default_std, refresh=False, start_date=config.start_date, end_date=config.end_date):\n\n bb(symbol, period, std, refresh=False, start_date=start_date, end_date=end_date)\n df = pd.read_csv(utils.get_file_path(config.ta_data_path, table_filename, symbol=symbol), index_col=\"Date\", parse_dates=[\"Date\"])[start_date:end_date]\n\n signal_column_name = get_signal_name(period=period, std=std)\n if signal_column_name not in df.columns:\n lower_column_name = \"Lower\"\n upper_column_name = \"Upper\"\n\n conditions = [\n ((df[\"Close\"].shift(1) 
> df[lower_column_name].shift(1)) & (df[\"Close\"] < df[lower_column_name])), # price crosses lower band; buy signal\n ((df[\"Close\"].shift(1) < df[upper_column_name].shift(1)) & (df[\"Close\"] > df[upper_column_name])), # price crosses upper band; sell signal\n False, # ((df[\"Close\"].shift(1) < df[\"Mid\"].shift(1)) & (df[\"Close\"] > df[\"Mid\"])) # bb breaches the mid line after a buy signal, soft sell\n False # ((df[\"Close\"].shift(1) > df[\"Mid\"].shift(1)) & (df[\"Close\"] < df[\"Mid\"])) # bb breaches the mid line after a sell signal, soft buy\n ]\n\n df[signal_column_name] = np.select(conditions, ta.signals, default=ta.default_signal)\n utils.debug(df[signal_column_name])\n df.to_csv(utils.get_file_path(config.ta_data_path, table_filename, symbol=symbol))\n\n return df[signal_column_name]", "def generate_cut_labels(var, bin_edges, bottom_inclusive=True):\n incl = '=' if bottom_inclusive else ''\n return ['{low:g} <{incl} {var} < {high:g}'.format(var=var, low=bin_low,\n high=bin_high, incl=incl)\n for (bin_low, bin_high) in bin_edges]", "def plot_roccurve(signals, bkgs, cut_function, cut_values, ax):\n eff_sig, eff_bkg, n_pass_sig, n_pass_bkg, n_total_sig, n_total_bkg = roccurve(signals, bkgs, cut_function, cut_values)\n return _draw_roccurve(eff_sig, eff_bkg, cut_values, ax)", "def add_bucketing_callbacks(self, X, y):\n app = self.app\n add_common_callbacks(self)\n\n @app.callback(\n [Output(\"input_map\", \"value\")],\n [\n Input(\"input_column\", \"value\"),\n ],\n )\n def update_input_map(col):\n \"\"\"Update bucketer map.\"\"\"\n input_map = self.features_bucket_mapping_.get(col).map\n col_type = self.features_bucket_mapping_.get(col).type\n\n if col_type == \"categorical\":\n # We also allow for treating numerical as categoricals\n # if key is a string, we'll need to quote them\n if isinstance(list(input_map.keys())[0], str):\n str_repr = \",\\n\\t\".join([f\"'{k}': {v}\" for k, v in input_map.items()])\n else:\n str_repr = \",\\n\\t\".join([f\"{k}: {v}\" for k, v in input_map.items()])\n str_repr = f\"{{\\n\\t{str_repr}\\n}}\"\n else:\n str_repr = str(input_map)\n return [str_repr]\n\n @app.callback(\n [Output(\"input_map_helptext\", \"children\")],\n [\n Input(\"input_column\", \"value\"),\n ],\n )\n def update_input_map_feedback(col):\n col_type = self.features_bucket_mapping_.get(col).type\n right = self.features_bucket_mapping_.get(col).right\n if col_type == \"categorical\":\n msg = \"Edit the prebucket mapping dictionary, e.g. {'value' : 'pre-bucket'}\"\n if col_type == \"numerical\" and right:\n msg = \"Edit the prebucket mapping boundaries. \"\n msg += \"Values up to and including the boundary are put into a bucket (right=True)\"\n if col_type == \"numerical\" and not right:\n msg = \"Edit the prebucket mapping boundaries. 
\"\n msg += \"Values up to but not including the boundary are put into a bucket (right=False)\"\n return [msg]\n\n @app.callback(\n [\n Output(\"bucket_table\", \"data\"),\n Output(\"graph-bucket\", \"figure\"),\n Output(\"input_map\", \"invalid\"),\n Output(\"input_map_feedback\", \"children\"),\n ],\n [Input(\"input_map\", \"value\")],\n [State(\"input_column\", \"value\")],\n )\n def get_bucket_table(input_map, col):\n \"\"\"Loads the table and the figure, when the input_map changes.\"\"\"\n col_type = self.features_bucket_mapping_.get(col).type\n\n # Load the object from text input into python object\n if col_type == \"numerical\":\n try:\n input_map = json.loads(input_map)\n assert len(input_map) > 0\n except Exception:\n msg = \"Make sure the input is properly formatted as a list\"\n return no_update, no_update, True, [msg]\n # validate input\n if not is_increasing(input_map):\n return no_update, no_update, True, [\"Make sure the list values are in increasing order\"]\n else:\n try:\n # note using ast.literal_eval is not safe\n # for use when you don't trust the user input\n # in this case, it's a local user using his/her own kernel\n # note: we're using literal_eval because JSON enforces quoted keys\n input_map = ast.literal_eval(input_map)\n # re-sort on value, key\n input_map = dict(sorted(input_map.items(), key=lambda x: (x[1], x[0])))\n except Exception:\n msg = \"Make sure the input is properly formatted as a dictionary\"\n return no_update, no_update, True, [msg]\n # validate input\n if not min(input_map.values()) == 0:\n msg = \"Dictionary values (buckets) must start at 0\"\n return no_update, no_update, True, [msg]\n if not is_sequential(list(input_map.values())):\n msg = \"Dictionary values (buckets) must be sequentially increasing with steps of 1\"\n return no_update, no_update, True, [msg]\n\n # Update the fit for this specific column\n special = self.features_bucket_mapping_.get(col).specials\n right = self.features_bucket_mapping_.get(col).right\n # Note we passed X, y to add_bucketing_callbacks() so they are available here.\n # make sure to re-generate the summary table\n self._update_column_fit(\n X=X, y=y, feature=col, special=special, splits=input_map, right=right, generate_summary=True\n )\n\n # Retrieve the new bucket tables and plots\n table = self.bucket_table(col)\n # unsupervised bucketers don't have an event rate.\n if \"Event Rate\" in table.columns:\n table[\"Event Rate\"] = round(table[\"Event Rate\"] * 100, 2)\n fig = self.plot_bucket(col)\n # remove title from plot\n fig.update_layout(title=\"\")\n return table.to_dict(\"records\"), fig, False, no_update", "def plot_single_roccurve(signals, bkgs, cut_function, cut_values, ax=None):\n # Get a default ax if none is given\n if ax is None:\n import matplotlib.pyplot as plt\n fig = plt.figure(figsize=(8,8))\n ax = fig.gca()\n # Plot the base line\n ax.plot([0.0,1.0], [0.0,1.0], linestyle='--', color='xkcd:gray')\n # Plot the single roccurve\n line = plot_roccurve(signals, bkgs, cut_function, cut_values, ax=ax)\n line.set_label(bkgs[0].get_category())\n # Plot settings\n ax.set_xlim(0.0, 1.05)\n ax.set_ylim(0.0, 1.05)\n ax.set_ylabel('Signal eff.', fontsize=DEFAULT_FONTSIZE)\n ax.set_xlabel('Bkg eff.', fontsize=DEFAULT_FONTSIZE)\n ax.legend(fontsize=DEFAULT_FONTSIZE)\n return ax", "def cchalf(dataframe, function, bins):\n dist = dataframe.set_index(['H', 'K', 'L'])['D'].drop_duplicates()\n dmin = dist.min()\n dmax = dist.max()\n binedges = np.linspace(dmin**-2, dmax**-2, bins+1)**-0.5\n binedges = 
list(zip(binedges[:-1], binedges[1:]))\n a,b = split(dataframe)\n xval_a, xval_b = function(a), function(b)\n#TODO: Fix this awful hack\n key = [i for i in xval_a if i!='D'][0]\n xval_a, xval_b = xval_a.join(dist),xval_b.join(dist)\n idx = xval_a.index.intersection(xval_b.index)\n xval_a,xval_b = xval_a.loc[idx],xval_b.loc[idx]\n cchalf = []\n for dmin,dmax in binedges:\n idx = (xval_a['D'] > dmin) & (xval_a['D'] < dmax)\n a = np.array(xval_a[idx][key]).flatten()\n b = np.array(xval_b[idx][key]).flatten()\n cchalf.append(np.corrcoef(a,b)[0, 1])\n return cchalf, binedges", "def setCutFile(self, cutfile):\n self.cutfile = cutfile\n self.cuts = {}\n with open(cutfile) as f:\n for l in f:\n llist = l.strip().split()\n cut = str(int(llist[0])).zfill(2)\n seq = cutinfo11[cut][1]\n self.cuts[cut] = [readQIE.sequences(elem,seq) for elem in llist[1:]]", "def component_func(B, C, F, list_funcs, len_kegg, threshold=0.05):\n\n size_label = 18\n size_tick = 18\n sns.set_style(\"darkgrid\")\n\n list_funcs = list_funcs[0:len_kegg]\n B = B[0:len_kegg]\n C = C[0:len_kegg]\n\n idx_p = [idx*2+0 for idx in range(B.shape[1]/2)]\n idx_m = [idx*2+1 for idx in range(B.shape[1]/2)]\n is_p = F[:,idx_p].mean(axis=1) - F[:,idx_m].mean(axis=1) > threshold\n is_m = F[:,idx_m].mean(axis=1) - F[:,idx_p].mean(axis=1) > threshold\n is_p = [idx for idx, val in enumerate(is_p) if val]\n is_m = [idx for idx, val in enumerate(is_m) if val]\n is_n = [idx for idx in range(C.shape[1]) if (idx not in is_p) and (idx not in is_m)]\n\n fig = plt.figure(figsize=(6, 6))\n\n for idx_c in range(C.shape[1]):\n clone_num = idx_c +1\n if idx_c in is_p:\n color=\"green\"\n label = \"C\"+str(clone_num)+\"|P\"\n elif idx_c in is_m:\n color=\"red\"\n label = \"C\"+str(clone_num)+\"|M\"\n else:\n color = \"royalblue\"\n label = \"C\"+str(clone_num)\n\n if clone_num in [1]:\n marker = \"$1$\"\n elif clone_num in [2]:\n marker = \"$2$\"\n elif clone_num in [3]:\n marker = \"$3$\"\n elif clone_num in [4]:\n marker = \"$4$\"\n elif clone_num in [5]:\n marker = \"$5$\"\n elif clone_num in [6]:\n marker = \"$6$\"\n else:\n print(\"error\")\n plt.plot(C[:,idx_c],range(len_kegg), color=color, marker=marker, markersize=10, linestyle=\"\",label=label, alpha=0.5)\n\n plt.yticks(range(len_kegg), list_funcs)\n plt.xlabel(\"Pathway strength\", fontsize=size_label)\n plt.legend()\n plt.tick_params(labelsize=size_tick-4)\n plt.show()\n ##fig.savefig(\"figures/fig7compfunc.pdf\", bbox_inches=\"tight\")", "def process_data(data, model, cgenes, cutoff, max_outliers, csample = None):\n\n\t#Transforms certain columns from string to numeric\n\tcols = ['CT','Quantity']\n\tdata[cols] = data[cols].apply(pandas.to_numeric, errors='coerce')\n\n\n\t#Marks the Control Genes in a new column in the dataframe\n\tdata['Control'] = data['Target Name'].apply(lambda x: True if str(x) in cgenes else False)\n\n\n\t# Create column 'Ignore' in dataframe to mark rows with NaN values in certain columns \n\tdata['Ignore'] = False\n\tcols = ['Sample Name', 'Target Name', 'Task', 'Reporter', 'CT']\n\tfor col in cols:\n\t\tdata.loc[data[col].isnull(), 'Ignore'] = True\n\n\t\n\n\t# Calls the different processing models depending on the model argument\n\tif model == 'absolute':\n\t\tdata = cleanup_outliers(data, \"Quantity\", cutoff, max_outliers)\n\t\tresults = process_absolute(data)\n\t\n\telif model == 'relative':\n\t\tdata = cleanup_outliers(data, \"CT\", cutoff, max_outliers)\n\t\tresults = process_relative(data)\n\n\telif model == 'stability':\n\t\tdata = cleanup_outliers(data, \"CT\", 
cutoff, max_outliers)\n\t\tresults = process_stability(data, csample)\n\n\treturn results", "def bin_discretize(self, variables=[], bins=3,\n min_const_samples_bin_size=1.0/3):\n self.edges=np.zeros((self.arity.size,bins+1))\n for i in variables:\n un_cnt=np.unique(self.data[:,i],return_counts=True)\n constvals=un_cnt[0][un_cnt[1]>self.data.shape[0]*min_const_samples_bin_size]\n mask=np.ones(self.data.shape[0],dtype=bool)\n if constvals.size>0:\n for j,cv in enumerate(constvals):\n mask*=(self.data[:,i]!=cv)\n self.data[self.data[:,i]==cv,i]=j\n\n size=np.sum(mask)/bins\n sorted_i=np.argsort(self.data[mask,i])\n edges=[self.data[mask,i][sorted_i[int(size*num)-1]] for num in range(1,bins)]\n self.edges[i]=[self.data[mask,i][sorted_i[0]]]+edges+[self.data[mask,i][sorted_i[-1]]]\n self.data[mask,i]=np.searchsorted(edges,self.data[mask,i])+constvals.size\n self.arity[i]=len(edges)+1+constvals.size", "def bookAnalysisJobs(config, cuts, aliases=QFramework.TQTaggable()):\n\n # boolean variable to keep track of whether we are using cutbased or MVA\n mva = config.getTagVString(\"MVA\")\n cutbased = (len(mva)==0)\n\n # if no aliases directly provided,\n if aliases.getNTags() < 1:\n # see if there are any in the config\n aliases.importTagsWithoutPrefix(config,\"cutParameters.\")\n aliases.importTagsWithoutPrefix(config,\"aliases.\")\n\n # TODO: modularize booking of each type of analysis job?\n\n #book cutflows\n if config.getTagBoolDefault(\"cutbased.makeCutflow\",cutbased):\n QFramework.INFO(\"booking cutflow\")\n cutflowjob = QFramework.TQCutflowAnalysisJob(\"cutflowJob\")\n cuts.addAnalysisJob(cutflowjob,\"*\")\n cutbased = True\n\n # TODO: book xAOD skimming here? (worst case just implement in HWW)\n xAODdumpingConfig = QFramework.TQTaggable()\n dumpXAODs = (xAODdumpingConfig.importTagsWithoutPrefix(config,\"xAODdumping.\") > 0)\n if dumpXAODs :\n try:\n flaggingJob = ROOT.TQEventFlaggingAnalysisJob()\n if xAODdumpingConfig.hasTag(\"flagName\"): flaggingJob.setFlagName(xAODdumpingConfig.getTagStringDefault(\"flagName\",\"\"))\n flaggingCuts = xAODdumpingConfig.getTagStringDefault(\"cuts\",\"\")\n print(\"Booking event flagging jobs at cuts: {:s}\".format(flaggingCuts.Data()))\n cuts.addAnalysisJob(flaggingJob,flaggingCuts)\n except NameError:\n QFramework.ERROR(\"Cannot schedule xAOD dumping, required classes are not in your version of CAFCore. Please consider updating CAFCore\")\n\n # add the event flagging for possible unfolding\n unfoldingConfig = QFramework.TQTaggable()\n unfolding = (unfoldingConfig.importTagsWithoutPrefix(config,\"unfolding.\") > 0)\n if unfolding :\n #add a suffix to the cut names for the flags. 
This is needed to prevent cross talk between channels!\n unfoldingFlagSuffix = config.getTagStandardStringDefault(\"~flagSuffix\",\"_$(cand)\")\n unfoldingCuts = unfoldingConfig.getTagVString(\"flagcuts\")\n for cutName in unfoldingCuts:\n fullCutNames = cuts.getCutNames(cutName)\n for fullCutName in fullCutNames:\n unfoldingJob = ROOT.TQEventFlaggingAnalysisJob()\n flagName = fullCutName + unfoldingFlagSuffix\n unfoldingJob.setFlagName(flagName)\n cuts.addAnalysisJob(unfoldingJob,fullCutName)\n\n\n #book histograms (TH1, TH2, TH3, TProfiles,...)\n if ( config.hasTag(\"histograms.0\") or config.hasTag(\"histograms\") ) and config.getTagBoolDefault(\"makeHistograms\",cutbased):\n QFramework.INFO(\"booking histograms\")\n histofiles = config.getTagVString(\"histograms\")\n histofiles = common.findMultipleConfigPathsFromList(histofiles)\n if QFramework.TQHistoMakerAnalysisJob.importJobsFromTextFiles(histofiles,cuts,aliases,\"*\",config.getTagBoolDefault(\"printHistograms\",False)) > 0:\n print(QFramework.TQHistoMakerAnalysisJob.getErrorMessage())\n cutbased = True\n\n #book multi-dim histograms (THn based, i.e., for nDim>3)\n if ( config.hasTag(\"multidimHistograms\") ) and config.getTagBoolDefault(\"makeHistograms\",cutbased):\n QFramework.INFO(\"booking multidimensional histograms\")\n histofiles = config.getTagVString(\"multidimHistograms\")\n histofiles = common.findMultipleConfigPathsFromList(histofiles)\n if QFramework.TQTHnBaseMakerAnalysisJob.importJobsFromTextFiles(histofiles,cuts,aliases,\"*\",config.getTagBoolDefault(\"printHistograms\",False)) > 0:\n print(QFramework.TQTHnBaseMakerAnalysisJob.getErrorMessage())\n cutbased = True\n\n #book graphs\n if ( config.hasTag(\"graphs.0\") or config.hasTag(\"graphs\") ) and config.getTagBoolDefault(\"makeGraphs\",cutbased):\n QFramework.INFO(\"booking graphs\")\n graphfiles = config.getTagVString(\"graphs\")\n graphfiles = findMultipleConfigPathsFromList(graphfiles)\n if QFramework.TQGraphMakerAnalysisJob.importJobsFromTextFiles(graphfiles,cuts,aliases,\"*\",config.getTagBoolDefault(\"printGraphs\",False)) > 0:\n print(QFramework.TQGraphMakerAnalysisJob.getErrorMessage())\n cutbased = True\n\n #book event lists\n if ( config.hasTag(\"eventlists.0\") or config.hasTag(\"eventlists\") ) and config.getTagBoolDefault(\"makeEventLists\",cutbased):\n QFramework.INFO(\"booking eventlists\")\n evtlistfiles = config.getTagVString(\"eventlists\")\n evtlistfiles = common.findMultipleConfigPathsFromList(evtlistfiles)\n if QFramework.TQEventlistAnalysisJob.importJobsFromTextFiles(evtlistfiles,cuts,aliases,\"*\") > 0:\n print(QFramework.TQEventlistAnalysisJob.getErrorMessage())\n cutbased = True\n\n #book Ntuple dumping\n if ( config.hasTag(\"ntuples.0\") or config.hasTag(\"ntuples\") ) and config.getTagBoolDefault(\"dumpNtuples\",cutbased):\n QFramework.INFO(\"preparing to dump ntuples\")\n ntupfiles = config.getTagVString(\"ntuples\")\n ntupfiles = common.findMultipleConfigPathsFromList(ntupfiles)\n if QFramework.TQNTupleDumperAnalysisJob.importJobsFromTextFiles(ntupfiles,cuts,aliases,\"*\",config.getTagBoolDefault(\"printNTuples\",False)) > 0:\n print(QFramework.TQNTupleDumperAnalysisJob.getErrorMessage());\n cutbased = True\n\n runtime = config.getFolder(\"runtime+\")\n runtime.setTagBool(\"cutbased\", cutbased)\n\n return", "def cutting(args):\n import numpy as np\n import h5py\n\n # Read in map data\n with h5py.File(args.pointmap, 'r') as f:\n ptmap = f['map'][...]\n\n if args.threshold > 0:\n cut_map = np.where(ptmap<args.threshold, 0, ptmap)\n 
else:\n idx = np.unravel_index(np.argmax(ptmap), ptmap.shape) # the index of the max element\n cut_map = np.zeros_like(ptmap)\n cut_map[idx] = ptmap[idx]\n\n # Create output image file name\n if args.outfile:\n out_file = args.outfile\n elif args.threshold > 0:\n out_file = ((args.pointmap.split('/')[-1]).split('.')[0]).replace('sim', 'cut') + '_' + str(int(args.threshold)) + '.hdf5'\n else:\n out_file = ((args.pointmap.split('/')[-1]).split('.')[0]).replace('sim', 'cut') + '_max.hdf5'\n\n # Save cut data\n with h5py.File(out_file, 'w') as f:\n f.create_dataset('map', data=cut_map)\n\n print 'done!'", "def Fill(self, *args, **kwargs):\n self._varexp = kwargs.get(\"varexp\")\n self._cuts = kwargs.get(\"cuts\", [])\n self._weight = kwargs.get(\"weight\", \"1\")\n if len(args) == 1 and isinstance(args[0], (str, unicode)):\n IOManager.FillHistogram(self, args[0], **kwargs)\n if not kwargs.get(\"append\", False):\n self._errorband.Reset()\n self._errorband.Add(self)\n else:\n super(Histo1D, self).Fill(*args)", "def test_1d_cut():\n \n dic,data = ng.pipe.read(\"common_data/1d_pipe/test_cut.ft\")\n assert data.shape == (2766,)\n assert data.dtype == 'float32'\n assert round(data[0],2) == -12123.67\n assert round(data[1],2) == -8979.31\n assert round(data[100],2) == -7625.30\n write_readback(dic,data)\n check_ppm_limits(dic,data,0,[278.59, 10.03])", "def eval_BC(Teff,logg,FeH,filt=\"g\",allBCs=None):\n if allBCs is None: allBCs = read_bc_table()\n \n BCs = allBCs[filt]\n \n points = np.atleast_2d([np.ravel(Teff),np.ravel(logg),np.ravel(FeH)]).T\n points[points[:,2] < -2.5,2] = -2.5\n out = interpolate.griddata(BCs[:,0:3], BCs[:,3], points, method='linear')\n return out", "def choose_split(data,treshold):\n n_features = len(data[0]) - 1 # number of columns\n quest_gain = [] # keep track of the gains and questions\n\n for col in range(1,n_features): # for each feature\n values = set([row[col] for row in data]) # unique values in the column\n for val in values: # for each value\n question = Question(col, val)\n \n # try splitting the dataset\n true_rows, false_rows = partition(data, question)\n\n # Skip this split if it doesn't divide the dataset.\n if len(true_rows) == 0 or len(false_rows) == 0:\n continue\n\n # Calculate the information gain from this split\n gain = info_gain(data, true_rows, false_rows)\n quest_gain.append(Question_gain(gain,question))\n\n possible_question = [] # possible questions to ask\n n_quest_gain = len(quest_gain)\n\n if n_quest_gain == 0:\n return float('Inf'), float('NaN') #\n\n for x in range(n_quest_gain):\n if (quest_gain[x].gain >= treshold):\n possible_question.append(Question_gain(quest_gain[x].gain,quest_gain[x].question))\n \n n_possible_question = len(possible_question)\n if n_possible_question == 0:\n return float('Inf'), float('NaN')\n\n if n_possible_question>=2:\n [i, j] = random.sample(range(0, n_possible_question), 2)\n else:\n i = j = random.randint(0,n_possible_question-1)\n\n if possible_question[i].gain>=possible_question[j].gain:\n return possible_question[i].gain, possible_question[i].question\n else:\n return possible_question[j].gain, possible_question[j].question", "def butter_bandpass_filter(data, lowcut, highcut, fs, order=2):\n nyq = 0.5 * fs\n low = lowcut / nyq\n high = highcut / nyq\n \n # butter() and lfilter() are from scipy.signal\n \n b, a = butter(order, [low, high], btype='band')\n y = lfilter(b, a, data)\n return y", "def fid_cuts(ptname, etaname):\n cuts = []\n cuts.append(combine_cuts([ptname + ' > 4.5',\n 'TMath::Abs(' + etaname + 
') < 1.2']))\n cuts.append(combine_cuts([ptname + ' > 4.0',\n var_selection('TMath::Abs('+etaname+')', 1.2, 1.4)\n ]))\n cuts.append(combine_cuts([ptname + ' > 3.5',\n var_selection('TMath::Abs('+etaname+')', 1.4, 1.6)\n ]))\n return combine_cuts(cuts, ' || ')", "def cutout(od,\n varList = None,\n YRange = None,\n XRange = None,\n add_Hbdr = False,\n mask_outside = False,\n ZRange = None,\n add_Vbdr = False,\n timeRange = None,\n timeFreq = None,\n sampMethod = 'snapshot',\n dropAxes = False):\n \n # Check\n for wrong_dim in ['mooring', 'station', 'particle']:\n if wrong_dim in od._ds.dims and (XRange is not None or YRange is not None):\n raise ValueError('`cutout` cannot subsample in the horizontal plain oceandatasets with dimension [{}]'.format(wrong_dim))\n \n # Convert variables to numpy arrays and make some check\n if not isinstance(od, _ospy.OceanDataset):\n raise TypeError('`od` must be OceanDataset')\n \n if varList is not None:\n varList = _np.asarray(varList, dtype='str')\n if varList.ndim == 0: varList = varList.reshape(1)\n elif varList.ndim >1: raise TypeError('Invalid `varList`')\n \n if not isinstance(add_Hbdr, (float, int, bool)):\n raise TypeError('`add_Hbdr` must be float, int, or bool')\n \n if not isinstance(mask_outside, bool):\n raise TypeError('`add_Hbdr` must be bool')\n \n if YRange is not None:\n YRange = _np.asarray(YRange, dtype=od._ds['YG'].dtype)\n if YRange.ndim == 0: YRange = YRange.reshape(1)\n elif YRange.ndim >1: raise TypeError('Invalid `YRange`')\n Ymax = od._ds['YG'].max().values\n Ymin = od._ds['YG'].min().values\n if any(YRange<Ymin) or any(YRange>Ymax):\n _warnings.warn(\"\\nThe Y range of the oceandataset is: {}\"\n \"\\nYRange has values outside the oceandataset range.\".format([Ymin, Ymax]), stacklevel=2)\n \n if XRange is not None:\n XRange = _np.asarray(XRange, dtype=od._ds['XG'].dtype)\n if XRange.ndim == 0: XRange = XRange.reshape(1)\n elif XRange.ndim >1: raise TypeError('Invalid `XRange`')\n Xmax = od._ds['XG'].max().values\n Xmin = od._ds['XG'].min().values\n if any(XRange<Xmin) or any(XRange>Xmax):\n _warnings.warn(\"\\nThe X range of the oceandataset is: {}\"\n \"\\nXRange has values outside the oceandataset range.\".format([Xmin, Xmax]), stacklevel=2)\n if ZRange is not None:\n ZRange = _np.asarray(ZRange, dtype=od._ds['Zp1'].dtype)\n if ZRange.ndim == 0: ZRange = ZRange.reshape(1)\n elif ZRange.ndim >1: raise TypeError('Invalid `ZRange`')\n Zmax = od._ds['Zp1'].max().values\n Zmin = od._ds['Zp1'].min().values\n if any(ZRange<Zmin) or any(ZRange>Zmax):\n _warnings.warn(\"\\nThe Z range of the oceandataset is: {}\"\n \"\\nZRange has values outside the the oceandataset range.\".format([Zmin, Zmax]), stacklevel=2)\n \n if timeRange is not None:\n timeRange = _np.asarray(timeRange, dtype=od._ds['time'].dtype)\n if timeRange.ndim == 0: timeRange = timeRange.reshape(1)\n elif timeRange.ndim >1: raise TypeError('Invalid `timeRange`')\n timemax = od._ds['time'].max().values\n timemin = od._ds['time'].min().values\n if any(timeRange<timemin) or any(timeRange>timemax):\n _warnings.warn(\"\\nThe time range of the oceandataset is: {}\"\n \"\\ntimeRange has values outside the the oceandataset range.\".format([timemin, timemax]), stacklevel=2)\n \n if not isinstance(timeFreq, (str, type(None))):\n raise TypeError('`timeFreq` must None or str')\n \n sampMethod_list = ['snapshot', 'mean']\n if sampMethod not in sampMethod_list:\n raise ValueError('[{}] is not an available `sampMethod`.'\n '\\nOptions: {}'.format(sampMethod, sampMethod_list))\n \n if 
not isinstance(dropAxes, bool):\n dropAxes = _np.asarray(dropAxes, dtype='str')\n if dropAxes.ndim == 0: dropAxes = dropAxes.reshape(1)\n elif dropAxes.ndim >1: raise TypeError('Invalid `dropAxes`')\n axis_error = [axis for axis in dropAxes if axis not in od.grid_coords]\n if len(axis_error)!=0:\n raise ValueError('{} are not in od.grid_coords and can not be dropped'.format(axis_error))\n dropAxes = {d: od.grid_coords[d] for d in dropAxes}\n elif dropAxes is True:\n dropAxes = od.grid_coords\n if YRange is None : dropAxes.pop('Y', None)\n if XRange is None : dropAxes.pop('X', None)\n if ZRange is None : dropAxes.pop('Z', None)\n if timeRange is None: dropAxes.pop('time', None)\n else:\n dropAxes = {}\n \n # Message\n print('Cutting out the oceandataset.')\n \n # Copy\n od = _copy.copy(od)\n \n # Unpack\n ds = od._ds\n periodic = od.grid_periodic\n \n # ---------------------------\n # Horizontal CUTOUT\n # ---------------------------\n \n if add_Hbdr is True:\n add_Hbdr = (_np.mean([_np.fabs(od._ds['XG'].max() - od._ds['XG'].min()),\n _np.fabs(od._ds['YG'].max() - od._ds['YG'].min())]) / \n _np.mean([len(od._ds['X']), len(od._ds['Y'])]))\n elif add_Hbdr is False:\n add_Hbdr = 0\n \n if add_Vbdr is True:\n add_Vbdr = _np.fabs(od._ds['Zp1'].diff('Zp1')).max().values\n elif add_Vbdr is False:\n add_Vbdr = 0\n \n # Initialize horizontal mask\n if XRange is not None or YRange is not None:\n maskH = _xr.ones_like(ds['XG'])\n\n if YRange is not None: \n # Use arrays\n YRange = _np.asarray([_np.min(YRange)-add_Hbdr, _np.max(YRange)+add_Hbdr]).astype(ds['YG'].dtype)\n\n # Get the closest \n for i, Y in enumerate(YRange):\n diff = _np.fabs(ds['YG']-Y)\n YRange[i] = ds['YG'].where(diff==diff.min()).min().values \n maskH = maskH.where(_np.logical_and(ds['YG']>=YRange[0], ds['YG']<=YRange[-1]), 0)\n maskHY = maskH\n\n if XRange is not None:\n # Use arrays\n XRange = _np.asarray([_np.min(XRange)-add_Hbdr, _np.max(XRange)+add_Hbdr]).astype(ds['XG'].dtype)\n\n # Get the closest \n for i, X in enumerate(XRange):\n diff = _np.fabs(ds['XG']-X)\n XRange[i] = ds['XG'].where(diff==diff.min()).min().values \n maskH = maskH.where(_np.logical_and(ds['XG']>=XRange[0], ds['XG']<=XRange[-1]), 0)\n\n # Can't be all zeros\n if maskH.sum()==0: raise ValueError('Zero grid points in the horizontal range')\n\n # Find horizontal indexes\n maskH['Yp1'].values = _np.arange(len(maskH['Yp1']))\n maskH['Xp1'].values = _np.arange(len(maskH['Xp1']))\n dmaskH = maskH.where(maskH, drop=True)\n dYp1 = dmaskH['Yp1'].values\n dXp1 = dmaskH['Xp1'].values\n iY = [_np.min(dYp1), _np.max(dYp1)]\n iX = [_np.min(dXp1), _np.max(dXp1)]\n maskH['Yp1'] = ds['Yp1']\n maskH['Xp1'] = ds['Xp1']\n \n # Original length\n lenY = len(ds['Yp1'])\n lenX = len(ds['Xp1']) \n \n # Indexis\n if iY[0]==iY[1]:\n if 'Y' not in dropAxes:\n if iY[0]>0: iY[0]=iY[0]-1\n else: iY[1]=iY[1]+1\n else: dropAxes.pop('Y', None)\n \n\n if iX[0]==iX[1]:\n if 'X' not in dropAxes:\n if iX[0]>0: iX[0]=iX[0]-1\n else: iX[1]=iX[1]+1\n else: dropAxes.pop('X', None)\n \n # Cutout\n ds = ds.isel(Yp1 = slice(iY[0], iY[1]+1),\n Xp1 = slice(iX[0], iX[1]+1))\n \n if 'X' in dropAxes:\n if iX[0]==len(ds['X']):\n iX[0]=iX[0]-1\n iX[1]=iX[1]-1\n ds = ds.isel(X = slice(iX[0], iX[1]+1))\n elif (('outer' in od._grid.axes['X'].coords and od._grid.axes['X'].coords['outer'].name == 'Xp1') or \n ('left' in od._grid.axes['X'].coords and od._grid.axes['X'].coords['left'].name == 'Xp1')):\n ds = ds.isel(X = slice(iX[0], iX[1]))\n elif 'right' in od._grid.axes['X'].coords and 
od._grid.axes['X'].coords['right'].name =='Xp1':\n ds = ds.isel(X = slice(iX[0]+1, iX[1]+1)) \n \n if 'Y' in dropAxes:\n if iY[0]==len(ds['Y']):\n iY[0]=iY[0]-1\n iY[1]=iY[1]-1\n ds = ds.isel(Y = slice(iY[0], iY[1]+1))\n elif (('outer' in od._grid.axes['Y'].coords and od._grid.axes['Y'].coords['outer'].name == 'Yp1') or \n ('left' in od._grid.axes['Y'].coords and od._grid.axes['Y'].coords['left'].name == 'Yp1')):\n ds = ds.isel(Y = slice(iY[0], iY[1]))\n elif 'right' in od._grid.axes['Y'].coords and od._grid.axes['Y'].coords['right'].name =='Yp1':\n ds = ds.isel(Y = slice(iY[0]+1, iY[1]+1))\n \n # Cut axis can't be periodic\n if (len(ds['Yp1']) < lenY or 'Y' in dropAxes) and 'Y' in periodic: periodic.remove('Y')\n if (len(ds['Xp1']) < lenX or 'X' in dropAxes) and 'X' in periodic: periodic.remove('X')\n \n # ---------------------------\n # Vertical CUTOUT\n # ---------------------------\n \n # Initialize vertical mask\n maskV = _xr.ones_like(ds['Zp1'])\n \n if ZRange is not None:\n # Use arrays\n ZRange = _np.asarray([_np.min(ZRange)-add_Vbdr, _np.max(ZRange)+add_Vbdr]).astype(ds['Zp1'].dtype)\n \n # Get the closest \n for i, Z in enumerate(ZRange):\n diff = _np.fabs(ds['Zp1']-Z)\n ZRange[i] = ds['Zp1'].where(diff==diff.min()).min().values \n maskV = maskV.where(_np.logical_and(ds['Zp1']>=ZRange[0], ds['Zp1']<=ZRange[-1]), 0) \n \n # Find vertical indexes\n maskV['Zp1'].values = _np.arange(len(maskV['Zp1']))\n dmaskV = maskV.where(maskV, drop=True)\n dZp1 = dmaskV['Zp1'].values\n iZ = [_np.min(dZp1), _np.max(dZp1)]\n maskV['Zp1'] = ds['Zp1']\n \n # Original length\n lenZ = len(ds['Zp1']) \n \n # Indexis\n if iZ[0]==iZ[1]:\n if 'Z' not in dropAxes:\n if iZ[0]>0: iZ[0]=iZ[0]-1\n else: iZ[1]=iZ[1]+1\n else: dropAxes.pop('Z', None)\n \n # Cutout\n ds = ds.isel(Zp1 = slice(iZ[0], iZ[1]+1))\n if 'Z' in dropAxes:\n if iZ[0]==len(ds['Z']):\n iZ[0]=iZ[0]-1\n iZ[1]=iZ[1]-1\n ds = ds.isel(Z = slice(iZ[0], iZ[1]+1))\n if 'Zu' in ds.dims and len(ds['Zu'])>1:\n ds = ds.sel(Zu=ds['Zp1'].values, method='nearest')\n if 'Zl' in ds.dims and len(ds['Zl'])>1:\n ds = ds.sel(Zl=ds['Zp1'].values, method='nearest')\n \n else:\n ds = ds.isel(Z = slice(iZ[0], iZ[1]))\n \n if 'Zu' in ds.dims and len(ds['Zu'])>1:\n ds = ds.sel(Zu = slice(ds['Zp1'].isel(Zp1=0).values, ds['Zp1'].isel(Zp1=-1).values))\n\n if 'Zl' in ds.dims and len(ds['Zl'])>1:\n ds = ds.sel(Zl = slice(ds['Zp1'].isel(Zp1=0).values, ds['Z'].isel(Z=-1).values))\n \n # Cut axis can't be periodic\n if (len(ds['Z']) < lenZ or 'Z' in dropAxes) and 'Z' in periodic: periodic.remove('Z')\n \n # ---------------------------\n # Time CUTOUT\n # ---------------------------\n \n # Initialize vertical mask\n maskT = _xr.ones_like(ds['time']).astype('int')\n \n if timeRange is not None:\n \n # Use arrays\n timeRange = _np.asarray([_np.min(timeRange), _np.max(timeRange)]).astype(ds['time'].dtype)\n \n # Get the closest \n for i, time in enumerate(timeRange):\n if _np.issubdtype(ds['time'].dtype, _np.datetime64):\n diff = _np.fabs(ds['time'].astype('float64') - time.astype('float64'))\n else:\n diff = _np.fabs(ds['time']-time)\n timeRange[i] = ds['time'].where(diff==diff.min()).min().values \n # return maskT, ds['time'], timeRange[0], timeRange[-1]\n maskT = maskT.where(_np.logical_and(ds['time']>=timeRange[0], ds['time']<=timeRange[-1]), 0) \n \n # Find vertical indexes\n maskT['time'].values = _np.arange(len(maskT['time']))\n dmaskT = maskT.where(maskT, drop=True)\n dtime = dmaskT['time'].values\n iT = [min(dtime), max(dtime)]\n maskT['time'] = ds['time']\n \n # Original 
length\n lenT = len(ds['time'])\n \n # Indexis\n if iT[0]==iT[1]:\n if 'time' not in dropAxes:\n if iT[0]>0: iT[0]=iT[0]-1\n else: iT[1]=iT[1]+1\n else: dropAxes.pop('time', None)\n \n # Cutout\n ds = ds.isel(time = slice(iT[0], iT[1]+1))\n if 'time' in dropAxes:\n if iT[0]==len(ds['time_midp']):\n iT[0]=iT[0]-1\n iT[1]=iT[1]-1\n ds = ds.isel(time_midp = slice(iT[0], iT[1]+1))\n else:\n ds = ds.isel(time_midp = slice(iT[0], iT[1]))\n \n # Cut axis can't be periodic\n if (len(ds['time']) < lenT or 'T' in dropAxes) and 'time' in periodic: periodic.remove('time')\n \n # ---------------------------\n # Horizontal MASK\n # ---------------------------\n \n if mask_outside and (YRange is not None or XRange is not None):\n if YRange is not None: minY = YRange[0]; maxY = YRange[1]\n else: minY = ds['YG'].min().values; maxY = ds['YG'].max().values\n if XRange is not None: minX = XRange[0]; maxX = XRange[1]\n else: minX = ds['XG'].min().values; maxX = ds['XG'].max().values \n \n maskC = _xr.where(_np.logical_and(_np.logical_and(ds['YC']>=minY, ds['YC']<=maxY),\n _np.logical_and(ds['XC']>=minX, ds['XC']<=maxX)), 1,0).persist()\n maskG = _xr.where(_np.logical_and(_np.logical_and(ds['YG']>=minY, ds['YG']<=maxY),\n _np.logical_and(ds['XG']>=minX, ds['XG']<=maxX)), 1,0).persist()\n maskU = _xr.where(_np.logical_and(_np.logical_and(ds['YU']>=minY, ds['YU']<=maxY),\n _np.logical_and(ds['XU']>=minX, ds['XU']<=maxX)), 1,0).persist()\n maskV = _xr.where(_np.logical_and(_np.logical_and(ds['YV']>=minY, ds['YV']<=maxY),\n _np.logical_and(ds['XV']>=minX, ds['XV']<=maxX)), 1,0).persist()\n for var in ds.data_vars:\n if set(['X', 'Y']).issubset(ds[var].dims): ds[var] = ds[var].where(maskC)\n elif set(['Xp1', 'Yp1']).issubset(ds[var].dims): ds[var] = ds[var].where(maskG)\n elif set(['Xp1', 'Y']).issubset(ds[var].dims): ds[var] = ds[var].where(maskU)\n elif set(['X', 'Yp1']).issubset(ds[var].dims): ds[var] = ds[var].where(maskV)\n \n # ---------------------------\n # TIME RESAMPLING\n # ---------------------------\n # Resample in time\n if timeFreq:\n \n # Infer original frequency\n inFreq=_pd.infer_freq(ds.time.values); \n if timeFreq[0].isdigit() and not inFreq[0].isdigit(): inFreq='1'+inFreq\n \n # Same frequency: Skip\n if timeFreq==inFreq:\n _warnings.warn(\"\\nInput time freq: [{}] = Output time frequency: [{}]:\"\n \"\\nSkip time resampling.\".format(inFreq, timeFreq), stacklevel=2)\n \n else:\n \n # Remove time_midp and warn\n vars2drop = [var for var in ds.variables if 'time_midp' in ds[var].dims]\n if vars2drop:\n _warnings.warn(\"\\nTime resampling drops variables on `time_midp` dimension.\"\n \"\\nDropped variables: {}.\".format(vars2drop), stacklevel=2)\n ds = ds.drop(vars2drop)\n if 'time_midp' in ds.dims: ds = ds.drop('time_midp')\n \n # Snapshot\n if sampMethod=='snapshot': \n # Find new times\n newtime = ds['time'].sel(time=ds['time'].resample(time=timeFreq).first())\n\n # Use slice when possible\n inds = [i for i, t in enumerate(ds['time'].values) if t in newtime.values]\n inds_diff = _np.diff(inds)\n if all(inds_diff==inds_diff[0]): \n ds = ds.isel(time = slice(inds[0], inds[-1]+1, inds_diff[0]))\n else: \n # TODO: is this an xarray bug od just bad chunking/bad coding/bad SciServe compute performances?\n # Make test case and open issue!\n attrs = ds.attrs\n ds = _xr.concat([ds.sel(time = time) for i, time in enumerate(newtime)], dim='time')\n ds.attrs = attrs\n # Mean\n elif sampMethod=='mean':\n\n # Separate time and timeless\n attrs = ds.attrs\n ds_dims = ds.drop([var for var in ds.variables if 
not var in ds.dims])\n ds_time = ds.drop([var for var in ds.variables if not 'time' in ds[var].dims])\n ds_timeless = ds.drop([var for var in ds.variables if 'time' in ds[var].dims])\n\n # Resample\n ds_time = ds_time.resample(time=timeFreq).mean('time')\n\n # Add all dimensions to ds, and fix attributes\n for dim in ds_time.dims:\n if dim=='time': ds_time[dim].attrs = ds_dims[dim].attrs\n else: ds_time[dim] = ds_dims[dim]\n\n # Merge\n ds = _xr.merge([ds_time, ds_timeless])\n ds.attrs = attrs\n \n # Update oceandataset\n od._ds = ds\n \n # Add time midp\n if timeFreq and 'time' not in dropAxes:\n od = od.set_grid_coords({**od.grid_coords, 'time' : {'time': -0.5}}, add_midp=True, overwrite=True)\n\n # Drop axes\n grid_coords = od.grid_coords\n for coord in list(grid_coords): \n if coord in dropAxes: grid_coords.pop(coord, None)\n od = od.set_grid_coords(grid_coords, overwrite=True)\n \n # Cut axis can't be periodic \n od = od.set_grid_periodic(periodic, overwrite = True)\n \n # Drop variables\n if varList is not None: \n if isinstance(varList, str): varList = [varList]\n \n # Compute missing variables\n od = _compute._add_missing_variables(od, varList)\n \n # Drop useless\n od._ds = od._ds.drop([v for v in od._ds.variables if (v not in od._ds.dims and v not in od._ds.coords and v not in varList)])\n \n return od", "def cut_tof_event(data_dict, plot_dict, event) :\n event_spacepoints = event.GetTOFEventSpacePoint()\n\n tof0_sp_size = event_spacepoints.GetTOF0SpacePointArraySize()\n tof1_sp_size = event_spacepoints.GetTOF1SpacePointArraySize()\n tof2_sp_size = event_spacepoints.GetTOF2SpacePointArraySize()\n\n if tof0_sp_size < 1 or tof1_sp_size < 1 or tof2_sp_size < 1 :\n return True\n\n tof0_sp = event_spacepoints.GetTOF0SpacePointArrayElement(0)\n tof1_sp = event_spacepoints.GetTOF1SpacePointArrayElement(0)\n tof2_sp = event_spacepoints.GetTOF2SpacePointArrayElement(0)\n\n if tof1_sp_size != 1 or tof2_sp_size != 1 :\n return True\n\n diff_0_1 = tof1_sp.GetTime() - tof0_sp.GetTime()\n diff_1_2 = tof2_sp.GetTime() - tof1_sp.GetTime()\n\n plot_dict['tof_0_1'].Fill( diff_0_1 )\n plot_dict['tof_1_2'].Fill( diff_1_2 )\n\n if diff_1_2 < TOF_CUT_LOW or diff_1_2 > TOF_CUT_HIGH :\n return True\n\n plot_dict['tof_0_1_cut'].Fill( tof1_sp.GetTime() - tof0_sp.GetTime() )\n plot_dict['tof_1_2_cut'].Fill( tof2_sp.GetTime() - tof1_sp.GetTime() )\n\n return False", "def pick_signals(processor, source = 'input'):\n\n if source == 'input':\n bin_edges = processor.input_parameters['bin_edges']\n raw_signal = processor.input_signal\n elif source == 'output':\n bin_edges = processor.output_parameters['bin_edges']\n raw_signal = processor.output_signal\n else:\n raise ValueError('Unknown value for the data source')\n t = np.zeros(len(raw_signal)*4)\n bins = np.zeros(len(raw_signal)*4)\n signal = np.zeros(len(raw_signal)*4)\n value = 1.\n\n for i, edges in enumerate(bin_edges):\n t[4*i] = edges[0]\n t[4*i+1] = edges[0]\n t[4*i+2] = edges[1]\n t[4*i+3] = edges[1]\n bins[4*i] = 0.\n bins[4*i+1] = value\n bins[4*i+2] = value\n bins[4*i+3] = 0.\n signal[4*i] = 0.\n signal[4*i+1] = raw_signal[i]\n signal[4*i+2] = raw_signal[i]\n signal[4*i+3] = 0.\n value *= -1\n\n z = t * c\n return (t, z, bins, signal)", "def butter_filter(datalist):\n fs = 200.00\n fHigh = 50.00\n fLow = 5.00\n N=4\n [b,a]=sg.butter(N,[fLow/fs, fHigh/fs], btype='band')\n global filtered\n #IIR filter\n return sg.filtfilt(b,a,datalist)", "def Flux_init(self, flns, oversample=None, sigma=None, tophat=None, thin=None, wave_cut=None, temp_cut=None, 
logg_cut=None, convert=None, linlog=False, verbose=False):\n ## Reading the parameter information about the spectra\n lst = []\n for i in np.arange(len(flns)):\n print(flns[i])\n ## Get the logg and temp value from the filename\n hdr = pyfits.getheader(flns[i], ext=0)\n temp = hdr['PHXTEFF']\n logg = hdr['PHXLOGG']\n if temp_cut is None or (temp >= temp_cut[0] and temp <= temp_cut[1]):\n print(' temp_cut')\n if logg_cut is None or (logg >= logg_cut[0] and logg <= logg_cut[1]):\n print(' logg_cut')\n lst.append( [i, logg, temp] )\n\n ## Reading the mu values\n self.mu = np.array(pyfits.getdata(flns[0], ext=1), dtype=float)\n n_mu = self.mu.size\n\n ## Sorting the grid by temperature and then logg\n print(lst)\n Utils.Misc.Sort_list(lst, [2,1])\n lst = np.array(lst)\n print(lst)\n\n ## Extracting the temperature values\n self.logtemp = np.log(np.unique(lst[:,2]))\n self.logtemp.sort()\n n_teff = self.logtemp.size\n\n ## Extracting the logg values\n self.logg = np.unique(lst[:,1])\n self.logg.sort()\n n_logg = self.logg.size\n\n ## If there is a mismatch and the grid is not rectangular, then the function aborts\n if n_teff*n_logg != lst.shape[0]:\n print( \"Number of temperature points: {}\".format(n_teff) )\n print( \"Number of logg points: {}\".format(n_logg) )\n print( \"Number of grid points: {}\".format(lst.shape[0]) )\n for teff in self.logtemp:\n for logg in self.logg:\n missing = True\n for l in lst:\n if np.log(l[2]) == teff and l[1] == logg:\n missing = False\n if missing:\n print(\"Missing -> logg: {:3.1f}, temp: {:5.0f}\".format(logg,np.exp(teff)))\n raise Exception( \"There is a mismatch in the number of log(g) and teff grid points!\" )\n return\n\n ## Extracting the data\n grid = []\n wav = []\n if verbose: print( \"Starting to read atmosphere grid files\" )\n for i,l in enumerate(lst[:,0]):\n if verbose: sys.stdout.write( \"Reading {} ({}/{})\\r\".format(flns[int(l)], i+1, lst.shape[0]) ); sys.stdout.flush()\n tmp = Read_AGSS(flns[int(l)], oversample=oversample, sigma=sigma, tophat=tophat, thin=thin, wave_cut=wave_cut, convert=convert, linlog=linlog)\n grid.append(tmp[0])\n wav.append(tmp[1])\n self.z0 = tmp[2]\n logger.log(8, \"Number of wavelength points: {}, range: [{}, {}]\".format(tmp[1].size, tmp[1][0], tmp[1][-1]) )\n if verbose: print( \"\\nFinished reading atmosphere grid files\" )\n try:\n wav = np.array(wav)\n if wav.std(0).max() > 1.e-6:\n raise Exception( \"The wavelength grid is not uniform!\" )\n return\n else:\n wav = wav[0]\n except:\n raise Exception( \"The wavelength grid has an inconsistent number of elements!\" )\n return\n if verbose: print( \"Transforming grid data to array\" )\n grid = np.asarray(grid)\n if verbose: print( \"Addressing the grid data shape\" )\n grid.shape = n_teff, n_logg, n_mu, wav.size\n self.wav = wav\n if verbose: print( \"Making the grid a class attribute\" )\n self.grid = grid\n\n ## Calculating the grid log-to-linear weights\n if linlog:\n self.wav_linear = Utils.Series.Resample_loglin(self.wav)\n self.wav_delta = self.wav_linear[1] - self.wav_linear[0]\n self.wav_frac, self.wav_inds = Utils.Series.Getaxispos_vector(self.wav, self.wav_linear)\n return", "def trigger_cut(\n df,\n nearest_data_trigger_allowed=5,\n furthest_simu_trigger_allowed=5\n ):\n df['trigger_cut'] = (\n ( abs(df['t_nearest_data_trigger']) > nearest_data_trigger_allowed )\n & ( abs(df['t_input_simu_trigger']) < furthest_simu_trigger_allowed )\n )", "def Cut(self, *args):\n return _BRepAlgo.BRepAlgo_BooleanOperations_Cut(self, *args)", "def cut_standard(X, VARS, 
xcorr_flow=False):\n \n # Fiducial cuts\n #MINPT = 0.5\n #MAXETA = 2.4\n MINPT = 0.7\n MAXETA = 1.5\n \n \n # Construct cuts\n cuts = []\n names = []\n\n #\n cuts.append( X[:,VARS.index('has_gsf')] == True )\n names.append(f'has_gsf == True')\n #\n cuts.append( X[:,VARS.index('gsf_pt')] > MINPT )\n names.append(f'gsf_pt > {MINPT:0.2f}')\n #\n cuts.append( np.abs(X[:,VARS.index('trk_eta')]) < MAXETA )\n names.append(f'|gsf_eta| < {MAXETA:0.2f}')\n #\n #cuts.append( [(len(X[i,VARS.index('image_clu_eta')]) is not 0) for i in range(X.shape[0])] )\n #names.append(f'len(image_clu_eta) != 0')\n \n \n ind = aux.apply_cutflow(cut=cuts, names=names, xcorr_flow=xcorr_flow)\n return ind", "def butter_bandpass_filter(data, lowcut, highcut, fs, order=1):\n b, a = butter_bandpass(lowcut, highcut, fs, order=order)\n y = filtfilt(b, a, data)\n return y", "def data_clean_analysis(dates, thresholds, thresholds_pairs):\n mpf = []\n tpt = []\n date = []\n thresh = []\n thresh_pairs = []\n\n for k, v in dates.items():\n for t in thresholds:\n for tp in thresholds_pairs:\n print(k)\n print(t)\n print(tp)\n print('-----')\n\n commits = pd.read_csv('../pub_data/test_commits_pub.csv', encoding='latin-1', sep='\\t')\n test_details = pd.read_csv('../pub_data/test_details_pub.csv', sep='\\t')\n test_status = pd.read_csv('../pub_data/test_histo_pub.csv', sep='\\t')\n mod_files = pd.read_csv(\"../pub_data/test_commits_mod_files_pub.csv\", sep='\\t')\n\n D = DataCI(commits, test_details, test_status, mod_files, start_date=v, threshold=t, threshold_pairs=tp)\n modification, transition = D.get_data_info()\n\n mpf.append(modification)\n tpt.append(transition)\n date.append(k)\n thresh.append(t)\n thresh_pairs.append(tp)\n\n print(len(date))\n print(len(thresh))\n print(len(thresh_pairs))\n print(len(mpf))\n print(len(tpt))\n\n df = pd.DataFrame(list(zip(date, thresh, thresh_pairs, mpf, tpt)),\n columns=['date', 'threshold', 'threshold_pairs', 'mpf', 'tpt']\n )\n\n df.to_pickle('start_date_analysis1.pkl')", "def featuresHist(self, **kwargs):\n\n # Selecting bins automatically:\n bins_onpower = np.arange(self.onpower_train.min().values[0],\n self.onpower_train.max().values[0],\n (self.onpower_train.max().values[0] -\n self.onpower_train.min().values[0]) / 50)\n\n bins_offpower = np.arange(self.offpower_train.min().values[0],\n self.offpower_train.max().values[0],\n (self.offpower_train.max().values[0] -\n self.offpower_train.min().values[0]) / 50)\n\n bins_duration = np.arange(self.duration_train.min().values[0],\n self.duration_train.max().values[0],\n (self.duration_train.max().values[0] -\n self.duration_train.min().values[0]) / 50)\n\n # If a bin has been specified update the bin sizes.\n for key in kwargs:\n if key == 'bins_onpower':\n bins_onpower = kwargs[key]\n elif key == 'bins_offpower':\n bins_offpower = kwargs[key]\n elif key == 'bins_duration':\n bins_duration = kwargs[key]\n else:\n print(\"Non valid kwarg\")\n\n # Plot structure:\n fig = plt.figure()\n ax1 = fig.add_subplot(311)\n ax2 = fig.add_subplot(312)\n ax3 = fig.add_subplot(313)\n\n # Evaluating score for:\n # Onpower\n x = np.arange(bins_onpower.min(), bins_onpower.max() + \\\n np.diff(bins_onpower)[0], np.diff(bins_onpower)[0] / float(1000)).reshape(-1, 1)\n y = self.__pdf(self.onpower, x)\n norm = pd.cut(\n self.onpower_train.onpower, bins=bins_onpower).value_counts().max() / max(y)\n # Plots for Onpower\n ax1.hist(\n self.onpower_train.onpower.values, bins=bins_onpower, alpha=0.5)\n ax1.plot(x, y * norm)\n #ax1.set_title(\"Feature: Onpower\")\n 
#ax1.set_ylabel(\"Counts\")\n #ax1.set_xlabel(\"On power (W)\")\n ax1.set_ylabel(\"On power counts\")\n\n # Offpower\n x = np.arange(bins_offpower.min(), bins_offpower.max() + \\\n np.diff(bins_offpower)[0], np.diff(bins_offpower)[0] / float(1000)).reshape(-1, 1)\n y = self.__pdf(self.offpower, x)\n norm = pd.cut(self.offpower_train.offpower,\n bins=bins_offpower).value_counts().max() / max(y)\n # Plots for Offpower\n ax2.hist(self.offpower_train.offpower.values,\n bins=bins_offpower, alpha=0.5)\n ax2.plot(x, y * norm)\n #ax2.set_title(\"Feature: Offpower\")\n #ax2.set_ylabel(\"Counts\")\n #ax2.set_xlabel(\"Off power (W)\")\n ax2.set_ylabel(\"Off power counts\")\n\n # Duration\n x = np.arange(bins_duration.min(), bins_duration.max() + \\\n np.diff(bins_duration)[0], np.diff(bins_duration)[0] / float(1000)).reshape(-1, 1)\n y = self.__pdf(self.duration, x)\n norm = pd.cut(self.duration_train.duration,\n bins=bins_duration).value_counts().max() / max(y)\n # Plots for duration\n ax3.hist(self.duration_train.duration.values,\n bins=bins_duration, alpha=0.5)\n ax3.plot(x, y * norm)\n #ax3.set_title(\"Feature: Duration\")\n #ax3.set_ylabel(\"Counts\")\n #ax3.set_xlabel(\"Duration (seconds)\")\n ax3.set_ylabel(\"Duration counts\")", "def filt(self,cutoff_dt, btype='low',order=3,axis=-1):\r\n \r\n if self.isequal==False and self.VERBOSE:\r\n print 'Warning - time series is unequally spaced. Use self.interp to interpolate onto an equal grid'\r\n \r\n if not btype == 'band':\r\n Wn = self.dt/cutoff_dt\r\n else:\r\n Wn = [self.dt/co for co in cutoff_dt]\r\n \r\n (b, a) = signal.butter(order, Wn, btype=btype, analog=0, output='ba')\r\n \r\n return signal.filtfilt(b, a, self.y, axis=axis)", "def cut_bkg(self):\n c = TCut(self.cut_both)\n c += TCut(self._return_if('_cut_bkg'))\n return c", "def pick_cuts_up_to_order(cuts, percentile):\n\n mask_orders_to_pick = cuts.costs <= np.percentile(cuts.costs, q=percentile)\n cuts.costs = cuts.costs[mask_orders_to_pick]\n cuts.values = cuts.values[mask_orders_to_pick, :]\n if cuts.names is not None:\n cuts.names = cuts.names[mask_orders_to_pick]\n if cuts.equations is not None:\n cuts.equations = cuts.equations[mask_orders_to_pick]\n\n return cuts", "def onAddCutToolClicked(self, event):\n i_cube = self.cube_choice.GetSelection()\n i_dimension = self.cut_dimension_choice.GetSelection()\n\n if i_dimension <= 0:\n dlg_func.openWarningBox(_(u'CUT'), _(u'Cut dimension not selected'))\n else:\n value = self.cut_value_textCtrl.GetValue()\n if not value.strip():\n dlg_func.openWarningBox(_(u'CUT'), _(u'Cut value not specified'))\n else:\n cube = self._OLAP_server.getCubes()[i_cube]\n dimension = cube.getDimensions()[i_dimension - 1]\n row = (dimension.getLabel(), dimension.getName(), value)\n self.appendListCtrlRow(listctrl=self.cut_listCtrl, row=row)\n\n # After adding, clear the controls\n self.cut_dimension_choice.SetSelection(0)\n self.cut_value_textCtrl.SetValue(u'')\n\n event.Skip()", "def __call__(self, n_bins, segment, elements):\n\n # n_bins\n assert type(n_bins) is int\n assert n_bins > 0\n\n # segment\n assert type(segment) is list or type(segment) is tuple\n assert len(segment) == 2\n assert np.isscalar(segment[0]) and np.isscalar(segment[1])\n assert segment[0] < segment[1]\n\n # elements\n assert type(elements) is np.ndarray, f\"elements should be an np.ndarray, instead of {type(elements)}\"\n assert elements.dtype == np.number\n\n raise NotImplemented", "def cut_data(xdata, ydata, yerrdata, cutmode=CUT_POINT, cutvalue=0):\n if cutmode == CUT_POINT:\n 
xdata_cut = xdata[cutvalue:]\n ydata_cut = ydata[cutvalue:]\n yerrdata_cut = yerrdata[cutvalue:]\n elif cutmode == CUT_RADIUS:\n ii = xdata >= cutvalue\n xdata_cut = xdata[ii]\n ydata_cut = ydata[ii]\n yerrdata_cut = yerrdata[ii]\n else:\n raise ValueError('Unknown cut mode: %s' % cutmode)\n return (xdata_cut, ydata_cut, yerrdata_cut)", "def plotMultipleVars(self, vars, series, groups=None, labels=None, postfix=\"\",logy=True, fixedrange=False):\n # split the variable names, we'll use the first one for naming purposes\n varnames = [var.split(\"_\") for var in vars]\n\n # create the separate dataframes from the provided groups\n # Define some labels if we have groups and no provided labels\n # Stack all the variables we want to plot in one histogram\n dfs = None\n if groups:\n dfs = [series.loc[g,:].stack() for g in groups]\n if not labels or len(labels) != len(groups):\n labels = [\"Group %s\" % (i+1) for i in xrange(len(groups)-1)]\n labels.append(\"Bulk\")\n else:\n dfs = [series.stack()]\n\n\n # Get right number of colors, and reverse them so that mediumpurple is \n # used for the bulk of the chips (assumed to be the last group)\n colors = (self.colorlist[:len(dfs)])\n colors.reverse()\n \n # Make the histogram\n # Get the preferred binning and check whether all values fall within that range \n if varnames[0][0] in cutinfo11:\n nbins = cutinfo11[varnames[0][0]][2]\n xmin = cutinfo11[varnames[0][0]][3]\n xmax = cutinfo11[varnames[0][0]][4]\n series_min = series.min().min()\n series_max = series.max().max()\n if fixedrange or (series_min > xmin and series_max < xmax):\n ax = plt.hist(dfs, bins=nbins, range=[xmin, xmax], stacked=True, \n color=colors, label=labels, log=logy)\n else:\n ax = plt.hist(dfs, bins=nbins, stacked=True, \n color=colors, label=labels, log=logy)\n else:\n ax = plt.hist(dfs, bins=20, stacked=True, \n color=colors, label=labels, log=logy)\n\n # Set the axis titles\n if varnames[0][0] in cutinfo11:\n if len(varnames[0]) == 1:\n plt.xlabel(cutinfo11[varnames[0][0]][0], \n fontsize=self.labelsize)\n else:\n plt.xlabel(\"%s ; %s\" % (cutinfo11[varnames[0][0]][0], varnames[0][1]), \n fontsize=self.labelsize)\n else:\n plt.xlabel(varnames[0][0], \n fontsize=self.labelsize)\n plt.ylabel(\"Number of measurements\", fontsize=self.labelsize)\n\n # set margins and format axis labels\n x0, x1, y0, y1 = plt.axis()\n if logy:\n plt.axis((x0, x1,\n 0.5, y1*10))\n else:\n plt.axis((x0, x1,\n 0.5, y1*(1+0.2)))\n ax = plt.gca()\n ax.tick_params(labelsize=self.ticklabelsize)\n plt.gcf().subplots_adjust(bottom=0.12)\n\n # Add mean and std info\n # Only use info on good chips, should be the last group in the list\n mean = dfs[-1].mean() #series.stack().mean()\n std = dfs[-1].std() #series.stack().std()\n plt.figtext(0.4, 0.92,\n \"Mean: %.3g Std/Mean: %.3g\\nStd: %.3g\"%(mean, std/mean, std),\n fontsize=self.ticklabelsize)\n\n # Add cut lines if we have info\n if self.cutfile != None and varnames[0][0] in cutinfo11:\n plt.axvline(x=self.cuts[varnames[0][0]][2], linestyle='dashed', linewidth=2, color='grey')\n plt.axvline(x=self.cuts[varnames[0][0]][3], linestyle='dashed', linewidth=2, color='grey')\n plt.axvline(x=self.cuts[varnames[0][0]][0], linestyle='solid', linewidth=2, color='dimgrey')\n plt.axvline(x=self.cuts[varnames[0][0]][1], linestyle='solid', linewidth=2, color='dimgrey')\n\n # Add legend if we have labels\n if labels:\n plt.legend(loc='best', ncol=2)\n\n # Save figure\n plt.savefig(\"%s/%s%s.pdf\" % (self.outputdir, varnames[0][0], postfix))\n plt.clf()", "def 
_conditions(self, beg=-90, intvl=20, con_type='ori', stim='bar', \n\t\t\t\t\tbiphasic=True, unit='deg', con_list=[], temp_freq = 2):\n\t\t\n\t\t\n\t\tcon_types = ['ori', 'spat_freq', 'temporal_freq', 'chromatic', 'dl_bar']\n\t\tstims = ['bar', 'grating']\n\t\t\n\t\t\n\t\t# Checking if condition and stimulus type recognised. \n\t\tif not con_type.lower() in con_types:\n\t\t\tprint('con_type not recognised. ' \n\t\t\t\t\t'Predefined options, if desired, are %s \\n'%con_types\n\t\t\t\t\t)\n\n\t\tif not stim.lower() in stims:\n\t\t\tprint('stimulus not recognised. ' \n\t\t\t\t\t'Predefined options, if desired, are %s \\n'%con_types\n\t\t\t\t\t)\n\n\n\t\t\n\t\tn_con = self.parameters['conditions']\n\t\t\n\t\tself.parameters['condition_type'] = con_type.lower()\n\t\tself.parameters['condition_unit'] = unit.capitalize()\n\t\tself.parameters['stimulus'] = stim.lower()\n\t\t\n\t\tif stim.lower() == stims[1]:\n\t\t\t# Gratings are GENERALLY not biphasic\n\t\t\tself.parameters['biphasic'] = 'N/A'\n\t\telse:\n\t\t\tself.parameters['biphasic'] = biphasic\n\t\t\n\t\t# Address issue of whether the sampling rate suits teh temporal frequency of \n\t\t# the grating for FFT analysis\n\t\tif stim.lower() == 'grating':\n\t\t\tself.parameters['temp_freq'] = float(temp_freq)\n\t\t\t\n\t\t\t# Sample rate must be a multiple of F1/temp_freq for it to be a frequency measured\n\t\t\t# in the FFT.\n\t\t\tsamp_rate = 1/float(self.bin_width)\n\t\t\t\n\t\t\t\n\t\t\tassert samp_rate % temp_freq == 0., ('Bin_width (%s) is incompatible wih obtaining' \n\t\t\t\t\t\t\t\t\t\t\t\t 'an FFT containing the specified temp_freq (%s). '\n\t\t\t\t\t\t\t\t\t\t\t\t 'The sampling frequency (1/bin_width) must be a'\n\t\t\t\t\t\t\t\t\t\t\t\t 'multiple of the temp_freq. \\n\\n Try as a' \n\t\t\t\t\t\t\t\t\t\t\t\t 'bin_width %s and rerun self._sort().'\n\t\t\t\t\t\t\t\t\t\t\t\t % (self.bin_width, temp_freq, \n\t\t\t\t\t\t\t\t\t\t\t\t\t1/(np.ceil(samp_rate/float(temp_freq))*temp_freq)))\n\t\t\n\t\tself.cond_label = []\n\n\t\t\n\t\tdef circ(ori, bound = 360):\n\t\t\t\"\"\"Func that Ensures all orientation values are between 0 and 360 degrees.\n\t\t\t\"\"\"\n\t\t\t# ori[ori<-360] += 720\n\t\t\t# ori[ori<0] += 360\n\t\t\t# ori[ori>360] -= 360\n\t\t\t# ori[ori>720] -= 720\n\n\n\t\t\treturn ori % bound\n\n\t\t# if list of conditions provided directly\n\t\tif len(con_list) > 0:\n\t\t\t\n\t\t\t# Must match number of conditions\n\t\t\tassert len(con_list) == n_con, ('the number of labels provided '\n\t\t\t\t\t\t\t\t\t\t'manually (%s) does not match the '\n\t\t\t\t\t\t\t\t\t\t'number of conditions (%s).' 
% \n\t\t\t\t\t\t\t\t\t\t(len(con_list), n_con))\n\t\t\t \n\t\t\t# Must all be strings \n\t\t\tassert all(isinstance(l, str) for l in con_list), ('not all the '\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'labels provided '\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'are strings')\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \n\t\t\t# List of conditions as strings\n\t\t\tself.cond_label = con_list\n\t\t\t\n\t\t\t# Convert to floats\n\t\t\t# Relying on numpy conversion error should list be unable to convert to float.\n\t\t\tself.conditions = np.array(con_list).astype('float')\n\t\t\t\n\t\t\t\n\t\t\tif biphasic:\n\t\t\t\t\n\n\t\t\t\t# self.conditions has been defined as an np.ndarray\n\t\t\t\tself.conditions2 = self.conditions \n\n\t\t\t# # Generate list of strings or labels\n\t\t\t# for c in range(n_con):\n\t\t\t# label = '%s / %s %s' %(self.conditions[c], self.conditions2[c],\n\t\t\t# self.parameters['condition_unit'])\n\t\t\t# self.cond_label.append(label)\n\n\t\t\t# else:\n\t\t\t# for c in range(n_con):\n\t\t\t\t\t\n\t\t\t# label = '%s %s' %(self.conditions[c],\n\t\t\t# self.parameters['condition_unit'])\n\t\t\t# self.cond_label.append(label)\n\n\t\t\t\t\n\t\t\n\t\t# if condition tpye is orientation\n\t\telif con_type.lower() == con_types[0]:\n\t\t\t\n\t\t\t# Generate full range of conditions\n\t\t\tself.conditions = circ(np.arange(beg, beg+(n_con*intvl), intvl))\n\t\t\t\n\t\t\tassert len(self.conditions) == n_con, ('The amount of condition labels (%s) '\n\t\t\t\t\t\t\t\t\t\t\t'and conditions (%s) do not match; '\n\t\t\t\t\t\t\t\t\t\t\t'check your condition parameters' % \n\t\t\t\t\t\t\t\t\t\t\t(self.cond_label.size, n_con))\n\t\t\t\n\t\t\tif biphasic:\n\t\t\t\t\n\n\t\t\t\t# self.conditions has been defined as an np.ndarray\n\t\t\t\tself.conditions2 = circ(self.conditions + 180) \n\n\t\t\t\t# Generate list of strings or labels\n\t\t\t\tfor c in range(n_con):\n\t\t\t\t\tlabel = '%s / %s %s' %(self.conditions[c], self.conditions2[c],\n\t\t\t\t\t\t\t\t\t\t self.parameters['condition_unit'])\n\t\t\t\t\tself.cond_label.append(label)\n\t\t\t# Generate list of strings for non-biphasic. 
\n\t\t\telse:\n\t\t\t\t\n\t\t\t\tfor c in range(n_con):\n\t\t\t\t\tlabel = '%s %s' %(self.conditions[c],\n\t\t\t\t\t\t\t\t\t self.parameters['condition_unit'])\n\t\t\t\t\tself.cond_label.append(label)\n\t\t\t\t\t\n\t\t# IF condition type is Spat Freq \n\t\telif con_type.lower() == con_types[1]:\n\t\t\tself.conditions = np.arange(beg, beg + (n_con*intvl), intvl)\n\t\t\t\n\t\t\tassert len(self.conditions) == n_con, ('The amount of condition labels (%s) '\n\t\t\t\t\t\t\t\t\t\t\t'and conditions (%s) do not match; '\n\t\t\t\t\t\t\t\t\t\t\t'check your condition parameters' % \n\t\t\t\t\t\t\t\t\t\t\t(self.cond_label.size, n_con))\n\n\t\t\tfor c in range(n_con):\n\t\t\t\tlabel = '%s %s' %(self.conditions[c], self.parameters['condition_unit'])\n\t\t\t\tself.cond_label.append(label)\n\n\t\t# IF condition type is dl_bar\t\t\t\t\t\n\t\telif con_type.lower() == con_types[4]:\n\n\t\t\tself.conditions = np.array([0, 1])\n\t\t\tself.cond_label = ['dark','light']\n\n\t\t\tif len(con_list) > 0:\n\t\t\t\tself.conditions = np.array(con_list).astype('float')\n\n\t\t\t\tif con_list[0] > con_list[1]:\n\t\t\t\t\tself.cond_label = self.cond_label[::-1]\n\n\t\t\tif biphasic:\n\n\t\t\t\tself.conditions2 = self.conditions\n\n\t\t\t\tself.cond_label.extend(\n\t\t\t\t\t[\n\t\t\t\t\t\tcl + ' second'\n\t\t\t\t\t\tfor cl in self.cond_label\n\t\t\t\t\t]\t\n\t\t\t\t\t)\n\n\n\n\t\t# if condition type is not predefined in this method, presume linear range \n\t\telif not con_type.lower() in con_types:\n\t\t\t\n\t\t\tself.conditions = np.arange(beg, beg+(n_con*intvl), intvl)\n\n\n\t\t\tif biphasic:\n\t\t\t\t\n\n\t\t\t\t# self.conditions has been defined as an np.ndarray\n\t\t\t\tself.conditions2 = self.conditions \n\n\t\t\t\t# Generate list of strings or labels\n\t\t\t\tfor c in range(n_con):\n\t\t\t\t\tlabel = '%s / %s %s' %(self.conditions[c], self.conditions2[c],\n\t\t\t\t\t\t\t\t\t\t self.parameters['condition_unit'])\n\t\t\t\t\tself.cond_label.append(label)\n\n\t\t\telse:\n\t\t\t\tfor c in range(n_con):\n\t\t\t\t\t\n\t\t\t\t\tlabel = '%s %s' %(self.conditions[c],\n\t\t\t\t\t\t\t\t\t self.parameters['condition_unit'])\n\t\t\t\t\tself.cond_label.append(label)", "def SplitValues(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_SplitCurve_SplitValues(self, *args)", "def gaussify_bands(self, sigma):\n for key, band in self.bands.items():\n self.gauss_bands[key] = gaussian_filter(input=band, sigma=sigma)", "def fit_data(curves, snType='Ia',bands=None, models=None, params=None, bounds={}, ignore=None, constants=None,\n\t\t\t method='parallel',t0_guess=None,refModel=None,effect_names=[],effect_frames=[],fitting_method='nest',\n\t\t\t dust=None,flip=False,guess_amplitude=True,seriesError=None,showPlots=False,microlensing=None,\n\t\t\t kernel='RBF',seriesGrids=None,refImage='image_1',nMicroSamples=100,color_curve=None,verbose=True,**kwargs):\n\n\t#get together user arguments\n\targs = locals()\n\tfor k in kwargs.keys():\n\t\targs[k]=kwargs[k]\n\n\tif isinstance(curves,(list,tuple)):\n\t\targs['curves']=[]\n\t\tfor i in range(len(curves)):\n\t\t\ttemp=_sntd_deepcopy(curves[i])\n\t\t\ttemp.nsn=i+1\n\t\t\targs['curves'].append(temp)\n\t\targs['parlist']=True\n\telse:\n\t\targs['curves']=_sntd_deepcopy(curves)\n\t\targs['parlist']=False\n\n\targs['bands'] = [bands] if bands is not None and not isinstance(bands,(tuple,list,np.ndarray)) else bands\n\t#sets the bands to user's if defined (set, so that they're unique), otherwise to all the bands that exist in curves\n\n\targs['bands'] = list(set(bands)) if bands is not None else 
None\n\n\targs['bands'] = list(curves.bands) if not isinstance(curves,(list,tuple,np.ndarray)) else list(curves[0].bands)\n\n\tmodels=[models] if models and not isinstance(models,(tuple,list)) else models\n\tif not models:\n\t\tmod,types=np.loadtxt(os.path.join(__dir__,'data','sncosmo','models.ref'),dtype='str',unpack=True)\n\t\tmodDict={mod[i]:types[i] for i in range(len(mod))}\n\t\tif snType!='Ia':\n\t\t\tmods = [x[0] for x in sncosmo.models._SOURCES._loaders.keys() if x[0] in modDict.keys() and modDict[x[0]][:len(snType)]==snType]\n\t\telif snType=='Ia':\n\t\t\tmods = [x[0] for x in sncosmo.models._SOURCES._loaders.keys() if 'salt2' in x[0]]\n\telse:\n\t\tmods=models\n\tmods=set(mods)\n\targs['mods']=mods\n\t#sncosmo fitting function for model determination\n\targs['sn_func'] = {'minuit': sncosmo.fit_lc, 'mcmc': sncosmo.mcmc_lc, 'nest': nest_lc}\n\n\t#get any properties set in kwargs that exist for the defined fitting function\n\targs['props'] = {x: kwargs[x] for x in kwargs.keys() if\n\t\t\t\t\t x in [y for y in inspect.signature(args['sn_func'][fitting_method]).parameters] and x != 'verbose'}\n\n\tif method not in ['parallel','series','color']:\n\t\traise RuntimeError('Parameter \"method\" must be \"parallel\",\"series\", or \"color\".')\n\tif microlensing is not None and method !='parallel':\n\t\tprint('Microlensing uncertainty only set up for parallel right now, switching to parallel method...')\n\t\tmethod='parallel'\n\t\n\tif method=='parallel':\n\t\tif args['parlist']:\n\t\t\tpar_arg_vals=[]\n\t\t\tfor i in range(len(args['curves'])):\n\t\t\t\ttemp_args={}\n\t\t\t\tfor par_key in ['snType','bounds','constants','t0_guess','refModel','color_curve','seriesGrids']:\n\t\t\t\t\tif isinstance(args[par_key],(list,tuple,np.ndarray)):\n\t\t\t\t\t\ttemp_args[par_key]=args[par_key][i] \n\t\t\t\tfor par_key in ['bands','models','ignore','params']:\n\t\t\t\t\tif isinstance(args[par_key],(list,tuple,np.ndarray)) and np.any([isinstance(x,(list,tuple,np.ndarray)) for x in args[par_key]]):\n\t\t\t\t\t\ttemp_args[par_key]=args[par_key][i] \n\t\t\t\tpar_arg_vals.append([args['curves'][i],temp_args])\n\t\t\tcurves=pyParz.foreach(par_arg_vals,_fitparallel,[args])\n\n\t\telse:\n\t\t\tcurves=_fitparallel(args)\n\telif method=='series':\n\t\tif args['parlist']:\n\n\t\t\tpar_arg_vals=[]\n\t\t\tfor i in range(len(args['curves'])):\n\t\t\t\ttemp_args={}\n\t\t\t\tfor par_key in ['snType','bounds','constants','t0_guess','refModel','color_curve','seriesGrids']:\n\t\t\t\t\tif isinstance(args[par_key],(list,tuple,np.ndarray)):\n\t\t\t\t\t\ttemp_args[par_key]=args[par_key][i] \n\t\t\t\tfor par_key in ['bands','models','ignore','params']:\n\t\t\t\t\tif isinstance(args[par_key],(list,tuple,np.ndarray)) and np.any([isinstance(x,(list,tuple,np.ndarray)) for x in args[par_key]]):\n\t\t\t\t\t\ttemp_args[par_key]=args[par_key][i] \n\t\t\t\tpar_arg_vals.append([args['curves'][i],temp_args])\n\t\t\tcurves=pyParz.foreach(par_arg_vals,_fitseries,[args])\n\t\telse:\n\t\t\tcurves=_fitseries(args)\n\n\telse:\n\t\tif args['parlist']:\n\t\t\tpar_arg_vals=[]\n\t\t\tfor i in range(len(args['curves'])):\n\t\t\t\ttemp_args={}\n\t\t\t\tfor par_key in ['snType','bounds','constants','t0_guess','refModel','color_curve','seriesGrids']:\n\t\t\t\t\tif isinstance(args[par_key],(list,tuple,np.ndarray)):\n\t\t\t\t\t\ttemp_args[par_key]=args[par_key][i] \n\t\t\t\tfor par_key in ['bands','models','ignore','params']:\n\t\t\t\t\tif isinstance(args[par_key],(list,tuple,np.ndarray)) and np.any([isinstance(x,(list,tuple,np.ndarray)) for x in 
args[par_key]]):\n\t\t\t\t\t\ttemp_args[par_key]=args[par_key][i] \n\t\t\t\tpar_arg_vals.append([args['curves'][i],temp_args])\n\t\t\tcurves=pyParz.foreach(par_arg_vals,_fitColor,[args])\n\t\telse:\n\t\t\tcurves=_fitColor(args)\n\n\treturn curves", "def filt_hp(sig: np.ndarray, Ss: int, Cfs: int, Cfs1: None,\n order=5) -> np.ndarray:\n nyq = 0.5 * Ss\n normal_cutoff = Cfs / nyq\n b, a = butter(order, normal_cutoff, btype='high', analog=False)\n return lfilter(b, a, sig)", "def apply_on_bins(self, bins, functions, return_dict=False):\n binned_data = self.bin(bins)\n\n if return_dict:\n return_values = {\n name: np.asarray(\n [func(bin, 0) for bin in binned_data]\n ).flatten()\n for name, func in functions.items()\n }\n else:\n return_values = GroupedArrays()\n for name, func in functions.items():\n return_values[name] = np.asarray([\n func(bin, 0) for bin in binned_data]\n ).flatten()\n\n return return_values", "def setCutRatios(self, cutRatios):\n\t\t# TODO: Check if this is an array\n\t\t# Adjust the ratio according to specifications\n\t\tfor i in range(len(cutRatios)):\n\t\t\tif cutRatios[i] < 0.5:\n\t\t\t\tcutRatios[i] = 1 - cutRatios[i]\n\t\t\telif cutRatios[i] > 1.0:\n\t\t\t\t# Scary\n\t\t\t\t# Maybe raise an error\n\t\t\t\tcutRatios[i] = cutRatios[i] - 1\n\t\tself.cutRatios = cutRatios", "def hist_data(list_source, frq=151, ln=False, data_lim=None):\n fluxes = []\n\n if data_lim is not None:\n min_acceptable = data_lim[0]\n else:\n min_acceptable = None\n if data_lim is not None:\n max_acceptable = data_lim[1]\n else:\n max_acceptable = None\n \n for gleam_obj in list_source:\n I = gleam_obj.flux_by_frq[frq]\n if is_constrained(I, min_acceptable, max_acceptable):\n if ln:\n fluxes.append(np.log(I))\n else:\n fluxes.append(I)\n \n return np.array(fluxes)", "def apply_trigger_first(cut_fn):\n def wrapped(arrays, cut):\n arrays = svjflatanalysis.arrayutils.apply_trigger_and_jetpt550(arrays, 2018)\n return cut_fn(arrays, cut)\n return wrapped", "def __init__(self, NumofBandits=10, MeanRange=[-5, 5], sigma=1):\n self.NumofBandits = NumofBandits\n self.sigma = sigma\n # Random generate the mean value of each action\n self.MeanList = np.random.uniform(MeanRange[0], MeanRange[1], self.NumofBandits)", "def fit(self, data, discrete_features=None, bandwidth=1.0,\n num_discretization_bins=4, pseudocount=1.0):\n if bandwidth <= 0:\n raise ValueError(\"Bandwidth must be positive.\")\n \n if discrete_features != None and \\\n len(discrete_features) != data.shape[1]:\n raise ValueError(\"Discrete features array and data arrays\"\n \"shape don't match.\")\n \n if num_discretization_bins < 0:\n raise ValueError(\"Number of descretization bins can't be negetive.\")\n \n if num_discretization_bins == 0:\n for bool in discrete_features:\n if bool:\n raise ValueError(\"Number of descretization bins can't be\"\n \"zero if there is a continuous feature.\")\n \n if pseudocount < 0:\n raise ValueError(\"Pseudocount can't be negative.\")\n \n if discrete_features == None:\n discrete_features = [False] * data.shape[1]\n\n self.num_features_ = data.shape[1]\n self.discrete_features_ = discrete_features\n self.num_discretization_bins_ = num_discretization_bins\n\n discretized_data = np.array(data, copy=True)\n continuous_data = data[:, np.invert(discrete_features)]\n\n discretizer = KBinsDiscretizer(n_bins=num_discretization_bins,\n encode='ordinal', strategy='quantile')\n discretizer.fit(continuous_data)\n\n discretized_data[:, np.invert(discrete_features)] = \\\n discretizer.transform(continuous_data)\n 
self.discretizer_ = discretizer\n\n self.model_ = BayesianNetwork.from_samples(discretized_data,\n algorithm='chow-liu', n_jobs=-1, pseudocount=pseudocount)\n self.model_.bake()\n \n # Table for bin edges\n bins = discretizer.bin_edges_\n\n # Kdes for continuous data.\n self.tnkdes_ = []\n\n i = 0\n for k in range(self.num_features_):\n if discrete_features[k]:\n continue\n \n bins[i][0] = -np.inf\n bins[i][len(bins[i]) - 1] = np.inf\n bin_kdes = []\n \n # loop of boundary\n for j in range(len(bins[i]) - 1):\n # Bound for this bin.\n lower_bound = bins[i][j]\n upper_bound = bins[i][j+1]\n \n # Create a kde using the data in the current bin.\n current_feature_data = data[:, k]\n cur_bin_data = current_feature_data[discretized_data[:, k] == j]\n kde = TruncatedNormalKernelDensity(bandwidth=bandwidth,\n lowerbound=lower_bound, upperbound=upper_bound)\n kde.fit(cur_bin_data)\n bin_kdes.append(kde)\n \n i = i + 1\n self.tnkdes_.append(bin_kdes)", "def create_intervals_hsb(confidence, n_samples, data):\n print(data)\n intervals = []\n if not isinstance(data, Iterable):\n assert isinstance(data, float)\n return [create_interval_hsb(confidence, n_samples, data)]\n for data_point in data:\n try:\n assert isinstance(data_point, float)\n except AssertionError:\n data_point = float(data_point)\n intervals.append(create_interval_hsb(confidence, n_samples, data_point))\n return intervals", "def apply_cuts(chain, isotope, tree, volume):\n\n #open file which inlcudes fill levels and fill days\n infile = open(\"/users/langrock/plotting_macros/Partial_fill/split_level.txt\",\"r\")\n\n #define root file to save root files to\n outputroot = ROOT.TFile(\"/data/langrock/PartialFill/Full/root/\" + isotope + \"_\" + chain + \"_\" + volume +\".root\",\"recreate\")\n\n #define histograms\n hist = define_histograms.DefineHistograms()\n\n events_full = 0\n events_pocut = 0\n events_deltatcut = 0\n events_bifidvolcut = 0\n events_deltarcut = 0\n events_bicut = 0\n events_allcut = 0\n\n #get fill days and fill level from file, loop through each line and perform the cut selection on each day of filling\n for line in infile:\n words = line.split()\n\n if len(words)!=0:\n\n d = float(words[0])\n z_level = float(words[1])\n \n #loop through the events in the root file\n for i in range(tree.GetEntries()):\n #get variables from previous events\n tree.GetEntry(i-1)\n nhits_prev = tree.nhits\n radius_prev = math.sqrt(tree.posx*tree.posx+tree.posy*tree.posy+tree.posz*tree.posz)\n time_prev = tree.uTNSecs + tree.uTSecs*math.pow(10,9) + tree.uTDays*24*60*60*math.pow(10,9)\n energy_prev = tree.energy\n fitValid_prev = tree.fitValid\n x_prev = tree.posx\n y_prev = tree.posy\n z_prev = tree.posz\n\n #get variables from current events\n tree.GetEntry(i)\n nhits = tree.nhits\n radius = math.sqrt(tree.posx*tree.posx+tree.posy*tree.posy+tree.posz*tree.posz)\n time = tree.uTNSecs + tree.uTSecs*math.pow(10,9) + tree.uTDays*24*60*60*math.pow(10,9)\n energy = tree.energy\n fitValid = tree.fitValid\n x = tree.posx\n y = tree.posy\n z = tree.posz\n\n #get day at which events were simulated\n day = tree.uTDays\n\n #define time differance and event distance\n delta_t = time - time_prev\n delta_r = math.sqrt(math.pow((x_prev - x),2) + math.pow((y_prev - y),2) + math.pow((z_prev - z),2))\n\n fidvol_value = 5000\n \n #if the event was generated on the current day of filling, apply cuts\n if d == day:\n\n #fill histograms and count events\n hist.h_energy_full.Fill(energy)\n hist.h_nhitspo_full.Fill(nhits)\n hist.h_nhitsbi_full.Fill(nhits_prev)\n 
hist.h_deltat_full.Fill(delta_t)\n hist.h_deltar_full.Fill(delta_r)\n hist.h_rfidvolbi_full.Fill(radius_prev)\n\n events_full += 1\n\n #apply fiducial vlume cut\n if radius> 0 and radius < fidvol_value and z >= z_level+653:\n\n hist.h_energy_pocut.Fill(energy)\n hist.h_nhitspo_pocut.Fill(nhits)\n hist.h_nhitsbi_pocut.Fill(nhits_prev)\n hist.h_deltat_pocut.Fill(delta_t)\n hist.h_deltar_pocut.Fill(delta_r)\n hist.h_rfidvolbi_pocut.Fill(radius_prev)\n \n events_pocut += 1\n\n #bipo212 cut selection\n if chain == \"bipo212\":\n #apply polonium candidate cut\n if nhits >= 450 and nhits <= 580:\n\n hist.h_energy_deltatcut.Fill(energy)\n hist.h_nhitspo_deltatcut.Fill(nhits)\n hist.h_nhitsbi_deltatcut.Fill(nhits_prev)\n hist.h_deltat_deltatcut.Fill(delta_t)\n hist.h_deltar_deltatcut.Fill(delta_r)\n hist.h_rfidvolbi_deltatcut.Fill(radius_prev)\n\n events_deltatcut += 1\n\n #time difference cut\n if delta_t < 3690:\n\n hist.h_energy_bifidvolcut.Fill(energy)\n hist.h_nhitspo_bifidvolcut.Fill(nhits)\n hist.h_nhitsbi_bifidvolcut.Fill(nhits_prev)\n hist.h_deltat_bifidvolcut.Fill(delta_t)\n hist.h_deltar_bifidvolcut.Fill(delta_r)\n hist.h_rfidvolbi_bifidvolcut.Fill(radius_prev)\n\n events_bifidvolcut += 1\n\n #fiducial radius cut on bismuth candidate\n if radius_prev > 0 and radius_prev < fidvol_value and z_prev >= z_level+653:\n\n hist.h_energy_deltarcut.Fill(energy)\n hist.h_nhitspo_deltarcut.Fill(nhits)\n hist.h_nhitsbi_deltarcut.Fill(nhits_prev)\n hist.h_deltat_deltarcut.Fill(delta_t)\n hist.h_deltar_deltarcut.Fill(delta_r)\n hist.h_rfidvolbi_deltarcut.Fill(radius_prev)\n\n events_deltarcut += 1\n\n #distance cut\n if delta_r > 0 and delta_r < 1500:\n\n hist.h_energy_bicut.Fill(energy)\n hist.h_nhitspo_bicut.Fill(nhits)\n hist.h_nhitsbi_bicut.Fill(nhits_prev)\n hist.h_deltat_bicut.Fill(delta_t)\n hist.h_deltar_bicut.Fill(delta_r)\n hist.h_rfidvolbi_bicut.Fill(radius_prev)\n\n events_bicut += 1\n\n #nhits cut on the bismuth candidate\n if nhits_prev >= 100:\n\n hist.h_energy_allcut.Fill(energy)\n hist.h_nhitspo_allcut.Fill(nhits)\n hist.h_nhitsbi_allcut.Fill(nhits_prev)\n hist.h_deltat_allcut.Fill(delta_t)\n hist.h_deltar_allcut.Fill(delta_r)\n hist.h_rfidvolbi_allcut.Fill(radius_prev)\n\n events_allcut += 1\n \n #bipo214 cut selection\n elif chain == \"bipo214\":\n #nhits cut on polonium candidate\n if nhits >= 290 and nhits <= 450:\n\n hist.h_energy_deltatcut.Fill(energy)\n hist.h_nhitspo_deltatcut.Fill(nhits)\n hist.h_nhitsbi_deltatcut.Fill(nhits_prev)\n hist.h_deltat_deltatcut.Fill(delta_t)\n hist.h_deltar_deltatcut.Fill(delta_r)\n hist.h_rfidvolbi_deltatcut.Fill(radius_prev)\n\n events_deltatcut += 1\n\n #time difference cut\n if delta_t > 3690 and delta_t < 1798788:\n\n hist.h_energy_bifidvolcut.Fill(energy)\n hist.h_nhitspo_bifidvolcut.Fill(nhits)\n hist.h_nhitsbi_bifidvolcut.Fill(nhits_prev)\n hist.h_deltat_bifidvolcut.Fill(delta_t)\n hist.h_deltar_bifidvolcut.Fill(delta_r)\n hist.h_rfidvolbi_bifidvolcut.Fill(radius_prev)\n\n events_bifidvolcut += 1\n\n #fiducial volume cut on bismuth candidate\n if radius_prev > 0 and radius_prev < fidvol_value and z_prev >= z_level+653:\n\n hist.h_energy_deltarcut.Fill(energy)\n hist.h_nhitspo_deltarcut.Fill(nhits)\n hist.h_nhitsbi_deltarcut.Fill(nhits_prev)\n hist.h_deltat_deltarcut.Fill(delta_t)\n hist.h_deltar_deltarcut.Fill(delta_r)\n hist.h_rfidvolbi_deltarcut.Fill(radius_prev)\n \n events_deltarcut += 1\n\n #distance cut\n if delta_r > 0 and delta_r < 1500:\n\n hist.h_energy_bicut.Fill(energy)\n hist.h_nhitspo_bicut.Fill(nhits)\n 
hist.h_nhitsbi_bicut.Fill(nhits_prev)\n hist.h_deltat_bicut.Fill(delta_t)\n hist.h_deltar_bicut.Fill(delta_r)\n hist.h_rfidvolbi_bicut.Fill(radius_prev)\n\n events_bicut += 1\n\n #nhits cut on the bismuth candidate\n if nhits_prev >= 600:\n\n hist.h_energy_allcut.Fill(energy)\n hist.h_nhitspo_allcut.Fill(nhits)\n hist.h_nhitsbi_allcut.Fill(nhits_prev)\n hist.h_deltat_allcut.Fill(delta_t)\n hist.h_deltar_allcut.Fill(delta_r)\n hist.h_rfidvolbi_allcut.Fill(radius_prev)\n\n events_allcut += 1\n \n #write all histograms to file\n outputroot.Write()\n outputroot.Close()\n\n #create string with all event counts\n outputstring = isotope + \"\\t all events: \" + str(events_full) + \"\\t fiducial volume: \" + str(events_pocut) + \"\\t Po nhits cut: \" + str(events_deltatcut) + \"\\t Delta t cut: \" + str(events_bifidvolcut) + \"\\t fiducial volume: \" + str(events_deltarcut) + \"\\t Delta r cut: \" + str(events_bicut) + \"\\t Bi nhits cut: \" + str(events_allcut) + \"\\n \" \n\n return outputstring", "def select_sources(cat_table, cuts):\n nsrc = len(cat_table)\n full_mask = np.ones((nsrc), bool)\n for cut in cuts:\n if cut == 'mask_extended':\n full_mask *= mask_extended(cat_table)\n elif cut == 'select_extended':\n full_mask *= select_extended(cat_table)\n else:\n full_mask *= make_mask(cat_table, cut)\n\n lout = [src_name.strip() for src_name in cat_table['Source_Name'][full_mask]]\n return lout", "def binspecdattomatch( wavelength, flux, wavetomatch, fluxerr=[], sigclip=0,\n sumerrs=False ):\n w,f = wavelength, flux\n if len(fluxerr):\n df = fluxerr\n else :\n df=np.zeros(len(f))\n\n wavetomatch = np.asarray(wavetomatch)\n wavetomatch_halfbinwidth = np.diff(wavetomatch)/2.\n lastbinlow = wavetomatch[-1] - wavetomatch_halfbinwidth[-1]\n lastbinhigh = wavetomatch[-1] + wavetomatch_halfbinwidth[-1]\n wavebinedges = np.append( wavetomatch[:-1]-wavetomatch_halfbinwidth,\n np.array([lastbinlow,lastbinhigh]))\n\n wbinned, dwbinned, fbinned, dfbinned = [], [], [], []\n for i in range(len(wavebinedges)-1):\n wavebinmin=wavebinedges[i]\n wavebinmax=wavebinedges[i+1]\n iinbin = np.where((w>=wavebinmin)&(w<wavebinmax))\n\n winbin = w[iinbin]\n finbin = f[iinbin]\n dfinbin = df[iinbin]\n\n if sigclip :\n # use sigma clipping to reject outliers\n igoodval = isigclip( finbin, sigclip )\n if len(igoodval) :\n wbinval = np.mean( winbin[igoodval] )\n fbinval = np.mean( finbin[igoodval] )\n dwbinval = (winbin[igoodval].max() - winbin[igoodval].min())/2.\n #dwbinval = (wbin.max() - wbin.min())/2.\n if sumerrs :\n # flux uncertainty is the quadratic sum of the mean flux error\n # and the error of the mean\n dfbinval1 = np.std( finbin[igoodval] ) / np.sqrt(len(igoodval)-2)\n dfbinval2 = np.mean( dfinbin[igoodval] ) / np.sqrt(len(igoodval)-2)\n dfbinval = np.sqrt( dfbinval1**2 + dfbinval2**2 )\n else :\n # flux uncertainty is the std error of the mean\n dfbinval = np.std( finbin[igoodval] ) / np.sqrt(len(igoodval)-2)\n else :\n # use a straight median\n wbinval = np.median( winbin )\n fbinval = np.median( finbin )\n dwbinval = (winbin[-1]-winbin[0])/2.\n if sumerrs :\n # flux uncertainty is the quadratic sum of the mean flux error\n # and the error of the mean\n dfbinval1 = np.std( finbin )/np.sqrt(len(finbin)-2)\n dfbinval2 = np.mean( dfinbin )\n dfbinval = np.sqrt( dfbinval1**2 + dfbinval2**2 )\n else :\n # flux uncertainty is the std error of the mean\n dfbinval = np.std( finbin ) / np.sqrt(max(1,len(finbin)))\n\n wbinned.append( wbinval )\n fbinned.append( fbinval )\n dwbinned.append( dwbinval )\n 
dfbinned.append( dfbinval )\n\n return( np.array( wbinned ), np.array(dwbinned), np.array(fbinned), np.array(dfbinned) )", "def cutoff(self, *args, **kwargs) -> Any:\n pass", "def __init__(self,ptbins,etabins,data=None):\n self._ptbins = ptbins\n self._etabins = etabins\n if data is not None:\n self._data = data\n else:\n self._data = [ [ (0,0) for i in range(len(self._etabins)+1) ] for i in range(len(self._ptbins)+1) ]\n self.__check()", "def discretize_all(self, cond = 5, bins=3):\n\n self.bin_discretize(np.where(self.arity>cond)[0],bins)\n self.data=self.data.astype(int)", "def generate_cut_strings(var, bin_edges, bottom_inclusive=True):\n incl = '=' if bottom_inclusive else ''\n return ['{var} >{incl} {low:g} && {var} < {high:g}'.format(var=var, low=bin_low,\n high=bin_high, incl=incl)\n for (bin_low, bin_high) in bin_edges]", "def test_skl_hist_gradient_boosting_with_categorical():\n # We don't yet support HistGradientBoostingClassifier with categorical splits\n # So make sure that an exception is thrown properly\n rng = np.random.RandomState(0)\n n_samples = 1000\n f1 = rng.rand(n_samples)\n f2 = rng.randint(4, size=n_samples)\n X = np.c_[f1, f2]\n y = np.zeros(shape=n_samples)\n y[X[:, 1] % 2 == 0] = 1\n clf = HistGradientBoostingClassifier(max_iter=20, categorical_features=[1])\n clf.fit(X, y)\n np.testing.assert_array_equal(clf.is_categorical_, [False, True])\n\n with pytest.raises(\n NotImplementedError, match=r\"Categorical splits are not yet supported.*\"\n ):\n treelite.sklearn.import_model(clf)", "def model_hist(xvar, yvar, modfuncs, nbins=95, crange=(-10.0, 10.0)):\n hists = [TH2D(\n 'hmodel{0}{1}'.format(c, i), 'hmodel{0}{1}'.format(c, i),\n nbins, crange[0], crange[1],\n nbins, crange[0], crange[1]\n ) for (i, c) in ic]\n for xbin in range(nbins):\n xlo = hists[0].GetXaxis().GetBinLowEdge(xbin+1)\n xup = hists[0].GetXaxis().GetBinUpEdge(xbin+1)\n for ybin in range(nbins):\n ylo = hists[0].GetXaxis().GetBinLowEdge(ybin+1)\n yup = hists[0].GetXaxis().GetBinUpEdge(ybin+1)\n name = 'bin_{0}_{1}'.format(xbin, ybin)\n xvar.setRange(name, xlo, xup)\n yvar.setRange(name, ylo, yup)\n for hist, modfunc in zip(hists, modfuncs):\n integral = modfunc.createIntegral(\n RooArgSet(xvar, yvar),\n RooFit.NormSet(RooArgSet(xvar, yvar)),\n RooFit.Range(name)\n ).getVal()\n hist.SetBinContent(xbin+1, ybin+1, integral)\n return hists", "def test_wrong_number_of_filter_thresholds(self, threshold, n_inputs):\n with pytest.raises(ValueError, match=\"If multiple filter_thresholds are provided\"):\n coefficients(self.dummy_fn, n_inputs, 2, True, filter_threshold=threshold)", "def cut(x, y, scalars, area):\n xmin, xmax, ymin, ymax = area\n if len(x) != len(y):\n raise ValueError(\"x and y must have the same length\")\n inside = [i for i in xrange(len(x))\n if x[i] >= xmin and x[i] <= xmax and y[i] >= ymin and y[i] <= ymax]\n return [x[inside], y[inside], [s[inside] for s in scalars]]", "def cut(x, y, scalars, area):\n xmin, xmax, ymin, ymax = area\n if len(x) != len(y):\n raise ValueError(\"x and y must have the same length\")\n inside = [i for i in xrange(len(x))\n if x[i] >= xmin and x[i] <= xmax\n and y[i] >= ymin and y[i] <= ymax]\n return [x[inside], y[inside], [s[inside] for s in scalars]]" ]
[ "0.55629873", "0.5544333", "0.54654413", "0.53966707", "0.5293295", "0.5175403", "0.5154368", "0.50937045", "0.5059277", "0.5027049", "0.50096345", "0.49881732", "0.49795693", "0.4978986", "0.49496424", "0.4933744", "0.4921186", "0.49105307", "0.49013457", "0.48807377", "0.48686138", "0.48626393", "0.48209476", "0.4818179", "0.48062032", "0.47978446", "0.47949582", "0.4768826", "0.47638527", "0.47498536", "0.4729141", "0.4719727", "0.47184148", "0.4717857", "0.47169614", "0.4711209", "0.4709669", "0.4705734", "0.47018006", "0.469633", "0.46939197", "0.46908796", "0.4687088", "0.46819073", "0.46701536", "0.46660504", "0.46658692", "0.46604714", "0.46585548", "0.465407", "0.46426988", "0.46391135", "0.4635308", "0.46216506", "0.46153283", "0.4602054", "0.45966792", "0.45963544", "0.45899552", "0.45833117", "0.45666677", "0.4562584", "0.45595375", "0.45471376", "0.45424253", "0.45334724", "0.45323044", "0.45229122", "0.45212153", "0.4511119", "0.45106092", "0.45073536", "0.4505167", "0.45008215", "0.44973966", "0.44961432", "0.44960845", "0.44877875", "0.4483125", "0.44813848", "0.44791853", "0.44779688", "0.4471486", "0.44703308", "0.44654804", "0.446297", "0.4454724", "0.44518557", "0.44458064", "0.4439374", "0.4434735", "0.44282925", "0.44160154", "0.44157645", "0.44118264", "0.44085428", "0.44051948", "0.43994242", "0.43938276", "0.43849838" ]
0.6186934
0
Basic plotting style for a single roccurve, based on multiple signal and bkgs samples. Expects an ax object to be given; this function is not standalone
def plot_roccurve(signals, bkgs, cut_function, cut_values, ax):
    eff_sig, eff_bkg, n_pass_sig, n_pass_bkg, n_total_sig, n_total_bkg = roccurve(signals, bkgs, cut_function, cut_values)
    return _draw_roccurve(eff_sig, eff_bkg, cut_values, ax)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_single_roccurve(signals, bkgs, cut_function, cut_values, ax=None):\n # Get a default ax if none is given\n if ax is None:\n import matplotlib.pyplot as plt\n fig = plt.figure(figsize=(8,8))\n ax = fig.gca()\n # Plot the base line\n ax.plot([0.0,1.0], [0.0,1.0], linestyle='--', color='xkcd:gray')\n # Plot the single roccurve\n line = plot_roccurve(signals, bkgs, cut_function, cut_values, ax=ax)\n line.set_label(bkgs[0].get_category())\n # Plot settings\n ax.set_xlim(0.0, 1.05)\n ax.set_ylim(0.0, 1.05)\n ax.set_ylabel('Signal eff.', fontsize=DEFAULT_FONTSIZE)\n ax.set_xlabel('Bkg eff.', fontsize=DEFAULT_FONTSIZE)\n ax.legend(fontsize=DEFAULT_FONTSIZE)\n return ax", "def plot_roccurves_per_bkg(signals, bkgs, cut_function, cut_values, ax=None):\n # Get a default ax if none is given\n if ax is None:\n import matplotlib.pyplot as plt\n fig = plt.figure(figsize=(8,8))\n ax = fig.gca()\n # Get signal efficieny once\n eff_sig, n_pass_sig, n_total_sig = get_eff(svjflatanalysis.iterate(signals), cut_function, cut_values)\n # Perform some basic plotting setup\n ax.plot([0.0,1.0], [0.0,1.0], linestyle='--', color='xkcd:gray')\n ax.set_xlim(0.0, 1.05)\n ax.set_ylim(0.0, 1.05)\n ax.set_ylabel('Signal eff.', fontsize=DEFAULT_FONTSIZE)\n ax.set_xlabel('Bkg eff.', fontsize=DEFAULT_FONTSIZE)\n ax.legend(fontsize=DEFAULT_FONTSIZE)\n # Then efficiencies per bkg category (ttjets, qcd, ...)\n bkg_categories = list(set([ b.get_category() for b in bkgs ]))\n bkg_categories.sort()\n lines = {}\n for bkg_cat in bkg_categories:\n # Get Datasets that have this category\n bkgs_this_cat = [ b for b in bkgs if b.get_category() == bkg_cat ]\n # Compute efficiency in this category\n eff_bkg, n_pass_bkg, n_total_bkg = get_eff(svjflatanalysis.iterate(bkgs_this_cat), cut_function, cut_values)\n # Draw roccurve for this category\n line = _draw_roccurve(eff_sig, eff_bkg, cut_values, ax)\n line.set_label(bkg_cat)\n # Save this line in a dict for potential outputting/modifying\n lines[bkg_cat] = line\n return ax", "def plot(\n ecg, \n sample_rate = 500, \n title = 'ECG 12', \n lead_index = lead_index, \n lead_order = None,\n style = None,\n columns = 2,\n row_height = 6,\n show_lead_name = True,\n show_grid = True,\n show_separate_line = True,\n ):\n\n if not lead_order:\n lead_order = list(range(0,len(ecg)))\n secs = len(ecg[0])/sample_rate\n leads = len(lead_order)\n rows = ceil(leads/columns)\n # display_factor = 2.5\n display_factor = 1\n line_width = 0.5\n fig, ax = plt.subplots(figsize=(secs*columns * display_factor, rows * row_height / 5 * display_factor))\n display_factor = display_factor ** 0.5\n fig.subplots_adjust(\n hspace = 0, \n wspace = 0,\n left = 0, # the left side of the subplots of the figure\n right = 1, # the right side of the subplots of the figure\n bottom = 0, # the bottom of the subplots of the figure\n top = 1\n )\n\n fig.suptitle(title)\n\n x_min = 0\n x_max = columns*secs\n y_min = row_height/4 - (rows/2)*row_height\n y_max = row_height/4\n\n if (style == 'bw'):\n color_major = (0.4,0.4,0.4)\n color_minor = (0.75, 0.75, 0.75)\n color_line = (0,0,0)\n else:\n color_major = (1,0,0)\n color_minor = (1, 0.7, 0.7)\n color_line = (0,0,0.7)\n\n if(show_grid):\n ax.set_xticks(np.arange(x_min,x_max,0.2)) \n ax.set_yticks(np.arange(y_min,y_max,0.5))\n\n ax.minorticks_on()\n \n ax.xaxis.set_minor_locator(AutoMinorLocator(5))\n\n ax.grid(which='major', linestyle='-', linewidth=0.5 * display_factor, color=color_major)\n ax.grid(which='minor', linestyle='-', linewidth=0.5 * display_factor, 
color=color_minor)\n\n ax.set_ylim(y_min,y_max)\n ax.set_xlim(x_min,x_max)\n\n\n for c in range(0, columns):\n for i in range(0, rows):\n if (c * rows + i < leads):\n y_offset = -(row_height/2) * ceil(i%rows)\n # if (y_offset < -5):\n # y_offset = y_offset + 0.25\n\n x_offset = 0\n if(c > 0):\n x_offset = secs * c\n if(show_separate_line):\n ax.plot([x_offset, x_offset], [ecg[t_lead][0] + y_offset - 0.3, ecg[t_lead][0] + y_offset + 0.3], linewidth=line_width * display_factor, color=color_line)\n\n \n t_lead = lead_order[c * rows + i]\n \n step = 1.0/sample_rate\n if(show_lead_name):\n ax.text(x_offset + 0.07, y_offset - 0.5, lead_index[t_lead], fontsize=9 * display_factor)\n ax.plot(\n np.arange(0, len(ecg[t_lead])*step, step) + x_offset, \n ecg[t_lead] + y_offset,\n linewidth=line_width * display_factor, \n color=color_line\n )", "def plotCurves(self, dataByModel):\n prFigure = pyplot.figure()\n self.configChart()\n prAx = prFigure.add_subplot(111)\n prAx.set_xlabel('Recall')\n prAx.set_ylabel('Precision')\n prAx.set_title('PR Curve')\n prAx.grid(True)\n\n rocFigure = pyplot.figure()\n self.configChart()\n rocAx = rocFigure.add_subplot(111)\n rocAx.set_xlabel('Fallout / FPR')\n rocAx.set_ylabel('Recall')\n rocAx.set_title('ROC Curve')\n rocAx.grid(True)\n\n corrFigure = pyplot.figure()\n self.configChart()\n corrAx = corrFigure.add_subplot(111)\n corrAx.set_xlabel('predict score')\n corrAx.set_ylabel('real score')\n corrAx.set_title('Correlation Curve')\n corrAx.grid(True)\n\n precisionFigure = pyplot.figure()\n self.configChart()\n precisionAx = precisionFigure.add_subplot(111)\n precisionAx.set_xlabel('score')\n precisionAx.set_ylabel('Precision')\n precisionAx.set_title('Threshold score vs precision')\n precisionAx.grid(True)\n\n recallFigure = pyplot.figure()\n self.configChart()\n recallAx = recallFigure.add_subplot(111)\n recallAx.set_xlabel('score')\n recallAx.set_ylabel('Recall')\n recallAx.set_title('Threshold score vs recall')\n recallAx.grid(True)\n\n falloutFigure = pyplot.figure()\n self.configChart()\n falloutAx = falloutFigure.add_subplot(111)\n falloutAx.set_xlabel('score')\n falloutAx.set_ylabel('Fallout (False Positive Rate)')\n falloutAx.set_title('Threshold score vs fallout')\n falloutAx.grid(True)\n\n for (model, data) in list(dataByModel.items()):\n (recalls, precisions) = list(zip(*(data['PR'])))\n prAx.plot(recalls, precisions, marker='o', linestyle='--', label=model)\n\n (fallouts, recalls) = list(zip(*(data['ROC'])))\n rocAx.plot(fallouts, recalls, marker='o', linestyle='--', label=model)\n\n (pCtrs, eCtrs) = list(zip(*(data['CORR'])))\n corrAx.plot(pCtrs, eCtrs, label=model)\n\n (score, recall, precision, fallout) = list(zip(*(data['cutoff'])))\n\n recallAx.plot(score, recall, label=model + '_recall')\n precisionAx.plot(score, precision, label=model + '_precision')\n falloutAx.plot(score, fallout, label=model + '_fallout')\n\n # saving figures\n ensure_dir(self.output_dir)\n prAx.legend(loc='upper right', shadow=True)\n prFigure.savefig('%s/pr_curve.png' % self.output_dir)\n\n rocAx.legend(loc='lower right', shadow=True)\n rocFigure.savefig('%s/roc_curve.png' % self.output_dir)\n\n corrAx.legend(loc='upper left', shadow=True)\n corrFigure.savefig('%s/corr_curve.png' % self.output_dir)\n\n precisionAx.legend(loc='upper left', shadow=True)\n precisionFigure.savefig('%s/precision.png' % self.output_dir)\n\n recallAx.legend(loc='lower left', shadow=True)\n recallFigure.savefig('%s/recall.png' % self.output_dir)\n\n falloutAx.legend(loc='upper right', shadow=True)\n 
falloutFigure.savefig('%s/fallout.png' % self.output_dir)\n\n pyplot.close()\n pngs = '{result}/pr_curve.png {result}/roc_curve.png {result}/corr_curve.png {result}/precision.png {result}/recall.png {result}/fallout.png'.format(result=self.output_dir)\n print('png: ', pngs)", "def show_rgn(ax, rgn, **kwargs):\n \n alpha = 0.1\n #lw = 0.1\n \n if rgn['shape'] == 'box':\n ax.plot([rgn['params']['blcx']]*2, \n [rgn['params']['blcy'],rgn['params']['trcy']], 'r-', **kwargs)\n ax.plot([rgn['params']['blcx'],rgn['params']['trcx']], \n [rgn['params']['blcy']]*2, 'r-', **kwargs)\n ax.plot([rgn['params']['blcx'],rgn['params']['trcx']], \n [rgn['params']['trcy']]*2, 'r-', **kwargs)\n ax.plot([rgn['params']['trcx']]*2, \n [rgn['params']['blcy'],rgn['params']['trcy']], 'r-', **kwargs)\n \n elif rgn['shape'] == 'circle':\n patch = mpatches.Circle((rgn['params']['cx'], rgn['params']['cy']), \n rgn['params']['r'], alpha=alpha, transform=ax.transData)\n #plt.figure().artists.append(patch)\n ax.add_patch(patch)\n \n elif rgn['shape'] == 'polygon':\n for poly in rgn['params']['Polygons']:\n patch = mpatches.Polygon(poly.get_vertices(), closed=True, \n alpha=alpha, transform=ax.transData)\n ax.add_patch(patch)\n \n elif rgn['shape'] == 'pixel':\n ax.plot(region['params']['cy'], region['params']['cx'], 'rs', ms=5)", "def plot_roc_curve(ax,\n scores,\n e,\n t,\n a,\n folds,\n groups,\n quant,\n plot=True):\n\n fs = 16\n\n fprs, tprs, tprs_std, ctds, brss = {}, {}, {}, {}, {}\n\n fprs['all'] = {}\n tprs['all'] = {}\n ctds['all'] = {}\n brss['all'] = {}\n \n for group in groups:\n\n fprs[group] = {}\n tprs[group] = {}\n ctds[group] = {}\n brss[group] = {}\n \n for fold in set(folds):\n \n ate = a[folds == fold]\n str_test = baseline_models.structure_for_eval_(t[folds == fold],\n e[folds == fold])\n \n if len(set(folds)) == 1:\n \n atr = ate\n str_train = str_test\n \n else:\n atr = a[folds != fold]\n str_train = baseline_models.structure_for_eval_(t[folds != fold],\n e[folds != fold])\n\n t_tr_max = np.max([t_[1] for t_ in str_train])\n t_ = np.array([t_[1] for t_ in str_test])\n \n clean = (t_<=t_tr_max)\n \n str_test = str_test[t_<=t_tr_max]\n ate = ate[t_<=t_tr_max]\n \n scores_f = scores[fold][clean]\n \n for group in groups:\n \n te_protg = (ate == group)\n tr_protg = (atr == group)\n \n try:\n roc_m = cumulative_dynamic_auc(str_train[tr_protg], str_test[te_protg],\n -scores_f[te_protg], [quant])\n brs_m = brier_score(str_train[tr_protg], str_test[te_protg],\n scores_f[te_protg], quant)\n ctd_m = concordance_index_ipcw(str_train[tr_protg], str_test[te_protg],\n -scores_f[te_protg], quant)[0]\n \n except:\n roc_m = cumulative_dynamic_auc(str_train, str_test[te_protg],\n -scores_f[te_protg], [quant])\n brs_m = brier_score(str_train, str_test[te_protg],\n scores_f[te_protg], quant)\n ctd_m = concordance_index_ipcw(str_train, str_test[te_protg],\n -scores_f[te_protg], quant)[0]\n \n fprs[group][fold] = roc_m[0][0][1] \n tprs[group][fold] = roc_m[0][0][0] \n ctds[group][fold] = ctd_m\n brss[group][fold] = brs_m[1][0]\n \n roc_m = cumulative_dynamic_auc(str_train, str_test, -scores_f, [quant])\n ctd_m = concordance_index_ipcw(str_train, str_test, -scores_f, quant)[0]\n brs_m = brier_score(str_train, str_test, scores_f, quant)\n \n fprs['all'][fold], tprs['all'][fold] = roc_m[0][0][1], roc_m[0][0][0]\n ctds['all'][fold] = ctd_m\n brss['all'][fold] = brs_m[1][0]\n \n cols = ['b', 'r', 'g']\n\n roc_auc = {}\n ctds_mean = {}\n brss_mean = {}\n \n j = 0\n\n for group in list(groups) + ['all']:\n\n all_fpr = 
np.unique(np.concatenate([fprs[group][i] for i in set(folds)]))\n\n # The ROC curves are interpolated at these points.\n mean_tprs = []\n for i in set(folds):\n mean_tprs.append(np.interp(all_fpr, fprs[group][i], tprs[group][i]))\n\n # Finally the interpolated curves are averaged over to compute AUC.\n mean_tpr = np.mean(mean_tprs, axis=0)\n std_tpr = 1.96 * np.std(mean_tprs, axis=0) / np.sqrt(10)\n\n fprs[group]['macro'] = all_fpr\n tprs[group]['macro'] = mean_tpr\n tprs_std[group] = std_tpr\n\n roc_auc[group] = auc(fprs[group]['macro'], tprs[group]['macro'])\n\n ctds_mean[group] = np.mean([ctds[group][fold] for fold in folds])\n brss_mean[group] = np.mean([brss[group][fold] for fold in folds])\n \n lbl = str(group)\n lbl += ' AUC:' + str(round(roc_auc[group], 3))\n lbl += ' Ctd:'+ str(round(ctds_mean[group], 3))\n lbl += ' BS:'+ str(round(brss_mean[group], 3))\n \n if plot:\n ax.plot(\n all_fpr,\n mean_tpr,\n c=cols[j],\n label=lbl)\n\n ax.fill_between(\n all_fpr,\n mean_tpr - std_tpr,\n mean_tpr + std_tpr,\n color=cols[j],\n alpha=0.25)\n\n j += 1\n \n if plot:\n ax.set_xlabel('False Positive Rate', fontsize=fs)\n ax.set_ylabel('True Positive Rate', fontsize=fs)\n ax.legend(fontsize=fs)\n ax.set_xscale('log')\n\n return roc_auc, ctds_mean, brss_mean", "def plot_ROC_curves(fig, ax, y_all, perf, title=None):\n curves = {'IMPRESS_all': 'royalblue',\n 'IMPRESS_HE_only': 'plum',\n 'IMPRESS_IHC_only': 'pink',\n 'pathologists_eval': 'tomato'}\n \n type_convert = {'IMPRESS_all': 'IMPRESS',\n 'IMPRESS_HE_only': 'IMPRESS (H&E only)',\n 'IMPRESS_IHC_only': 'IMPRESS (IHC only)',\n 'pathologists_eval': 'Pathologists'}\n \n for fgroup in curves.keys():\n tprs = []\n aucs = []\n mean_fpr = np.linspace(0, 1, 100)\n ax.set_aspect('equal')\n for seed in range(int(y_all[fgroup].shape[1]/3)):\n y_true = y_all[fgroup].loc[:,'y_true'].iloc[:,seed]\n y_pred_proba = y_all[fgroup].loc[:,'y_pred_proba'].iloc[:,seed]\n tpr, fpr, treshold = roc_curve(y_true, 1-y_pred_proba)\n tprs.append(np.interp(mean_fpr, fpr, tpr))\n roc_auc = auc(fpr, tpr)\n aucs.append(roc_auc)\n ax.plot(fpr, tpr, color=curves[fgroup], linewidth=2, alpha=0.10, label=None)\n \n mean_tpr = np.mean(tprs, axis=0)\n mean_tpr[-1] = 1.0\n \n ax.plot(mean_fpr, mean_tpr, color=curves[fgroup],\n label=r'%s (AUC = %0.4f $\\pm$ %0.2f)' % \\\n (type_convert[fgroup], perf[fgroup].loc['AUC','mean'], perf[fgroup].loc['AUC','std']),\n linewidth=3.0, alpha=0.80)\n \n std_tpr = np.std(tprs, axis=0)\n tprs_upper = np.minimum(mean_tpr + std_tpr, 1)\n tprs_lower = np.maximum(mean_tpr - std_tpr, 0)\n \n if fgroup == 'IMPRESS_all':\n ax.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=0.1,\n label=r'$\\pm$ 1 standard deviation')\n else:\n ax.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=0.1,\n label=None)\n \n ax.set_xlabel('False positive rate')\n ax.set_ylabel('True positive rate')\n x = [0.0, 1.0]\n plt.plot(x, x, linestyle='dashed', color='red', linewidth=2.0, label='Random')\n plt.legend(fontsize=10, loc='best')\n \n if title is not None:\n fig.suptitle(t=title, fontsize=12)\n return fig", "def one_data_figure_shaded(obs, axobj, color='Blue', facecolor='Blue',\n **kwargs):\n \n x, y, e = obs['wavelength'], obs['spectrum'], obs['unc']\n axobj.fill_between(x, y-e, y+e, facecolor='grey', alpha=0.3)\n axobj.plot(x, y, color = color, linewidth = 0.5,**kwargs)\n\n return axobj", "def plot_roc_curves(labels, probas, name='', ax=None):\n # Setup axis\n if ax is None:\n fig, ax = plt.subplots(figsize=(20, 10))\n\n plot_roc(labels, 
probas, name=name, ax=ax)\n\n # Plot chance\n ax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='black', alpha=.8)\n\n # Fill bottom right\n ax.fill_between([0, 1], [0, 1], alpha=0.3, color='black')\n\n # Settings\n ax.set_xlabel('False Positive Rate or (1 - Specifity)', fontsize=15)\n ax.set_ylabel('True Positive Rate or (Sensitivity)', fontsize=15)\n ax.set_title('Receiver Operating Characteristic', weight='bold', fontsize=18)\n ax.set_xlim([0, 1])\n ax.set_ylim([0, 1])\n ax.legend(loc='lower right')\n\n return ax", "def _rfigure(self, legend=True, fig=None, ax=None):\n if fig is None and ax is None:\n fig, ax = plt.subplots()\n suptitle = True\n elif fig is None:\n fig = ax.get_figure()\n suptitle = False\n elif ax is None:\n ax = fig.gca()\n suptitle = False\n\n ax.grid(True)\n\n line_rstr = None\n line_rrls = None\n line_lstr = None\n line_lrls = None\n line_minima = None\n line_maxima = None\n t = self.timevector\n for axis, trace in zip('xy', ['positionX', 'positionY']):\n s = self.get_data(traces=trace) * 1e6 # m -> µm\n r_str_rls = self.stress_release_pairs(axis=axis, direction='right')\n l_str_rls = self.stress_release_pairs(axis=axis, direction='left')\n rstr = r_str_rls['stress']['idx']\n lstr = l_str_rls['stress']['idx']\n rrls = r_str_rls['release']['idx']\n lrls = l_str_rls['release']['idx']\n\n ax.plot(t, s, lw=0.1, ms=2, color='k', alpha=1.0)\n\n # line_rstr = None\n # line_rrls = None\n # line_lstr = None\n # line_lrls = None\n for rstr, rrls in zip(rstr, rrls):\n line_rstr, = ax.plot(t[rstr], s[rstr], lw=0.4, ms=2, color='m')\n line_rrls, = ax.plot(t[rrls], s[rrls], lw=0.4, ms=2, color='c')\n for lstr, lrls in zip(lstr, lrls):\n line_lstr, = ax.plot(t[lstr], s[lstr], lw=0.4, ms=2, color='g')\n line_lrls, = ax.plot(t[lrls], s[lrls], lw=0.4, ms=2, color='y')\n\n # line_minima = None\n # line_maxima = None\n for segment in self._sf.sections[axis]:\n minima = self.undecimate_and_limit(segment['minima'])\n maxima = self.undecimate_and_limit(segment['maxima'])\n line_minima, = ax.plot(t[minima], s[minima], '.', ms=5,\n color='b')\n line_maxima, = ax.plot(t[maxima], s[maxima], '.', ms=5,\n color='r')\n\n line_excited_x = None\n for x_c in (self.undecimate_and_limit(self._sf.excited['x'])\n / self.resolution):\n line_excited_x = ax.hlines(0.0, x_c[0], x_c[1], alpha=1,\n colors='b', linestyle='solid', lw=1)\n # ax.plot(x_c[0], 0.5, '.k', alpha=1, ms=3)\n # ax.plot(x_c[1], 0.5, '.k', alpha=1, ms=3)\n ax.vlines(x_c[0], -0.01, 0.01, alpha=1, colors='b',\n linestyle='solid', lw=1)\n ax.vlines(x_c[1], -0.01, 0.01, alpha=1, colors='b',\n linestyle='solid', lw=1)\n\n line_excited_y = None\n for y_c in (self.undecimate_and_limit(self._sf.excited['y'])\n / self.resolution):\n line_excited_y = ax.hlines(0.0, y_c[0], y_c[1], alpha=1,\n colors='r', linestyle='solid', lw=1)\n # ax.plot(y_c[0], -0.5, '.k', alpha=1, ms=3)\n # ax.plot(y_c[1], -0.5, '.k', alpha=1, ms=3)\n ax.vlines(y_c[0], -0.01, 0.01, alpha=1, colors='r',\n linestyle='solid', lw=1)\n ax.vlines(y_c[1], -0.01, 0.01, alpha=1, colors='r',\n linestyle='solid', lw=1)\n\n ax.set_xlim((t[0], t[-1]))\n\n ax.set_xlabel(\"Time (s)\")\n ax.set_ylabel(\"Signal positionX and Y (µm)\")\n if suptitle:\n fig.suptitle(\"Automatically detected excited axis, minima, \"\n \"maxima, and sections.\")\n\n if legend:\n if line_minima is not None:\n line_minima.set_label('minima')\n if line_maxima is not None:\n line_maxima.set_label('maxima')\n if line_rstr is not None:\n line_rstr.set_label('rightstress')\n if line_rrls is not None:\n 
line_rrls.set_label('rightrelease')\n if line_lstr is not None:\n line_lstr.set_label('leftstress')\n if line_lrls is not None:\n line_lrls.set_label('leftrelease')\n if line_excited_x is not None:\n line_excited_x.set_label('excited x')\n if line_excited_y is not None:\n line_excited_y.set_label('excited y')\n\n ax.legend(loc='upper right')\n\n return fig", "def _plot_ribbon_using_bezier(ax, zorder, points1, points2, color1=\"gray\",\n color2=\"gray\", lw=1):\n cc = ColorConverter()\n color1 = np.array(cc.to_rgba(color1))\n color2 = np.array(cc.to_rgba(color2))\n tRange = np.linspace(0, 1, 100)\n xpointsList = []\n ypointsList = []\n for points in [points1, points2]:\n points = np.array(points)\n p1 = points[0]\n p2 = points[1]\n p3 = points[2]\n p4 = points[3]\n allPoints = (p1[:, np.newaxis] * (1 - tRange) ** 3 + p2[:, np.newaxis]\n * (3 * (1 - tRange) ** 2 * tRange) + p3[:, np.newaxis] *\n (3 * (1 - tRange) * tRange ** 2) + p4[:, np.newaxis] *\n tRange ** 3)\n xpoints = allPoints[0]\n xpointsList.append(xpoints)\n ypoints = allPoints[1]\n ypointsList.append(ypoints)\n ax.plot(xpoints, ypoints, \"0.85\", lw=lw, zorder=zorder + 0.5)\n xpoints = xpointsList[0]\n if (mpl.colors.colorConverter.to_rgba_array(color1) ==\n mpl.colors.colorConverter.to_rgba_array(color2)).all():\n ax.fill_between(xpoints, ypointsList[0], ypointsList[1], lw=lw,\n facecolor=color1, edgecolor=color1, zorder=zorder)\n else:\n for i in range(len(tRange) - 1):\n #mean = (tRange[i]+tRange[i+1])*0.5\n xnow = np.mean(xpoints[i:i + 2])\n norm_mean = (xnow - xpoints[0]) / (xpoints[-1] - xpoints[0])\n color = color1 * (1 - norm_mean) + color2 * norm_mean\n ax.fill_between(xpoints[i:i + 2], ypointsList[0][i:i + 2],\n ypointsList[1][i:i + 2], lw=lw, facecolor=color,\n edgecolor=color, zorder=zorder)", "def Make_Binned_ANN_ROC_Curves(title,Signal_title,Background_title,bins,log=False):\n #hsv = plt.get_cmap('hsv')\n #color = hsv(np.linspace(0,1.0,len(bins)-1))\n #color = ['b', 'g', 'r', 'c', 'm', 'y']\n if len(bins)<=6:\n color = ['red','green','blue','orange','brown']\n else:\n color = ['deepskyblue','rosybrown','olivedrab','royalblue','firebrick','chartreuse','navy','red','darkorchid','lightseagreen','mediumvioletred','blue']\n nbins = 60\n\tdis_string = \"ANN_\"\n\n Signal_file = rt.TFile(\"Thesis_Plots/root_files/{}_ANN_histograms.root\".format(Signal_title),\"READ\")\n Background_file = rt.TFile(\"Thesis_Plots/root_files/{}_ANN_histograms.root\".format(Background_title),\"READ\")\n\n plt.figure(\"ROC\")\n plt.clf()\n\n for bin_ in range(len(bins)-1):\n Dis_Signal_Eff = FCM.Get_ROC_Efficiencies(Signal_file.Get(dis_string+str(bins[bin_])+\"_\"+str(bins[bin_+1])),(0,1),nbins,0)\n Dis_BG_Eff = FCM.Get_ROC_Efficiencies(Background_file.Get(dis_string+str(bins[bin_])+\"_\"+str(bins[bin_+1])),(0,1),nbins,0)\n CSV_Signal_Eff = FCM.Get_ROC_Efficiencies(Signal_file.Get(\"CSV_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1])),(0,1),ratio_bins,0)\n CSV_BG_Eff = FCM.Get_ROC_Efficiencies(Background_file.Get(\"CSV_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1])),(0,1),ratio_bins,0)\n if log:\n plt.semilogy(Dis_Signal_Eff,Dis_BG_Eff, color = color[bin_], linestyle = '-',label=str(bins[bin_])+\"_\"+str(bins[bin_+1]))\n plt.semilogy(CSV_Signal_Eff,CSV_BG_Eff, color = color[bin_],linestyle = '--',)\n\n else:\n plt.plot(Dis_Signal_Eff,1-Dis_BG_Eff, color = color[bin_], linestyle = '-',label=str(bins[bin_])+\"_\"+str(bins[bin_+1]))\n plt.plot(CSV_Signal_Eff,1-CSV_BG_Eff, color = color[bin_],linestyle = '--',)\n\n if log:\n\t\tif 
diff:\n\t\t\tplt.semilogy([0,0],[0,0],'k-',label = 'L4-L1')\n\t\telse:\n \tplt.semilogy([0,0],[0,0],'k-',label = 'L4/L1')\n plt.semilogy([0,0],[0,0],'k-.',label = 'CSV')\n plt.semilogy([0,1],[0.1,0.1],'k:')\n plt.xlabel(r\"$\\epsilon$_signal\")\n plt.ylabel(r\"$\\epsilon$_background\")\n plt.legend(loc=4)\n else:\n\t\tif diff:\n\t\t\tplt.plot([0,0],[0,0],'k-',label = 'L4-L1')\n\t\telse:\n \tplt.plot([0,0],[0,0],'k-',label = 'L4/L1')\n plt.plot([0,0],[0,0],'k-.',label = 'CSV')\n #plt.plot([0,1],[0.9,0.9],'k:',label=\"10% mistag\")\n plt.plot([0,1],[0.9,0.9],'k:')\n plt.xlabel(r\"$\\epsilon$_signal\")\n plt.ylabel(r\"1-$\\epsilon$_background\")\n plt.legend(loc=3)\n #plt.title(title+\"_ROC-Curves\")\n\n plt.savefig(\"Thesis_Plots/{}_ROC_Curves.png\".format(title))\n print \"saved as Thesis_Plots/{}_ROC_Curves.png\".format(title)", "def plot_model_curves(class_name, model, range_metrics, ax):\n def plot_axis(ax, data, color):\n \"\"\"\n Plot data on axis in certain color\n \"\"\"\n x_indices = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]\n ax.scatter(x_indices, data, color=color, s=4)\n ax.plot(x_indices, data, color=color, linewidth=2)\n ax.set_yticks([]) # same for y ticks\n ax.set_ylim([0, 1])\n # Get balanced purities\n preds = np.concatenate(model.results)\n if model.name == \"Binary Classifiers\":\n purities = get_binary_balanced_purity_ranges(\n preds, model.class_labels, 0.1, model.class_counts)[class_name]\n else:\n purities = get_balanced_purity_ranges(\n preds, model.class_labels, 0.1, model.class_counts)[class_name]\n\n # Get completenesses\n comps = get_completeness_ranges(model.class_counts, range_metrics, class_name)\n\n print(\"\\n\\n Model: \" + str(model.name) + \", class: \" + class_name)\n print(\"Completeness\")\n print(comps)\n print(\"Purity\")\n print(purities)\n\n plot_axis(ax, comps, C_BAR_COLOR)\n ax2 = ax.twinx() # instantiate a second axes that shares the same x-axis\n ax2.set_ylim([0, 1])\n plot_axis(ax2, purities, P_BAR_COLOR)\n for axis in ['top', 'bottom', 'left', 'right']:\n ax.spines[axis].set_linewidth(1.5)\n return ax2", "def plot_curve(self, fig, ax, linewidth=1.5, linestyle='-', color='black', u1=0.00, u2=1.00):\n\n # One dimension (law of evolution)\n if self.ndim == 1:\n u = np.linspace(u1, u2, 501)\n X = np.real(self.get_value(u))\n line, = ax.plot(u, X[0,:])\n line.set_linewidth(linewidth)\n line.set_linestyle(linestyle)\n line.set_color(color)\n line.set_marker(' ')\n # line.set_label(' ')\n\n # Two dimensions (plane curve)\n elif self.ndim == 2:\n u = np.linspace(u1, u2, 501)\n X, Y = np.real(self.get_value(u))\n line, = ax.plot(X, Y)\n line.set_linewidth(linewidth)\n line.set_linestyle(linestyle)\n line.set_color(color)\n line.set_marker(' ')\n # line.set_label(' ')\n\n # Three dimensions (space curve)\n elif self.ndim == 3:\n u = np.linspace(u1, u2, 501)\n X, Y, Z = np.real(self.get_value(u))\n line, = ax.plot(X, Y, Z)\n line.set_linewidth(linewidth)\n line.set_linestyle(linestyle)\n line.set_color(color)\n line.set_marker(' ')\n # line.set_label(' ')\n\n else: raise Exception('The number of dimensions must be 1, 2 or 3')\n\n return fig, ax", "def construct_plot(self, amprtb):\n self.fig, [[self.ax1, self.ax2], [self.ax3, self.ax4]] = \\\n plt.subplots(2, 2, figsize=(10, 10),\n subplot_kw={'projection': self.projection})\n ind1, ind2 = amprtb._get_scan_indices(\n self.scanrange, self.timerange, False)\n\n # 10 GHz plot\n stuff = amprtb.plot_ampr_track(\n var='10'+self.chan, latrange=self.latrange,\n lonrange=self.lonrange, 
parallels=self.parallels,\n meridians=self.meridians, title='', wmts_layer=self.wmts_layer,\n clevs=self.clevs, cmap=self.cmap, show_track=self.show_track,\n maneuver=self.maneuver, scanrange=self.scanrange,\n show_grid=self.show_grid, equator=self.equator,\n show_qc=self.show_qc, resolution=self.resolution,\n projection=self.projection, ax=self.ax1, fig=self.fig,\n verbose=self.verbose, timerange=self.timerange, return_flag=True)\n self.ax1.set_title(self.make_title('10', amprtb, ind1, ind2))\n\n # 19 GHz plot\n amprtb.plot_ampr_track(\n var='19'+self.chan, latrange=self.latrange,\n lonrange=self.lonrange, parallels=self.parallels,\n meridians=self.meridians, title='', wmts_layer=self.wmts_layer,\n clevs=self.clevs, cmap=self.cmap, show_track=self.show_track,\n maneuver=self.maneuver, scanrange=self.scanrange,\n show_grid=self.show_grid, equator=self.equator,\n show_qc=self.show_qc, resolution=self.resolution,\n projection=self.projection, ax=self.ax2, fig=self.fig,\n verbose=self.verbose, timerange=self.timerange)\n self.ax2.set_title(self.make_title('19', amprtb, ind1, ind2))\n\n # 37 GHz plot\n amprtb.plot_ampr_track(\n var='37'+self.chan, latrange=self.latrange,\n lonrange=self.lonrange, parallels=self.parallels,\n meridians=self.meridians, title='', wmts_layer=self.wmts_layer,\n clevs=self.clevs, cmap=self.cmap, show_track=self.show_track,\n maneuver=self.maneuver, scanrange=self.scanrange,\n show_grid=self.show_grid, equator=self.equator,\n show_qc=self.show_qc, resolution=self.resolution,\n projection=self.projection, ax=self.ax3, fig=self.fig,\n verbose=self.verbose, timerange=self.timerange)\n self.ax3.set_title(self.make_title('37', amprtb, ind1, ind2))\n\n # 85 GHz plot\n amprtb.plot_ampr_track(\n var='85'+self.chan, latrange=self.latrange,\n lonrange=self.lonrange, parallels=self.parallels,\n meridians=self.meridians, title='', wmts_layer=self.wmts_layer,\n clevs=self.clevs, cmap=self.cmap, show_track=self.show_track,\n maneuver=self.maneuver, scanrange=self.scanrange,\n show_grid=self.show_grid, equator=self.equator,\n show_qc=self.show_qc, resolution=self.resolution,\n projection=self.projection, ax=self.ax4, fig=self.fig,\n verbose=self.verbose, timerange=self.timerange)\n self.ax4.set_title(self.make_title('85', amprtb, ind1, ind2))\n\n # plt.tight_layout()\n return True", "def plot_autocorrs(self, axis=0, n_rows=4, n_cols=8):\n self.current_plot = 'multi'\n self.ax_zoomed = False\n \n bls = self.uv.d_uv_data['BASELINE']\n\n # Extract the relevant baselines using a truth array\n # bls = bls.tolist()\n bl_ids = set([256*i + i for i in range(1, n_rows * n_cols + 1)])\n bl_truths = np.array([(b in bl_ids) for b in bls])\n \n #print self.uv.d_uv_data['DATA'].shape\n #x_data = self.d_uv_data['DATA'][bl_truths,0,0,:,0,axis] # Baselines, freq and stokes\n #x_cplx = x_data[:,:,0] + 1j * x_data[:,:,1]\n\n x_cplx = self.stokes[axis][bl_truths]\n\n\n \n # Plot the figure\n #print self.uv.n_ant\n fig = self.sp_fig\n figtitle = '%s %s: %s -- %s'%(self.uv.telescope, self.uv.instrument, self.uv.source, self.uv.date_obs)\n for i in range(n_rows):\n for j in range(n_cols):\n ax = fig.add_subplot(n_rows, n_cols, i*n_cols + j +1)\n ax.set_title(self.uv.d_array_geometry['ANNAME'][i*n_cols + j], fontsize=10)\n #ax.set_title(\"%s %s\"%(i, j))\n \n x = x_cplx[i*n_cols+j::self.uv.n_ant]\n \n if self.scale_select.currentIndex() == 0 or self.scale_select.currentIndex() == 1:\n if x.shape[0] == self.uv.n_ant:\n self.plot_spectrum(ax, x, label_axes=False)\n else:\n self.plot_spectrum(ax, x, 
stat='max', label_axes=False)\n self.plot_spectrum(ax, x, stat='med', label_axes=False)\n self.plot_spectrum(ax, x, stat='min', label_axes=False)\n else:\n self.plot_spectrum(ax, x, label_axes=False)\n self.updateFreqAxis(ax)\n \n if i == n_rows-1:\n ax.set_xlabel('Freq')\n if j == 0:\n ax.set_ylabel('Amplitude')\n \n plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n plt.tick_params(axis='both', which='major', labelsize=10)\n plt.tick_params(axis='both', which='minor', labelsize=8)\n plt.xticks(rotation=30)\n \n plt.subplots_adjust(left=0.05, right=0.98, top=0.95, bottom=0.1, wspace=0.3, hspace=0.45)\n return fig, ax", "def plot_roc_curve(tprs, aucs, tag=''):\n fig, ax = plt.subplots()\n ax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',\n label='Chance', alpha=.8)\n\n mean_tpr = np.mean(tprs, axis=0)\n mean_tpr[-1] = 1.0\n mean_fpr = np.linspace(0, 1, 100)\n\n mean_auc = auc(mean_fpr, mean_tpr)\n std_auc = np.std(aucs)\n ax.plot(mean_fpr, mean_tpr, color='b',\n label=r'Mean ROC (AUC = %0.2f $\\pm$ %0.2f)' % (mean_auc, std_auc),\n lw=2, alpha=.8)\n\n std_tpr = np.std(tprs, axis=0)\n tprs_upper = np.minimum(mean_tpr + std_tpr, 1)\n tprs_lower = np.maximum(mean_tpr - std_tpr, 0)\n ax.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,\n label=r'$\\pm$ 1 std. dev.')\n\n ax.set(xlim=[-0.05, 1.05], ylim=[-0.05, 1.05],\n title=\"Receiver operating characteristic example\")\n ax.legend(loc=\"lower right\")\n plt.tight_layout()\n plt.savefig(f'roc_{tag}.png')\n plt.show()", "def _roc_plot(self, roc_curves):\n # figure\n p = default_figure(\n {\n \"x_range\": (-0.01, 1.1),\n \"y_range\": (-0.01, 1.1),\n \"tools\": \"pan,wheel_zoom,box_zoom,reset\",\n \"toolbar_location\": \"right\"\n }\n )\n\n # main lines added to the plot\n self._default_models_lines(p, roc_curves)\n\n # baseline comparison\n p.line(\n [0, 1], # line x=y\n [0, 1],\n line_dash=\"dashed\",\n line_width=1,\n color=self.plot_design.models_dummy_color,\n legend_label=\"Random Baseline\",\n muted_alpha=0.5 # clicked line in the Legend will be muted\n )\n\n # plot specific styling\n p.legend.location = \"bottom_right\"\n p.xaxis.axis_label = \"False Positive Rate\"\n p.yaxis.axis_label = \"True Positive Rate\"\n\n return p", "def plot_ROC():\r\n fpr = dict()\r\n tpr = dict()\r\n roc_auc = dict()\r\n threshold = dict()\r\n \r\n for i in range(n_classes):\r\n \r\n fpr[i], tpr[i], threshold[i] = roc_curve(y_test[:, i], y_pred[:, i])\r\n \r\n roc_auc[i] = auc(fpr[i], tpr[i])\r\n\r\n fig = plt.figure()\r\n ax = fig.add_subplot(1, 1, 1)\r\n ax.set_aspect('equal')\r\n colors = cycle(['aqua', 'red', 'purple', 'royalblue', 'black'])\r\n \r\n for i, color in zip(range(n_classes), colors):\r\n \r\n plt.plot(\r\n fpr[i],\r\n tpr[i],\r\n color=color,\r\n linewidth=3,\r\n #label='Class {0} (AUC {1:0.3f})'\r\n label='AUC {1:0.2f}' \r\n ''.format(i+1, roc_auc[i])\r\n )\r\n\r\n #plt.plot([0, 1], [0, 1], 'k--', linewidth=3)\r\n plt.xlim([-0.03, 1])\r\n plt.ylim([0, 1.03])\r\n ax.axhline(y=0, color='k', linewidth=4)\r\n ax.axhline(y=1.03, color='k', linewidth=4)\r\n ax.axvline(x=-0.03, color='k', linewidth=4)\r\n ax.axvline(x=1, color='k', linewidth=4) \r\n plt.xticks([0, 0.2, 0.4, 0.6, 0.8, 1.0], fontsize=16, fontweight='bold')\r\n plt.yticks([0, 0.2, 0.4, 0.6, 0.8, 1.0], fontsize=16, fontweight='bold')\r\n #plt.xlabel('False Positive Rate', fontweight='bold', fontsize=16)\r\n #plt.ylabel('True Positive Rate', fontweight='bold', fontsize=16)\r\n plt.legend(loc='lower right', prop={'size': 14, 'weight': 'bold'}) \r\n 
plt.grid(True)\r\n\r\n ROC_filename = 'ROC' + '_' + \\\r\n str(DNN_Model) + '_' + \\\r\n strftime(\"%d-%b-%Y-%H-%M-%S\", gmtime()) + '.png'\r\n \r\n plt.savefig(\r\n os.path.join(result_dir, ROC_filename),\r\n format='png',\r\n dpi=600\r\n )\r\n\r\n plt.show()\r\n plt.close()", "def plot_curves():\n lm = np.arange(0, 1.8, .01)\n vm = np.arange(-1.2, 1.2, .01)\n lt = np.arange(0, 1.07, .01)\n plt.subplot(2,1,1)\n plt.plot(lm, force_length_muscle(lm), 'r')\n plt.plot(lm, force_length_parallel(lm), 'g')\n plt.plot(lt, force_length_tendon(lt), 'b')\n plt.legend(('CE', 'PE', 'SE'))\n plt.xlabel('Normalized length')\n plt.ylabel('Force scale factor')\n plt.subplot(2, 1, 2)\n plt.plot(vm, force_velocity_muscle(vm), 'k')\n plt.xlabel('Normalized muscle velocity')\n plt.ylabel('Force scale factor')\n plt.tight_layout()\n plt.show()", "def traces(mndata,Params,srate,imagepath):\n\t#plot high gamma traces\n\t#data should be bandpassed (todo)\n\t#resample to srate\n\tst = resample(Params[\"st\"],srate)\n\ten = resample(Params[\"en\"],srate)\n\tbl_en = resample(Params[\"bl_en\"],srate)\n\tbl_st = resample(Params[\"bl_st\"],srate)\n\tplot_tp = resample(Params[\"plot\"],srate)\n\tcue = resample(500,srate)\n\t\n\tcolors = ['red','orange','green','blue']\n\tx = np.array(range(st,en+1))\n\tf, (ax,ax2) = plt.subplots(1,2, sharex = False)\n\tax.axhline(y = 0,color = 'k',linewidth=2)\n\tax.axvline(x = 0,color='k',linewidth=2)\n\tax.axvline(x = cue,color = 'gray',linewidth = 2)\n\tax.axvline(x = cue+cue,color = 'gray',linewidth = 2)\n\tax.axvspan(cue, cue+cue, facecolor='0.5', alpha=0.25,label = 'cue')\n\n\tfor j in range(len(Params[\"conditions\"])):\n\t\tcondition = Params['conditions'][j]\n\t\ty = mndata[condition]['data']\n\t\tax.plot(x,y, label = condition,linewidth = 2,color = colors[j])\n\t\n\tax.set_ylim((-30,85))\n\tax.set_xlim(st,en)\n\tax.legend()\n\tax.xaxis.set_ticklabels(['', '0', '','500', '', '1000', '', '1500', '', '2000','','2500','', '3000'],minor=False)\n\tax.xaxis.set_ticks(range(st,en,plot_tp))\n\n\tax.set_xlabel(\"time (ms)\")\n\tax.set_ylabel(\"% change baseline\")\n\tax.set_title('Analytic Amplitude - High Gamma (70-150Hz)', fontsize = 18)\n\n\t#plot brain with elec location\n\t#brain = plt.imread(imagepath)\n\t#aa = pylab.mean(brain,2)\n\t#ax2.imshow(aa)\n\t#a2.gray()\n\n\t#brain = Image.open(imagepath)\n\t#ax2.set_axis_off()\n\t#im = plt.imshow(brain, origin = 'lower')\n\n\t#brain = _png.read_png(imagepath)\n\t#imagebox = OffsetImage(brain,zoom =5)\n\t#ab = AnnotationBbox(imagebox,)\n\n\tim = Image.open(imagepath)\n\tax2.imshow(im,aspect = 'auto',origin = 'lower')\n\tax2.set_xlim((0,750))\n\tax2.set_title('Electrode Location',fontsize = 18)\n\n\n\n\treturn f, (ax, ax2)", "def draw_lines(asr,ax):\n r = asr.value\n y = 475.\n x = (r**2-y**2)**(.5)\n xs = np.linspace(-x,x,10)\n yt = np.zeros(xs.size)+y\n yb = np.zeros(xs.size)-y\n ax.plot(xs,yt,'-.',color='red',alpha=1.,linewidth=2,zorder=5000)\n ax.plot(xs,yb,'-.',color='red',alpha=1.,linewidth=2,zorder=5000)\n return ax", "def plotPsCurve(mcoolsPath:list,celltypeNames:list,chroms:list,resolution=100000,title=\"P(s) curve\",plotType=\"interaction\",base=1.1,log_x=True,log_y=True):\n import plotly.express as px\n from IPython.display import Image\n\n #Calculate P(s) data, get a 3 column pd.DataFrame with (bin,resolution,celltype)\n psDataAll = []\n for i in range(len(mcoolsPath)):\n psDataAll.append(compartment.getPsData(mcoolsPath[i],[\"chr\"+str(i+1) for i in 
range(len(chroms))],resolution=resolution,celltype=celltypeNames[i],base=base)) \n merged = pd.concat(psDataAll)\n\n data = pd.merge(merged,merged.groupby(\"celltype\").sum(),how=\"left\",on=\"celltype\").assign(prob= lambda df: df.aveCount_x/df.aveCount_y)\n\n fig = px.line(x=data[\"bin_x\"]*resolution,y=data[\"prob\"],color=data[\"celltype\"],title=title,log_x=log_x,log_y=log_y).update_layout(template='simple_white')\n fig.update_layout(width=800,height=600)\n fig.update_layout(xaxis_title=\"Genomic Distance(bp)\",\n yaxis_title=\"Contact Probability\")\n if(plotType == \"interaction\"):\n return fig\n else : return Image(fig.to_image(format=\"png\", engine=\"kaleido\"))", "def plot_forest(self):\n ax, = az.plot_forest(self.ifd_, var_names=[\"avg\", \"a_coef\", \"b_vals_coef\", \"b_mask_coef\", \"c_vals_coef\", \"c_mask_coef\"])\n ax.axvline(0, linestyle=':', color='black')\n # return ax", "def plot_along(a, title=''):\n f, ax = plt.subplots(2, figsize=(16, 16), dpi= 80, )#wspace=0, hspace=0)\n\n x = a['xo'].values\n pc = a['pc'].values\n pn = a['pn'].values\n crl = a['crl'].values\n chisqr = a['chisqr'].values\n\n for i in ax[1:2]:\n i.grid()\n i.xaxis.label.set_size(15)\n i.yaxis.label.set_size(15)\n i.tick_params(labelsize=15)\n i.title.set_size(20)\n\n ax[0].plot(x, pc, 'k', lw=3, label='$P_c$')\n ax[0].plot(x, pn, '.6', lw=3, label='$P_n$')\n ax[0].fill_between(x, pc, pn, where=pc >= pn, alpha=.2, label='Dominantly Specular')\n ax[0].fill_between(x, pc, pn, where=pc <= pn, alpha=.2, label='Dominatly Diffuse')\n ax[0].set_title('RSR-derived Coherent and Incoherent Energies', fontweight=\"bold\", fontsize=20)\n ax[0].set_ylabel('$[dB]$')\n ax[0].set_xlim(0, x.max())\n ax[0].legend(loc=3, ncol=2, fontsize='large')\n\n ax_chisqr = ax[1].twinx()\n ax_chisqr.plot(x, chisqr, '.6', lw=3)\n ax_chisqr.set_ylabel('Chi-square', color='.6')\n ax_chisqr.yaxis.label.set_size(15)\n ax_chisqr.tick_params(labelsize=15)\n\n ax[1].plot(x, crl, 'k', lw=3)\n ax[1].set_title('Quality Metrics', fontweight=\"bold\", fontsize=20)\n ax[1].set_ylabel('Correlation Coefficient')\n ax[1].set_xlim(0, x.max())\n ax[1].set_ylim(0, 1.1)\n ax[1].legend(loc=3, ncol=2, fontsize='large')\n ax[1].set_xlabel('Bin #')", "def plot_calibration_curve(ax,\n scores,\n e,\n t,\n a,\n folds,\n group,\n quant,\n strat='quantile',\n adj='IPCW', \n plot=True):\n\n allscores = np.ones_like(t).astype('float')\n\n for fold in set(folds):\n allscores[folds == fold] = scores[fold]\n\n scores = allscores\n\n b_fc = (0, 0, 1, .4)\n r_fc = (1, 0, 0, .2)\n\n b_ec = (0, 0, 1, .8)\n r_ec = (1, 0, 0, .8)\n\n n_bins = 20\n\n hatch = '//'\n\n fs = 16\n\n prob_true_n, _, outbins, ece = calibration_curve(\n scores,\n e,\n t,\n a,\n group,\n quant,\n typ=adj,\n ret_bins=True,\n strat=strat,\n n_bins=n_bins)\n \n for d in range(len(prob_true_n)):\n\n binsize = outbins[d + 1] - outbins[d]\n binloc = (outbins[d + 1] + outbins[d]) / 2\n\n gap = (prob_true_n[d] - binloc)\n\n if gap < 0:\n bottom = prob_true_n[d]\n else:\n bottom = prob_true_n[d] - abs(gap)\n\n if d == len(prob_true_n) - 1:\n lbl1 = 'Score'\n lbl2 = 'Gap'\n else:\n lbl1 = None\n lbl2 = None\n \n if plot:\n ax.bar(\n binloc,\n prob_true_n[d],\n width=binsize,\n facecolor=b_fc,\n edgecolor=b_ec,\n linewidth=2.5,\n label=lbl1)\n ax.bar(\n binloc,\n abs(gap),\n bottom=bottom,\n width=binsize,\n facecolor=r_fc,\n edgecolor=r_ec,\n linewidth=2.5,\n hatch=hatch,\n label=lbl2)\n\n d += 1\n \n if plot:\n \n ax.plot([0, 1], [0, 1], c='k', ls='--', lw=2, zorder=100)\n\n ax.set_xlabel('Predicted 
Score', fontsize=fs)\n ax.set_ylabel('True Score', fontsize=fs)\n\n ax.legend(fontsize=fs)\n ax.set_title(str(group), fontsize=fs)\n ax.set_xlim(0, 1)\n ax.set_ylim(0, 1)\n\n ax.grid(ls=':', lw=2, zorder=-100, color='grey')\n ax.set_axisbelow(True)\n\n ax.text(\n x=0.030,\n y=.7,\n s='ECE=' + str(round(ece, 3)),\n size=fs,\n bbox=dict(boxstyle='round', fc='white', ec='grey', pad=0.2))\n\n return ece", "def plot_mean_roc_curve_of_classifiers(classifier_roc_list, data_set_description):\n if const.RECORD_RESULTS is True:\n fig = plt.figure(figsize=(8, 6.66))\n monochrome = (cycler(\"color\", [\"k\"]) * cycler(\"marker\", [\"\"]) *\n cycler(\"linestyle\", [\"-\", \"--\", \"-.\"]))\n color_arr = [\"#64B3DE\", \"#1f78b4\", \"#6ABF20\", \"#FBAC44\", \"#bc1659\", \"#B9B914\", \"#33a02c\", \"#ff7f00\", \"#6a3d9a\", \"black\", \"#b15928\", \"#e31a1c\"]\n plt.rc(\"axes\", prop_cycle=monochrome)\n line_style_index = 0\n color_index = 0\n\n for (test_run_roc_list, classifier_description) in classifier_roc_list:\n if not (None, None) in test_run_roc_list[0]:\n mean_tpr = 0.0\n mean_fpr = np.linspace(0, 1, 100)\n count = 0\n for roc_list in test_run_roc_list:\n for (tpr, fpr) in roc_list:\n mean_tpr += interp(mean_fpr, fpr, tpr)\n mean_tpr[0] = 0.0\n count += 1\n\n mean_tpr /= float(count)\n mean_tpr[-1] = 1.0\n mean_auc = auc(mean_fpr, mean_tpr)\n line_width = 0.5\n if line_style_index == 1:\n line_width = 0.8\n elif line_style_index == 2:\n line_width = 1.5\n\n plt.plot(mean_fpr, mean_tpr, c=color_arr[color_index], lw=line_width, alpha=1, label=\"{0} ({1:.3f})\".format(classifier_description, mean_auc))\n line_style_index = (line_style_index + 1) % 3\n color_index += 1\n\n plt.locator_params(axis='x', nbins=10)\n plt.locator_params(axis='y', nbins=10)\n plt.plot([0, 1], [0, 1], \"k--\", label=\"Random classification\", lw=0.8)\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.0])\n plt.xlabel(\"False Positive Rate\")\n plt.ylabel(\"True Positive Rate\")\n plt.title(\"ROC curve for each classifier\")\n plt.legend(loc=\"lower right\", fancybox=True, frameon=True)\n current_time = datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n plt.savefig(os.path.dirname(os.path.realpath(__file__)) + \"/../results/{0}_roc_classifier_plot_{1}.png\".format(data_set_description, current_time), bbox_inches=\"tight\")\n plt.close(fig)", "def plot_control_points(self, fig, ax, linewidth=1.25, linestyle='-.', color='red', markersize=5, markerstyle='o'):\n\n # One dimension (law of evolution)\n if self.ndim == 1:\n Px = np.real(self.P)\n u = np.linspace(0, 1, Px.size)\n line, = ax.plot(u, Px[0,:])\n line.set_linewidth(linewidth)\n line.set_linestyle(linestyle)\n line.set_color(color)\n line.set_marker(markerstyle)\n line.set_markersize(markersize)\n line.set_markeredgewidth(linewidth)\n line.set_markeredgecolor(color)\n line.set_markerfacecolor('w')\n line.set_zorder(4)\n # line.set_label(' ')\n\n\n # Two dimensions (plane curve)\n elif self.ndim == 2:\n Px, Py = np.real(self.P)\n line, = ax.plot(Px, Py)\n line.set_linewidth(linewidth)\n line.set_linestyle(linestyle)\n line.set_color(color)\n line.set_marker(markerstyle)\n line.set_markersize(markersize)\n line.set_markeredgewidth(linewidth)\n line.set_markeredgecolor(color)\n line.set_markerfacecolor('w')\n line.set_zorder(4)\n # line.set_label(' ')\n\n # Three dimensions (space curve)\n elif self.ndim == 3:\n Px, Py, Pz = np.real(self.P)\n line, = ax.plot(Px, Py, Pz)\n line.set_linewidth(linewidth)\n line.set_linestyle(linestyle)\n line.set_color(color)\n 
line.set_marker(markerstyle)\n line.set_markersize(markersize)\n line.set_markeredgewidth(linewidth)\n line.set_markeredgecolor(color)\n line.set_markerfacecolor('w')\n line.set_zorder(4)\n # line.set_label(' ')\n\n else: raise Exception('The number of dimensions must be 2 or 3')\n\n return fig, ax", "def plot(self, ax, graph=None, graph_i=None, type_plot='', ignoreNext=0, boxplot=None, violinplot=None, violinplotkwargs={}):\n from grapa.graph import Graph\n handle = None\n # check default arguments\n if boxplot is None:\n boxplot = {'y': [], 'positions': [], 'labels': [], 'color': [],\n 'i': 0}\n if violinplot is None:\n violinplot = {'y': [], 'positions': [], 'labels': [], 'color': []}\n if graph is None:\n graph_i = None\n else:\n if graph[graph_i] != self:\n graph_i = None\n if graph_i is None:\n for c in range(len(graph)):\n if graph[c] == self:\n graph_i = c\n break\n if graph_i is None:\n graph = None # self was not found in graph\n print('Warning Curve.plot: Curve not found in provided Graph')\n\n # retrieve basic information\n alter = graph._getAlter() if graph is not None else ['', '']\n attr = self.getAttributes()\n linespec = self.attr('linespec')\n # construct dict of keywords based on curves attributes, in a very\n # restrictive way\n # some attributes are commands for plotting, some are just related to\n # the sample, and no obvious way to discriminate between the 2\n fmt = {}\n for key in attr:\n if not isinstance(key, str):\n print(type(key), key, attr[key])\n if ((not isinstance(attr[key], str) or attr[key] != '')\n and key in Graph.dataInfoKeysGraph\n and key not in ['plot', 'linespec', 'type', 'ax_twinx',\n 'ax_twiny', 'offset', 'muloffset',\n 'labelhide', 'colorbar', 'xerr', 'yerr']):\n fmt[key] = attr[key]\n # do not plot curve if was asked not to display it.\n if 'linestyle' in fmt and fmt['linestyle'] in Curve.LINESTYLEHIDE:\n return None, ignoreNext\n # some renaming of kewords, etc\n if 'legend' in fmt:\n fmt['label'] = fmt['legend']\n del fmt['legend']\n if 'cmap' in fmt and not isinstance(fmt['cmap'], str):\n # convert Colorscale into matplotlib cmap\n from grapa.colorscale import Colorscale\n fmt['cmap'] = Colorscale(fmt['cmap']).cmap()\n if 'vminmax' in fmt:\n if isinstance(fmt['vminmax'], list) and len(fmt['vminmax']) > 1:\n if (fmt['vminmax'][0] != '' and not np.isnan(fmt['vminmax'][0])\n and not np.isinf(fmt['vminmax'][0])):\n fmt.update({'vmin': fmt['vminmax'][0]})\n if (fmt['vminmax'][1] != '' and not np.isnan(fmt['vminmax'][1])\n and not np.isinf(fmt['vminmax'][1])):\n fmt.update({'vmax': fmt['vminmax'][1]})\n del fmt['vminmax']\n\n # start plotting\n # retrieve data after transform, including of offset and muloffset\n x = self.x_offsets(alter=alter[0])\n y = self.y_offsets(alter=alter[1])\n type_graph = self.attr('type', 'plot')\n if type_plot.endswith(' norm.'):\n type_graph = type_plot[:-6]\n y = y / max(y)\n\n # add keyword arguments which are in the plot method prototypes\n try:\n sig = inspect.signature(getattr(ax, type_graph))\n for key in sig.parameters:\n if key in attr and key not in fmt:\n fmt.update({key: attr[key]})\n except AttributeError:\n print('Curve.plot: desired plotting method not found ('+type_graph\n + '). Going for default.')\n # for xample 'errorbar_yerr' after suppression of previous Curve\n # 'errorbar'. 
Will be 'plot' anyway.\n pass\n except Exception as e:\n print('Exception in Curve.plot while identifying keyword',\n 'arguments:')\n print(type(e), e)\n\n if 'labelhide' in attr and attr['labelhide']:\n if 'label' in fmt:\n del fmt['label']\n\n # No support for the following methods (either 2D data, or complicated\n # to implement):\n # hlines, vlines, broken_barh, contour, contourf, polar,\n # pcolor, pcolormesh, streamplot, tricontour, tricontourf,\n # tripcolor\n # Partial support for:\n # imgshow\n attrIgnore = ['label', 'plot', 'linespec', 'type', 'ax_twinx',\n 'ax_twiny', 'offset', 'muloffset', 'labelhide',\n 'colorbar']\n # \"simple\" plotting methods, with prototype similar to plot()\n if type_graph in ['semilogx', 'semilogy', 'loglog', 'plot_date',\n 'stem', 'step', 'triplot']:\n handle = getattr(ax, type_graph)(x, y, linespec, **fmt)\n elif type_graph in ['fill']:\n if self.attr('fill_padto0', False):\n handle = ax.fill([x[0]]+list(x)+[x[-1]], [0]+list(y)+[0], linespec, **fmt)\n else:\n handle = ax.fill(x, y, linespec, **fmt)\n # plotting methods not accepting formatting string as 3rd argument\n elif type_graph in ['bar', 'barbs', 'barh', 'cohere', 'csd',\n 'fill_between', 'fill_betweenx', 'hexbin',\n 'hist2d', 'quiver', 'xcorr']:\n handle = getattr(ax, type_graph)(x, y, **fmt)\n # plotting of single vector data\n elif type_graph in ['acorr', 'angle_spectrum', 'eventplot', 'hist',\n 'magnitude_spectrum', 'phase_spectrum', 'pie',\n 'psd', 'specgram']:\n # careful with eventplot, the Curve data are modified\n handle = getattr(ax, type_graph)(y, **fmt)\n # a more peculiar plotting\n elif type_graph in ['spy']:\n handle = getattr(ax, type_graph)([x, y], **fmt)\n elif type_graph == 'stackplot':\n # look for next Curves with type == 'stackplot', and same x\n nexty = []\n fmt['labels'], fmt['colors'] = [''], ['']\n if 'label' in fmt:\n fmt['labels'] = ['' if self.attr('labelhide') else fmt['label']]\n del fmt['label']\n if 'color' in fmt:\n fmt['colors'] = [fmt['color']]\n del fmt['color']\n attrIgnore.append('color')\n if graph is not None:\n for j in range(graph_i+1, len(graph)):\n if graph[j].attr('type') == type_graph and np.array_equal(x, graph[j].x_offsets(alter=alter[0])):\n ignoreNext += 1\n if not graph[j].isHidden():\n nexty.append(graph[j].y_offsets(alter=alter[1]))\n lbl = graph[j].attr('label')\n fmt['labels'].append('' if graph[j].attr('labelhide') else lbl)\n fmt['colors'].append(graph[j].attr('color'))\n continue\n else:\n break\n if np.all([(c == '') for c in fmt['colors']]):\n del fmt['colors']\n handle = getattr(ax, type_graph)(x, y, *nexty, **fmt)\n elif type_graph == 'errorbar':\n # look for next Curves, maybe xerr/yerr was provided\n if 'xerr' in attr:\n fmt.update({'yerr': attr['xerr']})\n if 'yerr' in attr:\n fmt.update({'yerr': attr['yerr']})\n if graph is not None:\n for j in range(graph_i+1, min(graph_i+3, len(graph))):\n if len(graph[j].y()) == len(y):\n typenext = graph[j].attr('type')\n if typenext not in ['errorbar_xerr', 'errorbar_yerr']:\n break\n if typenext == 'errorbar_xerr':\n fmt.update({'xerr': graph[j].y_offsets()})\n ignoreNext += 1\n continue\n if typenext == 'errorbar_yerr':\n fmt.update({'yerr': graph[j].y_offsets()})\n ignoreNext += 1\n continue\n break\n handle = ax.errorbar(x, y, fmt=linespec, **fmt)\n elif type_graph == 'scatter':\n convert = {'markersize': 's', 'markeredgewidth': 'linewidths'}\n for key in convert:\n if key in fmt:\n fmt.update({convert[key]: fmt[key]})\n del fmt[key]\n try:\n if graph is not None:\n for j in 
range(graph_i+1, min(graph_i+3, len(graph))):\n typenext = graph[j].attr('type')\n if typenext not in ['scatter_c', 'scatter_s']:\n break\n if 's' not in fmt and typenext == 'scatter_s':\n fmt.update({'s': graph[j].y_offsets(alter=alter[1])})\n ignoreNext += 1\n continue\n elif 'c' not in fmt and (typenext == 'scatter_c' or np.array_equal(x, graph[j].x_offsets(alter=alter[0]))):\n fmt.update({'c': graph[j].y_offsets(alter=alter[1])})\n ignoreNext += 1\n if 'color' in fmt:\n # there cannot be both c and color keywords\n del fmt['color']\n continue\n else:\n break\n handle = ax.scatter(x, y, **fmt)\n except Exception as e:\n print('ERROR! Exception occured in Curve.plot function during',\n 'scatter.')\n print(type(e), e)\n elif type_graph == 'boxplot':\n if len(y) > 0 and not np.isnan(y).all():\n bxpltpos = self.attr('boxplot_position', None)\n boxplot['y'].append(y[~np.isnan(y)])\n boxplot['positions'].append(boxplot['i'] if bxpltpos is None else bxpltpos)\n boxplot['labels'].append(fmt['label'] if 'label' in fmt else '')\n boxplot['color'].append(fmt['color'] if 'color' in fmt else '')\n for key in ['widths', 'notch', 'vert']:\n if self.attr(key, None) is not None:\n boxplot.update({key: self.attr(key)})\n boxplot['i'] += 1\n elif type_graph == 'violinplot':\n if len(y) > 0 and not np.isnan(y).all():\n bxpltpos = self.attr('boxplot_position', None)\n violinplot['y'].append(y[~np.isnan(y)])\n violinplot['positions'].append(boxplot['i'] if bxpltpos is None else bxpltpos)\n violinplot['labels'].append(fmt['label'] if 'label' in fmt else '')\n violinplot['color'].append(fmt['color'] if 'color' in fmt else '')\n if 'showmeans' in attr:\n violinplotkwargs.update({'showmeans': attr['showmeans']})\n if 'showmedians' in attr:\n violinplotkwargs.update({'showmedians': attr['showmedians']})\n if 'showextrema' in attr:\n violinplotkwargs.update({'showextrema': attr['showextrema']})\n boxplot['i'] += 1\n elif type_graph in ['imshow', 'contour', 'contourf']:\n from grapa.curve_image import Curve_Image\n img, ignoreNext, X, Y = Curve_Image.getImageData(self, graph, graph_i, alter, ignoreNext)\n if 'label' in fmt:\n del fmt['label']\n if type_graph in ['contour', 'contourf']:\n for key in ['corner_mask', 'colors', 'alpha', 'cmap', 'norm',\n 'vmin', 'vmax', 'levels', 'origin', 'extent',\n 'locator', 'extend', 'xunits', 'yunits',\n 'antialiased', 'nchunk', 'linewidths',\n 'linestyles', 'hatches']:\n if key in attr and key not in fmt:\n fmt.update({key: attr[key]})\n # TODO: remove linewidths, linestyles for contourf, hatches for\n # contour\n args = [img]\n if (X is not None and Y is not None\n and type_graph in ['contour', 'contourf']):\n args = [X, Y] + args\n try:\n handle = getattr(ax, type_graph)(*args, **fmt)\n except Exception as e:\n print('Curve plot', type_graph, 'Exception')\n print(type(e), e)\n else:\n # default is plot (lin-lin) # also valid if no information is\n # stored, aka returned ''\n handle = ax.plot(x, y, linespec, **fmt)\n\n handles = handle if isinstance(handle, list) else [handle]\n for key in attr:\n if key not in fmt and key not in attrIgnore:\n for h in handles:\n if hasattr(h, 'set_'+key):\n try:\n getattr(h, 'set_'+key)(attr[key])\n except Exception as e:\n print('GraphIO Exception during plot kwargs',\n 'adjustment for key', key, ':', type(e))\n print(e)\n\n return handle, ignoreNext", "def plot(self, ax=None, name=None, **kwargs):\n import matplotlib.pyplot as plt\n\n if ax is None:\n fig, ax = plt.subplots()\n\n name = self.estimator_name if name is None else name\n\n 
line_kwargs = {\n 'label': \"{} (AUC = {:0.2f})\".format(name, self.roc_auc), \n 'lw':0.25\n\n }\n line_kwargs.update(**kwargs)\n\n self.line_ = ax.plot(self.fpr, self.tpr, **line_kwargs)[0]\n ax.set_xlabel(\"False Positive Rate\")\n ax.set_ylabel(\"True Positive Rate\")\n ax.legend(loc='lower right')\n\n self.ax_ = ax\n self.figure_ = ax.figure\n return self", "def plot(self,ax,**kwargs):\n self.XP_Plotter.plot(ax,**kwargs)\n self.lines_theory[0], = ax.plot(self.xx, self.pp_non_rel,'--g',**kwargs)\n self.lines_theory[1], = ax.plot(self.xx, self.pp_rel,'--m',**kwargs)\n self.lines_theory[2], = ax.plot(self.xx_itpl, self.pp_itpl,'-r',**kwargs)", "def plot_roc_curve(ht, scores, tp_label='tp', fp_label='fp', colors=None, title='ROC Curve', hover_mode='mouse'):\n if colors is None:\n # Get a palette automatically\n from bokeh.palettes import d3\n palette = d3['Category10'][max(3, len(scores))]\n colors = {score: palette[i] for i, score in enumerate(scores)}\n\n if isinstance(scores, str):\n scores = [scores]\n total_tp, total_fp = ht.aggregate((hl.agg.count_where(ht[tp_label]), hl.agg.count_where(ht[fp_label])))\n\n p = figure(title=title, x_axis_label='FPR', y_axis_label='TPR', tools=\"hover,save,pan,box_zoom,reset,wheel_zoom\")\n p.add_layout(Title(text=f'Based on {total_tp} TPs and {total_fp} FPs'), 'above')\n\n aucs = []\n for score in scores:\n ordered_ht = ht.key_by(_score=-ht[score])\n ordered_ht = ordered_ht.select(\n score_name=score, score=ordered_ht[score],\n tpr=hl.scan.count_where(ordered_ht[tp_label]) / total_tp,\n fpr=hl.scan.count_where(ordered_ht[fp_label]) / total_fp,\n ).key_by().drop('_score')\n last_row = hl.utils.range_table(1).key_by().select(score_name=score, score=hl.float64(float('-inf')), tpr=hl.float64(1.0), fpr=hl.float64(1.0))\n ordered_ht = ordered_ht.union(last_row)\n ordered_ht = ordered_ht.annotate(\n auc_contrib=hl.or_else((ordered_ht.fpr - hl.scan.max(ordered_ht.fpr)) * ordered_ht.tpr, 0.0)\n )\n auc = ordered_ht.aggregate(hl.agg.sum(ordered_ht.auc_contrib))\n aucs.append(auc)\n df = ordered_ht.annotate(score_name=ordered_ht.score_name + f' (AUC = {auc:.4f})').to_pandas()\n p.line(x='fpr', y='tpr', legend_field='score_name', source=ColumnDataSource(df), color=colors[score], line_width=3)\n\n p.legend.location = 'bottom_right'\n p.legend.click_policy = 'hide'\n p.select_one(HoverTool).tooltips = [(x, f\"@{x}\") for x in ('score_name', 'score', 'tpr', 'fpr')]\n p.select_one(HoverTool).mode = hover_mode\n return p, aucs", "def plot_1(ecg, sample_rate=500, title = 'ECG'):\n plt.figure(figsize=(15,2))\n plt.suptitle(title)\n plt.subplots_adjust(\n hspace = 0, \n wspace = 0.04,\n left = 0.04, # the left side of the subplots of the figure\n right = 0.98, # the right side of the subplots of the figure\n bottom = 0.2, # the bottom of the subplots of the figure\n top = 0.88\n )\n seconds = len(ecg)/sample_rate\n\n ax = plt.subplot(1, 1, 1)\n step = 1.0/sample_rate\n _ax_plot(ax,np.arange(0,len(ecg)*step,step),ecg, seconds)", "def plot_np_reliability(t, r, ci_lb=None, ci_ub=None, ax=None, linestyle='-',\n color='blue', label=None):\n if ax is None:\n ax = plt.gca()\n\n # Plot r against t as step function.\n ax.step(t, r, where='post', color=color,\n label=label, linestyle=linestyle)\n\n # Add confidence intervals if provided.\n if ci_lb is not None:\n ax.step(t, ci_lb, where='post', color=color, alpha=0.5,\n linestyle=linestyle)\n if ci_ub is not None:\n ax.step(t, ci_ub, where='post', color=color, alpha=0.5,\n linestyle=linestyle)\n\n # Add axis-labels.\n 
ax.set_xlabel(r'$t$')\n ax.set_ylabel(r'$\\widehat R(t)$')\n\n ax.set_xlim(0)\n\n return ax", "def plot_back(quadrant, strip):\n max_strip = 12\n pos = (quadrant-1)*max_strip + strip-1\n #ch_b = np.concatenate((b_ch_Ni[:, pos], b_ch_Pb[2:, pos], b_ch_Sm[:, pos])) # Excluding f1 & f2 for Pb\n #E_b = np.concatenate((b_E[0, :], b_E[1, 2:], b_E[2, :]))\n ch_b = np.concatenate((b_ch_Pb[:, pos], b_ch_Sm[:, pos]))\n E_b = np.concatenate((b_E[0, :], b_E[1, :]))\n if pos in back_special_case:\n #ch_b = np.concatenate((b_ch_Ni[:, pos], b_ch_Pb[2:, pos], b_ch_Sm[1:, pos]))\n #E_b = np.concatenate((b_E[0, :], b_E[1, 2:], b_E[2, 1:]))\n ch_b = np.concatenate((b_ch_Pb[1:, pos], b_ch_Sm[1:, pos]))\n E_b = np.concatenate((b_E[0, 1:], b_E[1, 1:]))\n fig = plt.figure()\n plt.plot(ch_b, E_b, color='blue', marker='x', linestyle='None')\n plt.plot(ch_b, b_gain[pos]*ch_b+b_offset[pos], color='red')\n plt.title(\"Quadrant {}, strip {}\".format(quadrant, strip))\n plt.xlabel(\"Channel\")\n plt.ylabel(\"E (keV)\")\n #plt.legend([\"Ni, (Pb), Sm\", \"Lin. fit\"], loc=0)\n plt.legend([\"Pb, Sm\", \"Lin. fit\"], loc=0)\n fig.set_tight_layout(True)\n plt.show()", "def scatters(\n adata,\n basis=\"umap\",\n x=0,\n y=1,\n color='ntr',\n layer=\"X\",\n highlights=None,\n labels=None,\n values=None,\n theme=None,\n cmap=None,\n color_key=None,\n color_key_cmap=None,\n background=None,\n ncols=4,\n pointsize=None,\n figsize=(6, 4),\n show_legend=\"on data\",\n use_smoothed=True,\n aggregate=None,\n show_arrowed_spines=False,\n ax=None,\n sort='raw',\n save_show_or_return=\"show\",\n save_kwargs={},\n return_all=False,\n add_gamma_fit=False,\n frontier=False,\n contour=False,\n ccmap=None,\n calpha=2.3,\n sym_c=False,\n smooth=False,\n **kwargs\n):\n\n import matplotlib.pyplot as plt\n from matplotlib import rcParams\n from matplotlib.colors import to_hex\n\n if contour: frontier = False\n\n if background is None:\n _background = rcParams.get(\"figure.facecolor\")\n _background = to_hex(_background) if type(_background) is tuple else _background\n set_figure_params('dynamo', background=_background)\n else:\n _background = background\n set_figure_params('dynamo', background=_background)\n\n x, y = [x] if type(x) in [int, str] else x, [y] if type(y) in [int, str] else y\n if all([is_gene_name(adata, i) for i in basis]):\n if x[0] not in ['M_s', 'X_spliced'] and y[0] not in ['M_u', 'X_unspliced']:\n if ('M_s' in adata.layers.keys() and 'M_u' in adata.layers.keys()):\n x, y = ['M_s'], ['M_u']\n elif ('X_spliced' in adata.layers.keys() and 'X_unspliced' in adata.layers.keys()):\n x, y = ['X_spliced'], ['X_unspliced']\n else:\n x, y = ['spliced'], ['unspliced']\n\n if use_smoothed:\n mapper = get_mapper()\n\n # check layer, basis -> convert to list\n\n if type(color) is str:\n color = [color]\n if type(layer) is str:\n layer = [layer]\n if type(basis) is str:\n basis = [basis]\n n_c, n_l, n_b, n_x, n_y = (\n 1 if color is None else len(color),\n 1 if layer is None else len(layer),\n 1 if basis is None else len(basis),\n 1 if x is None else len(x),\n 1 if y is None else len(y),\n )\n\n point_size = (\n 16000.0 / np.sqrt(adata.shape[0])\n if pointsize is None\n else 16000.0 / np.sqrt(adata.shape[0]) * pointsize\n )\n\n scatter_kwargs = dict(\n alpha=0.1, s=point_size, edgecolor=None, linewidth=0, rasterized=True\n ) # (0, 0, 0, 1)\n if kwargs is not None:\n scatter_kwargs.update(kwargs)\n\n font_color = _select_font_color(_background)\n\n total_panels, ncols = n_c * n_l * n_b * n_x * n_y, min(max([n_c, n_l, n_b, n_x, n_y]), ncols)\n 
nrow, ncol = int(np.ceil(total_panels / ncols)), ncols\n if figsize is None:\n figsize = plt.rcParams[\"figsize\"]\n\n if total_panels >= 1 and ax is None:\n plt.figure(None, (figsize[0] * ncol, figsize[1] * nrow), facecolor=_background)\n gs = plt.GridSpec(nrow, ncol, wspace=0.12)\n\n i = 0\n axes_list, color_list = [], []\n for cur_b in basis:\n for cur_l in layer:\n if use_smoothed:\n cur_l_smoothed = mapper[cur_l]\n prefix = cur_l + \"_\"\n\n # if prefix + cur_b in adata.obsm.keys():\n # if type(x) != str and type(y) != str:\n # x_, y_ = (\n # adata.obsm[prefix + cur_b][:, int(x)],\n # adata.obsm[prefix + cur_b][:, int(y)],\n # )\n # else:\n # continue\n for cur_c in color:\n cur_title = cur_c\n if cur_l in [\"protein\", \"X_protein\"]:\n _color = adata.obsm[cur_l].loc[cur_c, :]\n else:\n _color = adata.obs_vector(cur_c, layer=None) if cur_l == 'X' else adata.obs_vector(cur_c, layer=cur_l)\n for cur_x, cur_y in zip(x, y):\n if type(cur_x) is int and type(cur_y) is int:\n points = pd.DataFrame(\n {\n cur_b + \"_0\": adata.obsm[prefix + cur_b][:, cur_x],\n cur_b + \"_1\": adata.obsm[prefix + cur_b][:, cur_y],\n }\n )\n points.columns = [cur_b + \"_0\", cur_b + \"_1\"]\n elif is_gene_name(adata, cur_x) and is_gene_name(adata, cur_y):\n points = pd.DataFrame(\n {\n cur_x: adata.obs_vector(k=cur_x, layer=None) if cur_l_smoothed == 'X' else adata.obs_vector(k=cur_x, layer=cur_l_smoothed),\n cur_y: adata.obs_vector(k=cur_y, layer=None) if cur_l_smoothed == 'X' else adata.obs_vector(k=cur_y, layer=cur_l_smoothed),\n }\n )\n # points = points.loc[(points > 0).sum(1) > 1, :]\n points.columns = [\n cur_x + \" (\" + cur_l_smoothed + \")\",\n cur_y + \" (\" + cur_l_smoothed + \")\",\n ]\n cur_title = cur_x + ' VS ' + cur_y\n elif is_cell_anno_column(adata, cur_x) and is_cell_anno_column(adata, cur_y):\n points = pd.DataFrame(\n {\n cur_x: adata.obs_vector(cur_x),\n cur_y: adata.obs_vector(cur_y),\n }\n )\n points.columns = [cur_x, cur_y]\n cur_title = cur_x + ' VS ' + cur_y\n elif is_cell_anno_column(adata, cur_x) and is_gene_name(adata, cur_y):\n points = pd.DataFrame(\n {\n cur_x: adata.obs_vector(cur_x), \n cur_y: adata.obs_vector(k=cur_y, layer=None) if cur_l_smoothed == 'X' else adata.obs_vector(k=cur_y, layer=cur_l_smoothed),\n }\n )\n # points = points.loc[points.iloc[:, 1] > 0, :]\n points.columns = [cur_x, cur_y + \" (\" + cur_l_smoothed + \")\"]\n cur_title = cur_y\n elif is_gene_name(adata, cur_x) and is_cell_anno_column(adata, cur_y):\n points = pd.DataFrame(\n {\n cur_x: adata.obs_vector(k=cur_x, layer=None) if cur_l_smoothed == 'X' else adata.obs_vector(k=cur_x, layer=cur_l_smoothed), \n cur_y: adata.obs_vector(cur_y)\n }\n )\n # points = points.loc[points.iloc[:, 0] > 0, :]\n points.columns = [cur_x + \" (\" + cur_l_smoothed + \")\", cur_y]\n cur_title = cur_x\n elif is_layer_keys(adata, cur_x) and is_layer_keys(adata, cur_y):\n add_gamma_fit = True\n cur_x_, cur_y_ = adata[:, cur_b].layers[cur_x], adata[:, cur_b].layers[cur_y]\n points = pd.DataFrame(\n {cur_x: flatten(cur_x_),\n cur_y: flatten(cur_y_)}\n )\n # points = points.loc[points.iloc[:, 0] > 0, :]\n points.columns = [cur_x, cur_y]\n cur_title = cur_b\n if aggregate is not None:\n groups, uniq_grp = (\n adata.obs[aggregate],\n adata.obs[aggregate].unique().to_list(),\n )\n group_color, group_median = (\n np.zeros((1, len(uniq_grp))).flatten()\n if isinstance(_color[0], Number)\n else np.zeros((1, len(uniq_grp))).astype(\"str\").flatten(),\n np.zeros((len(uniq_grp), 2)),\n )\n\n grp_size = 
adata.obs[aggregate].value_counts().values\n scatter_kwargs = (\n {\"s\": grp_size}\n if scatter_kwargs is None\n else update_dict(scatter_kwargs, {\"s\": grp_size})\n )\n\n for ind, cur_grp in enumerate(uniq_grp):\n group_median[ind, :] = np.nanmedian(\n points.iloc[np.where(groups == cur_grp)[0], :2], 0\n )\n if isinstance(_color[0], Number):\n group_color[ind] = np.nanmedian(\n np.array(_color)[np.where(groups == cur_grp)[0]]\n )\n else:\n group_color[ind] = (\n pd.Series(_color)[np.where(groups == cur_grp)[0]]\n .value_counts()\n .index[0]\n )\n\n points, _color = (\n pd.DataFrame(\n group_median, index=uniq_grp, columns=points.columns\n ),\n group_color,\n )\n # https://stackoverflow.com/questions/4187185/how-can-i-check-if-my-python-object-is-a-number\n # answer from Boris.\n is_not_continous = (\n not isinstance(_color[0], Number) or _color.dtype.name == 'category'\n )\n\n if is_not_continous:\n labels = _color.to_dense() if is_categorical(_color) else _color\n if theme is None:\n if _background in [\"#ffffff\", \"black\"]:\n _theme_ = \"glasbey_dark\"\n else:\n _theme_ = \"glasbey_white\"\n else:\n _theme_ = theme\n else:\n values = _color\n if theme is None:\n if _background in [\"#ffffff\", \"black\"]:\n _theme_ = (\n \"inferno\"\n if cur_l != \"velocity\"\n else \"div_blue_black_red\"\n )\n else:\n _theme_ = (\n \"viridis\" if not cur_l.startswith(\"velocity\") else \"div_blue_red\"\n )\n else:\n _theme_ = theme\n\n _cmap = _themes[_theme_][\"cmap\"] if cmap is None else cmap\n _color_key_cmap = (\n _themes[_theme_][\"color_key_cmap\"]\n if color_key_cmap is None\n else color_key_cmap\n )\n _background = (\n _themes[_theme_][\"background\"] if _background is None else _background\n )\n\n if labels is not None and values is not None:\n raise ValueError(\n \"Conflicting options; only one of labels or values should be set\"\n )\n\n if total_panels > 1:\n ax = plt.subplot(gs[i])\n i += 1\n\n # if highligts is a list of lists - each list is relate to each color element\n if highlights is not None:\n if is_list_of_lists(highlights):\n _highlights = highlights[color.index(cur_c)]\n _highlights = (\n _highlights\n if all([i in _color for i in _highlights])\n else None\n )\n else:\n _highlights = (\n highlights\n if all([i in _color for i in highlights])\n else None\n )\n\n color_out = None\n\n if smooth and not is_not_continous:\n knn = adata.obsp['moments_con']\n values = calc_1nd_moment(values, knn)[0] if smooth in [1, True] else \\\n calc_1nd_moment(values, knn**smooth)[0]\n\n if points.shape[0] <= figsize[0] * figsize[1] * 100000:\n ax, color_out = _matplotlib_points(\n points.values,\n ax,\n labels,\n values,\n highlights,\n _cmap,\n color_key,\n _color_key_cmap,\n _background,\n figsize[0],\n figsize[1],\n show_legend,\n sort=sort,\n frontier=frontier,\n contour=contour,\n ccmap=ccmap,\n calpha=calpha,\n sym_c=sym_c,\n **scatter_kwargs\n )\n else:\n ax = _datashade_points(\n points.values,\n ax,\n labels,\n values,\n highlights,\n _cmap,\n color_key,\n _color_key_cmap,\n _background,\n figsize[0],\n figsize[1],\n show_legend,\n sort=sort,\n frontier=frontier,\n contour=contour,\n ccmap=ccmap,\n calpha=calpha,\n sym_c=sym_c,\n **scatter_kwargs\n )\n\n if i == 1 and show_arrowed_spines:\n arrowed_spines(ax, points.columns[:2], _background)\n else:\n despline_all(ax)\n deaxis_all(ax)\n\n ax.set_title(cur_title)\n\n axes_list.append(ax)\n color_list.append(color_out)\n\n labels, values = None, None # reset labels and values\n\n if add_gamma_fit and cur_b in 
adata.var_names[adata.var.use_for_dynamics]:\n xnew = np.linspace(0, points.iloc[:, 0].max() * 0.80)\n k_name = 'gamma_k' if adata.uns['dynamics']['experiment_type'] == 'one-shot' else 'gamma'\n if k_name in adata.var.columns:\n if (\n not (\"gamma_b\" in adata.var.columns)\n or all(adata.var.gamma_b.isna())\n ):\n adata.var.loc[:, \"gamma_b\"] = 0\n ax.plot(\n xnew,\n xnew * adata[:, cur_b].var.loc[:, k_name].unique()\n + adata[:, cur_b].var.loc[:, \"gamma_b\"].unique(),\n dashes=[6, 2],\n c=font_color,\n )\n else:\n raise Exception(\n \"adata does not seem to have velocity_gamma column. Velocity estimation is required before \"\n \"running this function.\"\n )\n if save_show_or_return == \"save\":\n s_kwargs = {\"path\": None, \"prefix\": 'scatters', \"dpi\": None,\n \"ext\": 'pdf', \"transparent\": True, \"close\": True, \"verbose\": True}\n s_kwargs = update_dict(s_kwargs, save_kwargs)\n\n save_fig(**s_kwargs)\n if background is not None: reset_rcParams()\n elif save_show_or_return == \"show\":\n if show_legend:\n plt.subplots_adjust(right=0.85)\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n plt.tight_layout()\n \n plt.show()\n if background is not None: reset_rcParams()\n elif save_show_or_return == \"return\":\n if background is not None: reset_rcParams()\n\n if return_all:\n return (axes_list, color_list, font_color) if total_panels > 1 else (ax, color_out, font_color)\n else:\n return axes_list if total_panels > 1 else ax", "def plot_tuning_curves(self, baseline_rate=10.):\n x = np.arange(0, 1 + 0.01, 0.01)\n l0 = self.data['L0']\n l1 = self.data['L1']\n y_on = np.exp(np.log(l0) + x * np.log(l1 / l0))\n y_off = np.exp(np.log(l0) + (1 - x) * np.log(l1 / l0))\n plt.plot(x, y_on, label='ON')\n plt.plot(x, y_off, label='OFF')\n plt.plot(x, baseline_rate + 0 * x, '--')\n # plt.xlabel('Stimulus intensity')\n # plt.ylabel('Firing Rate (Hz)')\n # plt.title('Firing rate as a function \\n of Stimulus Intensity')\n # plt.legend()", "def draw_roc(signal, background, output_dir=\".\", output_name=\"roc\", form=\".pdf\"):\n\n x, y = get_roc(signal, background)\n file_path = output_dir + \"/\"+ output_name + \"_X.cvs\"\n numpy.savetxt(file_path, x, delimiter=\",\")\n file_path = output_dir + \"/\"+ output_name + \"_Y.cvs\"\n numpy.savetxt(file_path, y, delimiter=\",\")\n output_name = output_name + form\n\n auc = metrics.auc(x, y, reorder=True)\n\n fig = plt.figure(1, figsize=(7, 7), dpi=300)\n fig.clear()\n\n # Create an axes instance\n ax = fig.add_subplot(111)\n\n ax.plot(x, y, '-', color='#B64926', lw=2, label=\"AUC: %.4f\" % auc)\n ax.margins(0.05)\n\n ax.set_xlabel(\"Background efficiency\")\n ax.set_ylabel(\"Signal efficiency\")\n \n fig.set_tight_layout(True)\n\n ax.legend(loc='lower right', numpoints=1, frameon=False)\n\n print(\"AUC: %.4f\" % auc)\n\n fig.savefig(os.path.join(output_dir, output_name))\n\n plt.close()\n\n def get_index(y, value):\n \"\"\"\n Find the last index of the element in y\n satistying y[index] <= value\n \"\"\"\n\n for i in range(len(y)):\n if y[i] <= value:\n continue\n\n return i\n\n print(\"Background efficiency for signal efficiency of 0.70: %f\" % x[get_index(y, 0.70)])\n print(\"Background efficiency for signal efficiency of 0.80: %f\" % x[get_index(y, 0.80)])\n print(\"Background efficiency for signal efficiency of 0.90: %f\" % x[get_index(y, 0.90)])", "def plot_equivalent_samples(\n self, acquisition_risks, baseline=None, errors='std',\n fig=None, ax=None, alpha=0.3, i=0, labels=None, zorders=None,\n colors=None, relative=True, 
rolling_before=False,\n rolling_after=False, inverse=False):\n if errors == 'percentiles':\n upper_base = self.quant_errors\n elif errors == 'std':\n upper_base = self.errors\n else:\n raise ValueError\n\n if fig is None or ax is None:\n fig, ax = plt.subplots(dpi=200)\n\n if baseline is None:\n baselines = ['RandomAcquisition', 'BiasedRiskEstimator']\n\n base_risk = upper_base.loc[baselines[0]][baselines[1]].values\n if zorders is None:\n zorders = 100 * [None]\n\n linestyles = itertools.cycle(['--', '-.', ':'])\n for acquisition, risk in acquisition_risks:\n acq_risk = f'{acquisition}_{risk}'\n if colors is None:\n color = acquisition_risks_to_color[acq_risk]\n else:\n color = colors[i]\n\n s_u = upper_base.loc[acquisition][risk].values\n if (R := rolling_before) is not False:\n s_u = np.convolve(\n s_u, np.ones(R)/R, mode='valid')\n base_risk = np.convolve(\n base_risk, np.ones(R)/R, mode='valid')\n\n diffs = s_u[:, np.newaxis] - base_risk\n diffs[diffs < 0] = 1e10\n idxs = np.argmin(diffs, axis=1) + 1\n x = range(1, len(idxs)+1)\n if relative:\n y = idxs/x\n else:\n y = idxs\n\n if (R := rolling_after) is not False:\n y = np.convolve(y, np.ones(R)/R, mode='valid')\n x = range(1, len(y)+1)\n\n if inverse:\n y = 1/y\n\n ax.plot(y, '-', color=color, label=labels[i],\n zorder=zorders[i])\n i += 1\n\n return fig, ax", "def plot_pros(self, data_audio, fs, F0, segmentsV, segmentsU, F0_features):\n plt.figure(figsize=(6, 6))\n plt.subplot(211)\n ax1 = plt.gca()\n t = np.arange(len(data_audio))/float(fs)\n colors = cm.get_cmap('Accent', 5)\n ax1.plot(t, data_audio, 'k', label=\"speech signal\",\n alpha=0.5, color=colors.colors[4])\n ax1.set_ylabel('Amplitude', fontsize=12)\n ax1.set_xlabel('Time (s)', fontsize=12)\n ax1.set_xlim([0, t[-1]])\n ax2 = ax1.twinx()\n fsp = len(F0)/t[-1]\n t2 = np.arange(len(F0))/fsp\n ax2.plot(\n t2, F0, color=colors.colors[0], linewidth=2, label=r\"Real $F_0$\", alpha=0.5)\n ax2.set_ylabel(r'$F_0$ (Hz)', color=colors.colors[0], fontsize=12)\n ax2.tick_params('y', colors=colors.colors[0])\n\n p0 = np.where(F0 != 0)[0]\n f0avg = np.nanmean(np.where(F0 != 0, F0, np.nan))\n f0std = np.std(F0[p0])\n\n ax2.plot([t2[0], t2[-1]], [f0avg, f0avg],\n color=colors.colors[2], label=r\"Avg. $F_0$\")\n ax2.fill_between([t2[0], t2[-1]], y1=[f0avg+f0std, f0avg+f0std], y2=[f0avg-f0std,\n f0avg-f0std], color=colors.colors[2], alpha=0.2, label=r\"Avg. 
$F_0\\pm$ SD.\")\n F0rec = polyf0(F0)\n ax2.plot(t2, F0rec, label=r\"estimated $F_0$\",\n c=colors.colors[1], linewidth=2.0)\n plt.text(t2[2], np.max(F0)-5, r\"$F_0$ SD.=\" +\n str(np.round(f0std, 1))+\" Hz\")\n plt.text(t2[2], np.max(F0)-20, r\"$F_0$ tilt.=\" +\n str(np.round(F0_features[6], 1))+\" Hz\")\n\n plt.legend(ncol=2, loc=8)\n\n plt.subplot(212)\n size_frameS = 0.02*float(fs)\n size_stepS = 0.01*float(fs)\n\n logE = energy_cont_segm([data_audio], size_frameS, size_stepS)\n Esp = len(logE[0])/t[-1]\n t2 = np.arange(len(logE[0]))/float(Esp)\n plt.plot(t2, logE[0], color='k', linewidth=2.0)\n plt.xlabel('Time (s)', fontsize=12)\n plt.ylabel('Energy (dB)', fontsize=12)\n plt.xlim([0, t[-1]])\n plt.grid(True)\n plt.tight_layout()\n plt.show()\n\n plt.figure(figsize=(6, 3))\n Ev = energy_cont_segm(segmentsV, size_frameS, size_stepS)\n Eu = energy_cont_segm(segmentsU, size_frameS, size_stepS)\n\n plt.plot([np.mean(Ev[j])\n for j in range(len(Ev))], label=\"Voiced energy\")\n plt.plot([np.mean(Eu[j])\n for j in range(len(Eu))], label=\"Unvoiced energy\")\n\n plt.xlabel(\"Number of segments\")\n plt.ylabel(\"Energy (dB)\")\n plt.legend()\n plt.grid()\n plt.tight_layout()\n plt.show()", "def plot_curve(self, true_values, predictions, ax=None, title='ROC', label='ROC', lw=1, add_auc=True, **kwargs):\n fpr, tpr, _ = roc_curve(true_values, predictions)\n roc_auc = auc(fpr, tpr)\n label_auc = label + ': {:.3f} AUC'.format(roc_auc)\n logging.info('ROC result: %s', label_auc)\n ax.plot(fpr, tpr, lw=lw, label=label_auc if add_auc else label, **kwargs)\n ax.set_title(title)\n ax.set_xlabel('FPR')\n ax.set_ylabel('TPR')\n ax.legend(loc='lower right', frameon=False)\n return ax", "def plot_inline(a, frq=60e6, title=''):\n w = np.where(a.flag)\n x = a.xo.values.astype('float')[w]\n crl = a.crl.values.astype('float')[w]\n pt = a.pt.values.astype('float')[w]\n pc = a.pc.values.astype('float')[w]\n pn = a.pn.values.astype('float')[w]\n eps = a.eps.values.astype('float')[w]\n sh = a.sh.values.astype('float')[w]\n\n plt.figure(figsize=(15,10))\n\n #--------------------------------------------------------------------------\n # Correlation Coefficient\n #--------------------------------------------------------------------------\n ax_crl = plt.subplot2grid((5, 1), (0, 0))\n plt.plot(x, crl, 'o-', color='k')\n plt.grid(alpha=.5)\n plt.ylabel(r'Correl. 
Coeff.', size=17)\n plt.xticks(size='10')\n plt.yticks(size='15')\n plt.title(title, size='15')\n\n #--------------------------------------------------------------------------\n # Signal components\n #--------------------------------------------------------------------------\n ax_pwr = plt.subplot2grid((5,1), (1, 0), rowspan=2)\n ax_pwr.fill_between(x, pc, pn, where=pc>=pn, facecolor='k', alpha=.05, interpolate=True)\n ax_pwr.fill_between(x, pc, pn, where=pc<=pn, facecolor='k', alpha=.4, interpolate=True)\n plt.plot(x, pc, color='k', lw=3, alpha=.9, label=r'Reflectance $(P_c)$')\n plt.plot(x, pn, color='k', lw=3, alpha=.6, label=r'Scattering $(P_n)$')\n plt.ylim([-40,0])\n plt.grid(alpha=.5)\n plt.ylabel(r'Power $[dB]$', size=17)\n plt.yticks(size='15')\n plt.xticks(size='10')\n plt.legend(loc='lower right', fancybox=True).get_frame().set_alpha(0.5)\n\n #--------------------------------------------------------------------------\n # Permittivity\n #--------------------------------------------------------------------------\n ax_eps = plt.subplot2grid((5,1), (3, 0), rowspan=2)\n plt.semilogy(x, eps, color='k', lw=3, alpha=.9, label=r'Permittivity $(\\epsilon)$')\n plt.ylim(1,100)\n plt.grid(True, which='both', alpha=.5)\n plt.ylabel('Permittivity', size=17)\n plt.xticks(size='10')\n plt.xlabel('Frame #', size=12)\n plt.yticks(size='15')\n ax_eps.set_yticks([1, 10, 100])\n ax_eps.set_yticklabels(['1', '10', '100'])\n\n #--------------------------------------------------------------------------\n # RMS height\n #--------------------------------------------------------------------------\n ax_sh = ax_eps.twinx()\n plt.semilogy(x, sh, '-', color='k', lw=3, alpha=.3, label=r'RMS height $(\\sigma_h)$')\n plt.semilogy(x, eps, color='k', lw=3, alpha=.9, label=r'Permittivity $(\\epsilon)$')\n plt.ylim(0.01,1)\n plt.ylabel(r'RMS height $[m]$', size=17)\n plt.yticks(size='15')\n ax_sh.set_yticks([.01, .1, 1])\n ax_sh.set_yticklabels(['0.01', '0.1', '1'])\n ax_sh.set # TODO: what is this\n plt.legend(loc='upper right', fancybox=True).get_frame().set_alpha(0.5)", "def osteoAblationPlot():\n x = pickle.load(open(\"pickles/ablation_osteo_x_fold_1.pkl\", \"rb\"))\n y1 = pickle.load(open(\"pickles/ablation_osteo_y_fold_1.pkl\", \"rb\"))\n y2 = pickle.load(open(\"pickles/ablation_osteo_y_fold_2.pkl\", \"rb\"))\n # y3 = pickle.load(open(\"pickles/ablation_osteo_y_fold_3.pkl\", \"rb\"))\n y_avg = []\n y_std = []\n for i in range(0, len(x)):\n y_avg.append(np.mean(y1[i] + y2[i]))# + y3[i]))\n y_std.append(np.std(y1[i] + y2[i]))# + y3[i]))\n\n # sorted_pairs = sorted(zip(x, y_avg, y_std))\n # tuples = zip(*sorted_pairs)\n # x, y_avg, y_std = [ list(tuple) for tuple in tuples]\n\n fig, ax = plt.subplots()\n plt.xlabel(\"Intensity Percentile of Hoechst Ablated\", fontname=\"Times New Roman\", fontsize=12) \n x = [val * 100 for val in x]\n x_vals = ax.get_xticks()\n x_vals = np.insert(x_vals, 0, 0)\n ax.set_xticklabels(['{:,.0%}'.format(x_val) for x_val in x_vals], fontname=\"Times New Roman\")\n ax.axhline(.44, linestyle=\"--\", color='black', lw=.80, alpha=0.8)\n ax.errorbar(x, y_avg, yerr=y_std, capsize=1.5, elinewidth=.2, ecolor=\"black\", label=\"ML Model\")\n ax.set_ylabel(\"Average Pearson Correlation Over Test Set\", fontname=\"Times New Roman\", fontsize=12)\n plt.axis((-.02,102,0,1))\n plt.title(\"Pearson Performance with Increasing Ablations\", fontname=\"Times New Roman\", fontsize=14)\n for tick in ax.get_xticklabels():\n tick.set_fontname(\"Times New Roman\")\n for tick in ax.get_yticklabels():\n 
tick.set_fontname(\"Times New Roman\")\n plt.savefig(\"matplotlib_figures/ablation_osteo.png\", dpi=300)", "def plot_psychometric(df, color='black', ax=None, **kwargs):\n\n if len(df['signedContrast'].unique()) > 4:\n df2 = df.groupby(['signedContrast']).agg(\n {'choice': 'count', 'choice2': 'mean'}).reset_index()\n df2.rename(columns={\"choice2\": \"fraction\", \"choice\": \"ntrials\"}, inplace=True)\n\n pars, L = psy.mle_fit_psycho(df2.transpose().values, # extract the data from the df\n P_model='erf_psycho_2gammas',\n parstart=np.array([df2['signedContrast'].mean(), 20., 0.05,\n 0.05]),\n parmin=np.array([df2['signedContrast'].min(), 0., 0., 0.]),\n parmax=np.array([df2['signedContrast'].max(), 100., 1, 1]))\n sns.lineplot(np.arange(-100, 100), psy.erf_psycho_2gammas(pars, np.arange(-100, 100)),\n color=color, ax=ax)\n\n if 100 in df.signedContrast.values and not 50 in df.signedContrast.values:\n df['signedContrast'] = df.signedContrast.replace(-100, -35)\n df['signedContrast'] = df.signedContrast.replace(100, 35)\n\n brokenXaxis = True\n else:\n brokenXaxis = False\n\n # plot datapoints on top\n sns.lineplot(x='signedContrast', y='choice2', err_style=\"bars\", linewidth=0, linestyle='None',\n mew=0.5,\n marker='.', ci=68, data=df, color=color, ax=ax)\n\n if not brokenXaxis:\n # Reduce the clutter\n ax.set_xticks([-100, -50, 0, 50, 100])\n ax.set_xticklabels(['-100', '-50', '0', '50', '100'])\n ax.set_xlim([-110, 110])\n else:\n ax.set_xticks([-35, -25, -12.5, -6, 0, 6, 12.5, 25, 35])\n ax.set_xticklabels(['-100', '-25', '-12.5', '-6.25', '0', '6.25', '12.5', '25', '100'],\n size='x-small', rotation=-90)\n ax.set_xlim([-40, 40])\n\n ax.set_yticks([0, .5, 1])\n ax.set_ylim([-0.03, 1.03])\n ax.set_xlabel('Contrast (%)')\n\n return ax", "def plot_roc(self, ax, prob, y, label='ROC'):\n self.df = self.calculate_threshold_values(prob, y)\n ax.plot([1] + list(self.df.fpr), [1] + list(self.df.tpr), label=label)\n x = [1] + list(self.df.fpr)\n y1 = [1] + list(self.df.tpr)\n y2 = x\n ax.fill_between(x, y1, y2, alpha=0.2)\n ax.set_xlabel('fpr')\n ax.set_ylabel('tpr')\n ax.set_title('ROC Curve')\n ax.legend()", "def plot_extmodels(extdata, alax=False, wavenum=False):\n x = np.arange(0.1, 3.33, 0.01) * u.micron\n Rvs = [2.0, 3.1, 4.0, 5.0]\n style = [\"--\", \"-\", \":\", \"-.\"]\n for i, cRv in enumerate(Rvs):\n curve = CCM89(Rv=cRv)\n if alax:\n if extdata.type_rel_band != \"V\":\n emod = CCM89(cRv)\n (indx,) = np.where(extdata.type_rel_band == extdata.names[\"BAND\"])\n axav = emod(extdata.waves[\"BAND\"][indx[0]])\n else:\n axav = 1.0\n y = curve(x) / axav\n else:\n # compute A(V)\n if \"AV\" not in extdata.columns.keys():\n extdata.calc_AV()\n # convert the model curve from A(lambda)/A(V) to E(lambda-V), using the computed A(V) of the data.\n y = (curve(x) - 1) * extdata.columns[\"AV\"][0]\n\n if wavenum:\n px = 1 / x\n else:\n px = x\n plt.plot(\n px.value,\n y,\n style[i],\n color=\"k\",\n alpha=0.7,\n linewidth=1,\n label=\"R(V) = {:4.1f}\".format(cRv),\n )\n # allow to find the best position (supports regular and wavenum)\n plt.legend()", "def plot_12(\n ecg, \n sample_rate = 500, \n title = 'ECG 12', \n lead_index = lead_index, \n lead_order = None,\n columns = 2\n ):\n if not lead_order:\n lead_order = list(range(0,len(ecg)))\n\n leads = len(lead_order)\n seconds = len(ecg[0])/sample_rate\n\n plt.rcParams.update({'font.size': 8})\n fig, ax = plt.subplots(\n ceil(len(lead_order)/columns),columns,\n sharex=True, \n sharey=True,\n figsize=(0.7*seconds*columns, 1.1*leads/columns)\n )\n 
fig.subplots_adjust(\n hspace = 0, \n wspace = 0.04,\n left = 0.04, # the left side of the subplots of the figure\n right = 0.98, # the right side of the subplots of the figure\n bottom = 0.06, # the bottom of the subplots of the figure\n top = 0.95\n )\n fig.suptitle(title)\n\n step = 1.0/sample_rate\n\n for i in range(0, len(lead_order)):\n if(columns == 1):\n t_ax = ax[i]\n else:\n t_ax = ax[i//columns,i%columns]\n t_lead = lead_order[i]\n t_ax.set_ylabel(lead_index[t_lead])\n t_ax.tick_params(axis='x',rotation=90)\n \n _ax_plot(t_ax, np.arange(0, len(ecg[t_lead])*step, step), ecg[t_lead], seconds)", "def robustnessPlot(ax):\n # Setup the range of avidity and ligand concentration we'll look at\n gnus = np.logspace(1, 3, 3, base=2, dtype=np.int)\n Los = np.logspace(start=-11, stop=-7, num=35, dtype=np.float)\n\n pp = pd.DataFrame(np.array(np.meshgrid(gnus, Los)).T.reshape(-1, 2),\n columns=['gnus', 'Los'])\n\n pp['CPredict'] = pp.apply(lambda x: InVivoPredict(x.values)[1], axis=1)\n\n # Change avidities to strings\n pp['gnus'] = pp['gnus'].apply(lambda gnu: r'$\\nu=' + str(int(gnu)) + '$')\n\n avcolors = dict(zip(pp['gnus'].unique(), sns.color_palette()[1:]))\n\n # Plot the calculated crossvalidation performance\n sns.FacetGrid(pp,\n hue='gnus',\n palette=sns.color_palette()[1:]).map(ax.semilogx, 'Los', 'CPredict')\n\n ax.legend(handles=Legend(pp['gnus'].unique(), avcolors, [], {}), bbox_to_anchor=(1, 1), loc=2)\n\n ax.set_xlabel('Assumed IC Conc. (M)')\n ax.set_ylabel('LOO Prediction R-Squared')\n ax.set_ylim(0.0, 1.0)", "def plot_sub_spreads( fixed_times, bayes_spreads, naive_spreads,\n init_conditions, ax, init_radius=None):\n ax.plot(fixed_times, naive_spreads, label=\"Naive fit\")\n ax.plot(fixed_times, bayes_spreads, label=\"Bayes fit\")\n ax.set_xlim(fixed_times[0], fixed_times[-1])\n ax.set_ylim(\n bottom=0.0, top=max(np.max(naive_spreads), np.max(bayes_spreads))\n )\n if init_conditions is not None:\n init_age = init_conditions[13]\n ax.axvline(\n init_age, ax.get_ylim()[0], ax.get_ylim()[1], color='r', ls='--'\n )\n\n if init_radius is not None:\n ax.axhline(\n init_radius, ax.get_xlim()[0], ax.get_xlim()[1], color='r', ls='--'\n )\n ax.legend(loc=1)\n ax.set_xlabel(\"Traceback Time [Myr]\")\n ax.set_ylabel(\"Radius of average spread in XYZ [pc]\")", "def sample_and_plot(S0, K, B, T, N, u, d, q, M, barrier_type):\n paths = sample_paths(S0, N, u, d, q, M)\n p_valid, p_invalid, p_counts = split_paths(paths, B, K, \n barrier_type, option)\n\n times = np.linspace(0, T, N+1)\n\n fig = plt.figure(figsize=(10,7))\n ax1 = plt.subplot2grid((1,1),(0,0))\n ax1.set_ylabel('Stock price (log-scale)')\n ax1.set_xlabel('time')\n for path in p_invalid:\n ax1.plot(times, path, c='lightcoral')\n for path in p_valid:\n ax1.plot(times, path, c='grey')\n for path in p_counts:\n ax1.plot(times, path, c='blue')\n \n custom_lines = [Line2D([0], [0], c='lightcoral', lw=2),\n Line2D([0], [0], c='grey', lw=2),\n Line2D([0], [0], c='blue', lw=2), \n Line2D([0], [0], c='red', ls=':', lw=2), \n Line2D([0], [0], c='navy', ls=':', lw=2)]\n \n ax1.axhline(y=K, lw=4, c = 'navy', ls = ':', label = 'Strike Price')\n ax1.axhline(y=B, lw=4, c = 'red', ls = ':', label = 'Barrier')\n \n plt.yscale('log') \n ax1.legend(custom_lines, ['invalid (barrier)', 'invalid (option)', 'valid', \n 'barrier', 'strike price'])\n #plt.savefig('up-and-out_call.png', transparent=True)\n plt.show()", "def plot_roc(y_true, y_probas, classes = None, title='ROC Curves', average_plot = True,\n ax=None, figsize=None, cmap='nipy_spectral',\n 
title_fontsize=\"large\", text_fontsize=\"medium\"):\n if ax is None:\n fig, ax = plt.subplots(1, 1, figsize=figsize)\n ax.set_title(title, fontsize=title_fontsize)\n n_fold_roc_auc = []\n for i in range(len(y_true)):\n fpr, tpr, _ = roc_curve(y_true[i], y_probas[i])\n roc_auc = auc(fpr, tpr)\n color = plt.cm.get_cmap(cmap)(float(i) / len(y_true))\n \n if classes is None:\n s = 'fold'\n else:\n s = classes[i]\n ax.plot(fpr, tpr, lw=2, color=color,\n label='ROC curve of {0} {1} (area = {2:0.2f})'\n ''.format(s, i, roc_auc))\n n_fold_roc_auc.append(roc_auc)\n\n average_roc_auc = 0\n if classes is None:\n if average_plot:\n all_y_true = np.concatenate(y_true)\n all_probas = np.concatenate(y_probas)\n fpr_all, tpr_all, _ = roc_curve(all_y_true, all_probas)\n average_roc_auc = auc(fpr_all, tpr_all)\n ax.plot(fpr_all, tpr_all,\n label='average ROC curve '\n '(area = {0:0.2f})'.format(average_roc_auc),\n color='blue', linestyle=':', linewidth=4)\n\n ax.plot([0, 1], [0, 1], 'k--', lw=2)\n ax.set_xlim([0.0, 1.0])\n ax.set_ylim([0.0, 1.05])\n ax.set_xlabel('False Positive Rate', fontsize=text_fontsize)\n ax.set_ylabel('True Positive Rate', fontsize=text_fontsize)\n ax.tick_params(labelsize=text_fontsize)\n ax.legend(loc='lower right', fontsize=text_fontsize)\n return ax, n_fold_roc_auc, average_roc_auc", "def plotArc(self):\n\n # plot the spectra\n self.spcurve,=self.axes.plot(self.xarr,self.farr,linewidth=0.5,linestyle='-',marker='None',color='b')", "def remap_quiver_plot(cmp_AB, ax=None, rate_colors=False, \n border_style=True, arrow_width=None, **kwextra):\n from matplotlib.pyplot import figure, axes, draw\n if ax is None:\n f = figure()\n ax = axes()\n \n # Set vector components for drawing arrows\n X, Y = cmp_AB['A_xy']\n U, V = cmp_AB['B_xy'] - cmp_AB['A_xy']\n args = (X, Y, U, V)\n \n # Calculate rate remapping vector for colors: (max-min)/max\n if rate_colors:\n C = cmp_AB['R_AB']\n args += (C,)\n \n # Set keyword arguments to format the quiver field\n if arrow_width is None:\n set_width = 0.5 # set width here\n else:\n set_width = arrow_width\n kwargs = { 'units':'x', # scale based on data range\n 'scale':1, # data per arrow unit\n 'width':set_width, # arrow units\n 'headwidth':4, # width units\n 'headlength':5, # width units\n 'headaxislength':4, # width units\n 'minshaft':1, # headlength units, scaling threshold\n 'minlength':2.5/set_width } # width units, display threshold\n if rate_colors:\n color_lims = numpy.array([0.0, 1.0])\n if border_style:\n from matplotlib import cm\n kwargs.update({\n 'cmap':cm.Reds, # colormap for arrows\n 'clim':color_lims, # colors on a (0,1) scale\n 'edgecolor':'k', # arrow outline color\n 'lw':0.5 }) # arrow outline line-width \n else:\n from ..tools.colormaps import diffmap\n kwargs.update({\n 'headwidth':4.0, # scale up head with no borders\n 'headlength':5.0, # \n 'headaxislength':3.8, #\n 'cmap':diffmap(use_black=True),\n 'clim':color_lims, # colors on a (0,1) scale\n 'lw':0.0 }) # arrow outline line-width \n kwargs.update(kwextra)\n \n # Execute the quiver command and draw the plot\n ax.cla()\n q = ax.quiver(*args, **kwargs)\n ax.axis('image')\n ax.axis([0, 100, 0, 100])\n draw()\n return q", "def plot_risks_select_combinations(\n self, acquisition_risks, errors='std',\n fig=None, ax=None, alpha=0.3, i=0, labels=None, lw=None,\n white_bg=True, lw2=None):\n if errors == 'std_error':\n middle = self.means\n sqrtN = np.sqrt(self.n_runs)\n upper_base = middle + self.stds/sqrtN\n lower_base = middle - self.stds/sqrtN\n elif errors == 'std':\n middle = 
self.means\n upper_base = middle + self.stds\n lower_base = middle - self.stds\n elif errors == 'percentiles':\n middle = self.means\n lower_base, upper_base = self.percentiles\n else:\n raise ValueError(f'Do not recognize errors={errors}.')\n if fig is None or ax is None:\n fig, ax = plt.subplots(dpi=200)\n\n linestyles = itertools.cycle(['--', '-.', ':'])\n for acquisition, risk in acquisition_risks:\n acq_risk = f'{acquisition}_{risk}'\n color = acquisition_risks_to_color[acq_risk]\n m = middle.loc[acquisition][risk].values\n s_u = upper_base.loc[acquisition][risk].values\n s_l = lower_base.loc[acquisition][risk].values\n x = np.arange(1, s_l.size + 1)\n\n if white_bg:\n ax.fill_between(\n x, s_u, s_l,\n color='white', alpha=1)\n\n\n ax.fill_between(\n x, s_u, s_l,\n color=color, alpha=alpha)\n ax.plot(x, s_l, '--', color=color, zorder=100, lw=lw)\n ax.plot(x, s_u, '--', color=color, zorder=100, lw=lw)\n ax.plot(x, m, color=color,\n label=labels[i], zorder=100, lw=lw2)\n i += 1\n\n return fig, ax", "def plot(self, ax=..., *, name=..., **kwargs):\n ...", "def _bokeh_roc_curve(\n y_true_binary: np.ndarray,\n y_pred_score: np.ndarray,\n title_rows: Sequence[str],\n sample_weights: Optional[np.ndarray],\n) -> Callable[[], Figure]:\n assert y_true_binary.shape == y_pred_score.shape\n assert set(y_true_binary).issubset({0, 1}) or set(y_true_binary).issubset(\n {False, True}\n )\n assert np.ndim(y_true_binary) == 1\n\n fpr, tpr, thresholds = sklmetrics.roc_curve(\n y_true=y_true_binary, y_score=y_pred_score, sample_weight=sample_weights\n )\n\n def figure() -> Figure:\n source = ColumnDataSource(\n data={\n \"FPR\": fpr,\n \"TPR\": tpr,\n \"threshold\": thresholds,\n \"specificity\": 1.0 - fpr,\n }\n )\n\n p = plotting.figure(\n plot_height=400,\n plot_width=350,\n tools=TOOLS,\n toolbar_location=TOOLBAR_LOCATION,\n # toolbar_location=None, # hides entire toolbar\n match_aspect=True,\n )\n\n p.xaxis.axis_label = \"FPR\"\n p.yaxis.axis_label = \"TPR\"\n\n add_title_rows(p, title_rows)\n apply_default_style(p)\n\n curve = p.line(x=\"FPR\", y=\"TPR\", line_width=2, color=DARK_BLUE, source=source)\n p.line(\n x=[0.0, 1.0],\n y=[0.0, 1.0],\n line_alpha=0.75,\n color=\"grey\",\n line_dash=\"dotted\",\n )\n\n p.add_tools(\n HoverTool(\n # make sure there is no tool tip for the diagonal baseline\n renderers=[curve],\n tooltips=[\n (\"TPR\", \"@TPR\"),\n (\"FPR\", \"@FPR\"),\n (\"Sensitivity\", \"@TPR\"),\n (\"Specificity\", \"@specificity\"),\n (\"Threshold\", \"@threshold\"),\n ],\n # display a tooltip whenever the cursor is vertically in line with a glyph\n mode=\"vline\",\n )\n )\n\n return p\n\n return figure", "def _plot_twoclass(out, fontsize=12, title='', label='', color='darkorange', ax=None, figsize=(12,8)):\n fpr = out['fpr']\n tpr = out['tpr']\n roc_auc = out.get('auc',None)\n \n if color is None:\n color='darkorange'\n\n if ax is None:\n fig,ax= plt.subplots(figsize=figsize)\n\n label = ('ROC curve (area = %.2f) ' %(roc_auc)) + label\n linewidth = 1.5\n ax.plot(fpr, tpr, color=color, lw=linewidth, label=label)\n ax.plot([0, 1], [0, 1], color='navy', lw=linewidth, linestyle='--')\n ax.set_xlim([0.0, 1.0])\n ax.set_ylim([0.0, 1.05])\n ax.set_xlabel('False Positive Rate', fontsize=fontsize)\n ax.set_ylabel('True Positive Rate', fontsize=fontsize)\n ax.set_title('[%s] Receiver operating characteristic. 
AUC:%.3f' %(title, roc_auc), fontsize=fontsize)\n ax.legend(loc=\"lower right\", fontsize=fontsize)\n ax.grid(True)\n return ax", "def roi_curves(self, data):\n if not data or not any(len(d) for d in data.values()):\n self.roi_traces = None\n default_curve = hv.Curve([], 'Spectrum', 'CL').opts(color='red') \n return hv.NdOverlay({0: default_curve}).opts(show_legend=False) # code breaks without using a curve in ndoverlay\n \n curves = {}\n data = zip(data['x0'], data['x1'], data['y0'], data['y1'])\n self.roi_traces = []\n for i, (x0, x1, y0, y1) in enumerate(data):\n selection = self.xds.sel(x=slice(x0, x1), y=slice(y1, y0))\n selection_avg = selection.mean(['x','y'])\n self.roi_traces.append(selection_avg)\n if self.roi_toggle == 'Trans': # apparently param knows when this changes without having to make it a 'stream' var\n if i == 0:\n substrate = selection_avg.copy()\n selection_avg /= substrate\n curves[i] = hv.Curve(selection_avg)\n \n color_cycle_opts = opts.Curve(color= hv.Cycle(self.color_cycle))\n return hv.NdOverlay(curves).opts(color_cycle_opts)", "def plot(self, fig=None, ax=None,\n curve=True, control_points=True, frenet_serret=False, axis_off=False, ticks_off=False):\n\n if fig is None:\n\n # One dimension (law of evolution)\n if self.ndim == 1:\n fig = plt.figure(figsize=(6, 5))\n ax = fig.add_subplot(111)\n ax.set_xlabel('$u$ parameter', fontsize=12, color='k', labelpad=12)\n ax.set_ylabel('NURBS curve value', fontsize=12, color='k', labelpad=12)\n # ax_xy.xaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%.1f'))\n # ax_xy.yaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%.1f'))\n for t in ax.xaxis.get_major_ticks(): t.label.set_fontsize(12)\n for t in ax.yaxis.get_major_ticks(): t.label.set_fontsize(12)\n if ticks_off:\n ax.set_xticks([])\n ax.set_yticks([])\n if axis_off:\n ax.axis('off')\n\n # Two dimensions (plane curve)\n elif self.ndim == 2:\n fig = plt.figure(figsize=(6, 5))\n ax = fig.add_subplot(111)\n ax.set_xlabel('$x$ axis', fontsize=12, color='k', labelpad=12)\n ax.set_ylabel('$y$ axis', fontsize=12, color='k', labelpad=12)\n # ax_xy.xaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%.1f'))\n # ax_xy.yaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%.1f'))\n for t in ax.xaxis.get_major_ticks(): t.label.set_fontsize(12)\n for t in ax.yaxis.get_major_ticks(): t.label.set_fontsize(12)\n if ticks_off:\n ax.set_xticks([])\n ax.set_yticks([])\n if axis_off:\n ax.axis('off')\n\n # Three dimensions (space curve)\n elif self.ndim == 3:\n fig = mpl.pyplot.figure(figsize=(6, 5))\n ax = fig.add_subplot(111, projection='3d')\n ax.view_init(azim=-120, elev=30)\n ax.grid(False)\n ax.xaxis.pane.fill = False\n ax.yaxis.pane.fill = False\n ax.zaxis.pane.fill = False\n ax.xaxis.pane.set_edgecolor('k')\n ax.yaxis.pane.set_edgecolor('k')\n ax.zaxis.pane.set_edgecolor('k')\n ax.xaxis.pane._alpha = 0.9\n ax.yaxis.pane._alpha = 0.9\n ax.zaxis.pane._alpha = 0.9\n ax.set_xlabel('$x$ axis', fontsize=12, color='k', labelpad=12)\n ax.set_ylabel('$y$ axis', fontsize=12, color='k', labelpad=12)\n ax.set_zlabel('$z$ axis', fontsize=12, color='k', labelpad=12)\n # ax_xy.xaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%.1f'))\n # ax_xy.yaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%.1f'))\n # ax_xy.zaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%.1f'))\n for t in ax.xaxis.get_major_ticks(): t.label.set_fontsize(8)\n for t in ax.yaxis.get_major_ticks(): t.label.set_fontsize(8)\n for t in ax.zaxis.get_major_ticks(): 
t.label.set_fontsize(8)\n ax.xaxis.set_rotate_label(False)\n ax.yaxis.set_rotate_label(False)\n ax.zaxis.set_rotate_label(False)\n if ticks_off:\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_zticks([])\n if axis_off:\n ax.axis('off')\n\n else: raise Exception('The number of dimensions must be 1, 2 or 3')\n\n\n # Add objects to the plot\n if curve: self.plot_curve(fig, ax)\n if control_points: self.plot_control_points(fig, ax)\n if frenet_serret: self.plot_frenet_serret(fig, ax)\n\n # Set the scaling of the axes\n self.rescale_plot(fig, ax)\n\n return fig, ax", "def plot(self, x_feature=\"ratio\", y_feature=\"fold_change\", ax=None):\n\n if ax is None:\n ax = plt.gca()\n\n # - Data\n x, y = (\n self.bed_seg.query(f\"sgRNA_ID >= {self.n_sgrna}\")[x_feature],\n self.bed_seg.query(f\"sgRNA_ID >= {self.n_sgrna}\")[y_feature],\n )\n x_, y_ = (\n self.bed_seg.query(f\"sgRNA_ID < {self.n_sgrna}\")[x_feature],\n self.bed_seg.query(f\"sgRNA_ID < {self.n_sgrna}\")[y_feature],\n )\n\n x_pred = np.arange(0, x.max(), 0.1)\n y_pred, y_pred_std = self.predict(x_pred.reshape(-1, 1), return_std=True)\n\n # - Plot\n # Segments used for fitting\n ax.scatter(\n x,\n y,\n c=cy.QCplot.PAL_DBGD[0],\n alpha=0.7,\n edgecolors=\"white\",\n lw=0.3,\n label=f\"#(sgRNA_ID) >= {self.n_sgrna}\",\n )\n\n # Segments not used for fitting\n plt.scatter(\n x_,\n y_,\n c=cy.QCplot.PAL_DBGD[0],\n marker=\"X\",\n alpha=0.3,\n edgecolors=\"white\",\n lw=0.3,\n label=f\"#(sgRNA_ID) < {self.n_sgrna}\",\n )\n\n # Plot GP fit\n # GP fit\n plt.plot(\n x_pred, y_pred, ls=\"-\", lw=1.0, c=cy.QCplot.PAL_DBGD[1], label=\"GPR mean\"\n )\n plt.fill_between(\n x_pred,\n y_pred - y_pred_std,\n y_pred + y_pred_std,\n alpha=0.2,\n color=cy.QCplot.PAL_DBGD[1],\n lw=0,\n )\n\n # Misc\n plt.axhline(0, ls=\":\", color=cy.QCplot.PAL_DBGD[2], lw=0.3, zorder=0)\n\n plt.xlabel(f\"Segment\\n{x_feature}\")\n plt.ylabel(f\"Segment\\nmean {y_feature}\")\n\n plt.title(f\"{self.kernel_}\", fontsize=6)\n\n plt.legend(frameon=False)\n\n return ax", "def andrews_curves(\n frame: DataFrame,\n class_column: str,\n ax: Axes | None = None,\n samples: int = 200,\n color: list[str] | tuple[str, ...] | None = None,\n colormap: Colormap | str | None = None,\n **kwargs,\n) -> Axes:\n plot_backend = _get_plot_backend(\"matplotlib\")\n return plot_backend.andrews_curves(\n frame=frame,\n class_column=class_column,\n ax=ax,\n samples=samples,\n color=color,\n colormap=colormap,\n **kwargs,\n )", "def plot_regime_diagram_background_L19(\n ax=None,\n ):\n if ax is None:\n ax = plt.gca()\n # range of power\n xpr = [-1, 1]\n ypr = [-3, 3]\n # range\n xlims = [10**i for i in xpr]\n ylims = [10**i for i in ypr]\n # background following Fig. 
3 of Belcher et al., 2012\n nx = 500\n ny = 500\n xx = np.logspace(xpr[0], xpr[1], nx)\n yy = np.logspace(ypr[0], ypr[1], ny)\n zz1 = np.zeros([nx, ny])\n zz2 = np.zeros([nx, ny])\n zz3 = np.zeros([nx, ny])\n for i in np.arange(nx):\n for j in np.arange(ny):\n zz1[i,j] = 2*(1-np.exp(-0.5*xx[i]))\n zz2[i,j] = 0.22*xx[i]**(-2)\n zz3[i,j] = 0.3*xx[i]**(-2)*yy[j]\n zz = zz1 + zz2 + zz3\n\n rz_ST = zz1/zz\n rz_LT = zz2/zz\n rz_CT = zz3/zz\n fr = np.ones(zz.shape) * 7\n cfrac = 0.25\n fr[(rz_LT<cfrac) & (rz_CT<cfrac)] = 1\n fr[(rz_ST<cfrac) & (rz_CT<cfrac)] = 2\n fr[(rz_ST<cfrac) & (rz_LT<cfrac)] = 3\n fr[(rz_ST>=cfrac) & (rz_LT>=cfrac) & (rz_CT<cfrac)] = 4\n fr[(rz_ST>=cfrac) & (rz_CT>=cfrac) & (rz_LT<cfrac)] = 5\n fr[(rz_LT>=cfrac) & (rz_CT>=cfrac) & (rz_ST<cfrac)] = 6\n color_list = ['firebrick','forestgreen','royalblue','gold','orchid','turquoise','w']\n cb_ticks = [0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5]\n cmap, norm = from_levels_and_colors(cb_ticks, color_list)\n ax.contourf(xx, yy, np.transpose(fr), cmap=cmap, norm=norm)\n ax.contour(xx, yy, np.transpose(fr), colors='darkgray')\n ax.set_xlim(xlims)\n ax.set_ylim(ylims)\n ax.set_xscale('log')\n ax.set_yscale('log')\n ax.set_xlabel('La$_t$')\n ax.set_ylabel('$h/L_L$')\n ax.set_aspect(aspect=1/3)\n ax.text(0.11, 4e-3, 'Langmuir', bbox=dict(boxstyle=\"square\",ec='k',fc='w'))\n ax.text(3, 4e-3, 'Shear', bbox=dict(boxstyle=\"square\",ec='k',fc='w'))\n ax.text(0.13, 1e2, 'Convection', bbox=dict(boxstyle=\"square\",ec='k',fc='w'))", "def _plot_curves(self, curves_dict):\n for name, curve in curves_dict.items():\n fig = plt.figure()\n ax = plt.gca()\n\n plot_type = name.split('_')[-1]\n ax.set_title(plot_type)\n if plot_type == 'PRC':\n precision, recall, _ = curve\n ax.step(recall, precision, color='b', alpha=0.2, where='post')\n ax.fill_between(recall, precision, step='post', alpha=0.2, color='b')\n ax.set_xlabel('Recall')\n ax.set_ylabel('Precision')\n elif plot_type == 'ROC':\n false_positive_rate, true_positive_rate, _ = curve\n ax.plot(false_positive_rate, true_positive_rate, color='b')\n ax.plot([0, 1], [0, 1], 'r--')\n ax.set_xlabel('False Positive Rate')\n ax.set_ylabel('True Positive Rate')\n else:\n ax.plot(curve[0], curve[1], color='b')\n\n ax.set_ylim([0.0, 1.05])\n ax.set_xlim([0.0, 1.0])\n\n fig.canvas.draw()\n\n curve_img = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')\n curve_img = curve_img.reshape(fig.canvas.get_width_height()[::-1] + (3,))\n self.summary_writer.add_image(name.replace('_', '/'), curve_img, global_step=self.global_step)", "def plot_gains(gains, gain_ref, TITLES, OUT_DIR):\n\n# print 'directory: %s' % OUT_DIR\n# print 'TITLES:%s', TITLES\n\n gain_ref_np = np.array(gains[gain_ref].gain)\n ratios = []\n for gain in gains:\n gain_np = np.array(gain.gain)\n dim = (min(gain_ref_np.shape[0], gain_np.shape[0]),\n min(gain_ref_np.shape[1], gain_np.shape[1])\n )\n# print 'dim = ', dim\n ratios.append(gain_np[0:dim[0], 0:dim[1]] / gain_ref_np[0:dim[0], 0:dim[1]])\n\n# print 'Ratios = ', ratios\n\n rows = 2*((len(ratios) -1) / 6 + 1)\n cmap = plt.get_cmap('gnuplot')\n colors = [cmap(i) for i in np.linspace(0, 1, len(ratios))]\n fig, axes = plt.subplots(nrows=rows, ncols=6)\n fig.set_size_inches(20,20)\n axfl = axes.flatten()\n for i, ratio in enumerate(ratios):\n# print 'Plotting %s', TITLES[i]\n\tj = (i / 6)*12 + i % 6\n ax = axfl[j]\n ax2 = axfl[j+6]\n ax.hist(np.reshape(ratio, -1), 20, range=(0.9, 1.1), facecolor=colors[i])\n ax.set_title(TITLES[i], size=20)\n ax2.hist(np.reshape(ratio, -1), 50, 
range=(0., 2.), facecolor=colors[i])\n\n fig.suptitle(\"Gains with ref gain '%s'\" % TITLES[gain_ref], y=0.95, size=25)\n # fig.tight_layout()\n plt.savefig(OUT_DIR + 'gain.png')\n plt.close(fig)", "def plot_pc_curves_together(binary_model, ova_model, multi_model, indices):\n binary_range_metrics = binary_model.compute_probability_range_metrics(\n binary_model.results)\n ova_range_metrics = ova_model.compute_probability_range_metrics(\n ova_model.results)\n multi_range_metrics = multi_model.compute_probability_range_metrics(\n multi_model.results)\n\n class_labels = ova_model.class_labels\n f, ax = plt.subplots(nrows=len(indices),\n ncols=3,\n sharex=True, sharey=True,\n figsize=(FIG_WIDTH, 10),\n dpi=DPI)\n\n y_indices = [0, 0.2, 0.4, 0.6, 0.8, 1]\n y_ticks = [\"0\", \"20\", \"40\", \"60\", \"80\", \"\"]\n plot_index = 0\n for class_index, class_name in enumerate(class_labels):\n if class_index not in indices:\n continue\n\n if plot_index == 0:\n # Add titles to top of plots\n ax[plot_index][0].set_title(\"Binary\", fontsize=TICK_S)\n ax[plot_index][1].set_title(\"OVA\", fontsize=TICK_S)\n ax[plot_index][2].set_title(\"Multi\", fontsize=TICK_S)\n\n plot_model_curves(class_name, binary_model,\n binary_range_metrics, ax[plot_index][0])\n plot_model_curves(class_name, ova_model, ova_range_metrics, ax[plot_index][1])\n mirror_ax = plot_model_curves(\n class_name, multi_model, multi_range_metrics, ax[plot_index][2])\n\n ax[plot_index][0].set_yticks(ticks=y_indices)\n ax[plot_index][0].set_yticklabels(labels=y_ticks, color=P_BAR_COLOR)\n mirror_ax.set_yticks(ticks=y_indices)\n mirror_ax.set_yticklabels(labels=y_ticks, color=C_BAR_COLOR)\n ax[plot_index][0].tick_params(axis='both', direction='in', labelsize=10)\n ax[plot_index][1].tick_params(axis='both', direction='in')\n ax[plot_index][2].tick_params(axis='both', direction='in', labelsize=10)\n\n mpl.rcParams['font.serif'] = ['times', 'times new roman']\n mpl.rcParams['font.family'] = 'serif'\n pretty_class_name = clean_class_name(class_name)\n ax[plot_index][0].text(0, 0.85, pretty_class_name, fontsize=14)\n plot_index += 1\n\n x_indices = np.linspace(0, 1, 11)[:-1]\n\n plt.xticks(x_indices, [\"\", \"10\", \"\", \"30\", \"\", \"50\", \"\", \"70\", \"\", \"90\"])\n rc('text', usetex=True)\n f.text(0.5, 0.08, r'Probability $\\geq$X\\%', fontsize=TICK_S, ha='center')\n bp = \"Balanced \" if binary_model.balanced_purity else \"\"\n f.text(0.03, .5, bp + 'Purity (\\%)',\n fontsize=TICK_S, va='center', rotation='vertical', color=P_BAR_COLOR)\n f.text(0.98, .5, 'Completeness (\\%)',\n fontsize=TICK_S, va='center', rotation='vertical', color=C_BAR_COLOR)\n\n plt.subplots_adjust(wspace=0, hspace=0)\n\n f.savefig(\"../output/custom_figures/merged_pc_curves_\" +\n str(indices) + \".pdf\", bbox_inches='tight')\n plt.show()", "def plot_ratios(\n self, acquisition_risks, errors='std', labels=None,\n fig=None, ax=None, alpha=0.3, i=0, smoothing=False):\n if errors == 'percentiles':\n upper_base = self.quant_errors\n elif errors == 'std':\n upper_base = self.errors\n else:\n raise ValueError\n\n if fig is None or ax is None:\n fig, ax = plt.subplots(dpi=200)\n\n x = np.arange(1, self.n_points+1)\n linestyles = itertools.cycle(['--', '-.', ':'])\n for acquisition, risk in acquisition_risks:\n acq_risk = f'{acquisition}_{risk}'\n\n color = acquisition_risks_to_color.get(acq_risk, 'b')\n\n selected = upper_base.loc[\n 'RandomAcquisition']['BiasedRiskEstimator'].values\n random = upper_base.loc[acquisition][risk].values\n\n y = selected / random\n\n ax.plot(\n 
x, y, '-', color=color, label=labels[i],\n zorder=100)\n\n i += 1\n\n return fig, ax", "def plot_r(self):\n for k, v, o in self.data:\n self.plot_r1(k, v, o)", "def plot_calibration_curve(est, name, fig_index):\n # Calibrated with isotonic calibration\n isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic')\n\n # Calibrated with sigmoid calibration\n sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid')\n\n # Logistic regression with no calibration as baseline\n lr = LogisticRegression(C=1.)\n\n fig = plt.figure(fig_index, figsize=(10, 10))\n ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)\n ax2 = plt.subplot2grid((3, 1), (2, 0))\n\n ax1.plot([0, 1], [0, 1], \"k:\", label=\"Perfectly calibrated\")\n for clf, name in [(lr, 'Logistic'),(est, name),(isotonic, name + ' + Isotonic'),(sigmoid, name + ' + Sigmoid')]:\n #Para cada modelo, entrenamos y predecimos \n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n if hasattr(clf, \"predict_proba\"):\n prob_pos = clf.predict_proba(X_test)[:, 1]\n else: # use decision function\n prob_pos = clf.decision_function(X_test)\n prob_pos = \\\n (prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())\n\n clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max())\n print(\"%s:\" % name)\n print(\"\\tBrier: %1.3f\" % (clf_score))\n print(\"\\tPrecision: %1.3f\" % precision_score(y_test, y_pred))\n print(\"\\tRecall: %1.3f\" % recall_score(y_test, y_pred))\n print(\"\\tF1: %1.3f\\n\" % f1_score(y_test, y_pred))\n\n fraction_of_positives, mean_predicted_value = \\\n calibration_curve(y_test, prob_pos, n_bins=10)\n\n ax1.plot(mean_predicted_value, fraction_of_positives, \"s-\",\n label=\"%s (%1.3f)\" % (name, clf_score))\n\n ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,\n histtype=\"step\", lw=2)\n\n ax1.set_ylabel(\"Fraction of positives\")\n ax1.set_ylim([-0.05, 1.05])\n ax1.legend(loc=\"lower right\")\n ax1.set_title('Calibration plots (reliability curve)')\n\n ax2.set_xlabel(\"Mean predicted value\")\n ax2.set_ylabel(\"Count\")\n ax2.legend(loc=\"upper center\", ncol=2)\n\n plt.tight_layout()", "def control_plot(data: (List[int], List[float], pd.Series, np.array),\n upper_control_limit: (int, float), lower_control_limit: (int, float),\n highlight_beyond_limits: bool = True, highlight_zone_a: bool = True,\n highlight_zone_b: bool = True, highlight_zone_c: bool = True,\n highlight_trend: bool = True, highlight_mixture: bool = True,\n highlight_stratification: bool = True, highlight_overcontrol: bool = True,\n ax: Axis = None):\n\n data = coerce(data)\n\n if ax is None:\n fig, ax = plt.subplots()\n\n ax.plot(data)\n ax.set_title('Zone Control Chart')\n\n spec_range = (upper_control_limit - lower_control_limit) / 2\n spec_center = lower_control_limit + spec_range\n zone_c_upper_limit = spec_center + spec_range / 3\n zone_c_lower_limit = spec_center - spec_range / 3\n zone_b_upper_limit = spec_center + 2 * spec_range / 3\n zone_b_lower_limit = spec_center - 2 * spec_range / 3\n zone_a_upper_limit = spec_center + spec_range\n zone_a_lower_limit = spec_center - spec_range\n\n ax.axhline(spec_center, linestyle='--', color='red', alpha=0.6)\n ax.axhline(zone_c_upper_limit, linestyle='--', color='red', alpha=0.5)\n ax.axhline(zone_c_lower_limit, linestyle='--', color='red', alpha=0.5)\n ax.axhline(zone_b_upper_limit, linestyle='--', color='red', alpha=0.3)\n ax.axhline(zone_b_lower_limit, linestyle='--', color='red', alpha=0.3)\n ax.axhline(zone_a_upper_limit, linestyle='--', color='red', alpha=0.2)\n 
ax.axhline(zone_a_lower_limit, linestyle='--', color='red', alpha=0.2)\n\n left, right = ax.get_xlim()\n right_plus = (right - left) * 0.01 + right\n\n ax.text(right_plus, upper_control_limit, s='UCL', va='center')\n ax.text(right_plus, lower_control_limit, s='LCL', va='center')\n\n ax.text(right_plus, (spec_center + zone_c_upper_limit) / 2, s='Zone C', va='center')\n ax.text(right_plus, (spec_center + zone_c_lower_limit) / 2, s='Zone C', va='center')\n ax.text(right_plus, (zone_b_upper_limit + zone_c_upper_limit) / 2, s='Zone B', va='center')\n ax.text(right_plus, (zone_b_lower_limit + zone_c_lower_limit) / 2, s='Zone B', va='center')\n ax.text(right_plus, (zone_a_upper_limit + zone_b_upper_limit) / 2, s='Zone A', va='center')\n ax.text(right_plus, (zone_a_lower_limit + zone_b_lower_limit) / 2, s='Zone A', va='center')\n\n plot_params = {'alpha': 0.3, 'zorder': -10, 'markersize': 14}\n\n if highlight_beyond_limits:\n beyond_limits_violations = control_beyond_limits(data=data,\n upper_control_limit=upper_control_limit,\n lower_control_limit=lower_control_limit)\n if len(beyond_limits_violations):\n plot_params['zorder'] -= 1\n plot_params['markersize'] -= 1\n ax.plot(beyond_limits_violations, 'o', color='red', label='beyond limits', **plot_params)\n\n if highlight_zone_a:\n zone_a_violations = control_zone_a(data=data,\n upper_control_limit=upper_control_limit,\n lower_control_limit=lower_control_limit)\n if len(zone_a_violations):\n plot_params['zorder'] -= 1\n plot_params['markersize'] -= 1\n ax.plot(zone_a_violations, 'o', color='orange', label='zone a violations', **plot_params)\n\n if highlight_zone_b:\n zone_b_violations = control_zone_b(data=data,\n upper_control_limit=upper_control_limit,\n lower_control_limit=lower_control_limit)\n if len(zone_b_violations):\n plot_params['zorder'] -= 1\n plot_params['markersize'] -= 1\n ax.plot(zone_b_violations, 'o', color='blue', label='zone b violations', **plot_params)\n\n if highlight_zone_c:\n zone_c_violations = control_zone_c(data=data,\n upper_control_limit=upper_control_limit,\n lower_control_limit=lower_control_limit)\n if len(zone_c_violations):\n plot_params['zorder'] -= 1\n plot_params['markersize'] -= 1\n ax.plot(zone_c_violations, 'o', color='green', label='zone c violations', **plot_params)\n\n if highlight_trend:\n zone_trend_violations = control_zone_trend(data=data)\n if len(zone_trend_violations):\n plot_params['zorder'] -= 1\n plot_params['markersize'] -= 1\n ax.plot(zone_trend_violations, 'o', color='purple', label='trend violations', **plot_params)\n\n if highlight_mixture:\n zone_mixture_violations = control_zone_mixture(data=data,\n upper_control_limit=upper_control_limit,\n lower_control_limit=lower_control_limit)\n if len(zone_mixture_violations):\n plot_params['zorder'] -= 1\n plot_params['markersize'] -= 1\n ax.plot(zone_mixture_violations, 'o', color='brown', label='mixture violations', **plot_params)\n\n if highlight_stratification:\n zone_stratification_violations = control_zone_stratification(data=data,\n upper_control_limit=upper_control_limit,\n lower_control_limit=lower_control_limit)\n if len(zone_stratification_violations):\n plot_params['zorder'] -= 1\n plot_params['markersize'] -= 1\n ax.plot(zone_stratification_violations, 'o', color='orange', label='stratification violations',\n **plot_params)\n\n if highlight_overcontrol:\n zone_overcontrol_violations = control_zone_overcontrol(data=data,\n upper_control_limit=upper_control_limit,\n lower_control_limit=lower_control_limit)\n if 
len(zone_overcontrol_violations):\n plot_params['zorder'] -= 1\n plot_params['markersize'] -= 1\n ax.plot(zone_overcontrol_violations, 'o', color='blue', label='overcontrol violations',\n **plot_params)\n\n ax.legend()", "def plot_connector(ax, arc_df):\n # gene = get_property(arc_df, 'gene_name')\n clvs = arc_df['aclv_t'].values.tolist()\n scs = arc_df['sc_t'].values.tolist()\n\n # lowest = -0.1 if (gene, dise) in MAIN_PLOT_GD_PAIRS_COMPLEX else -0.8\n lowest = -1\n # for i in clvs + scs:\n for i in clvs:\n # -0.25 is fragile, but seems to be good for now. It's affect by hspace\n # when specifying the grid and ymin, but the exact relationship is unclear, not sure\n # what unit does hspace use\n\n # or if zorders of different axes are pro, then it's fine.\n ax.plot([i, i], [lowest, 0], ':', linewidth=0.5, color='#333333', clip_on=False)", "def plot_figures(compare_df, compare_df_without_1_5, over_3, over_6, ppm, \n color_prob, color_count, edgecolor): \n # Create a gridspec to plot in\n fig = plt.figure()\n gs = fig.add_gridspec(2,4)\n ax1 = fig.add_subplot(gs[0,:])\n ax2 = fig.add_subplot(gs[1,:2])\n ax3 = fig.add_subplot(gs[1,2])\n ax4 = fig.add_subplot(gs[1,3])\n \n ##### Plot the total count\n compare_df.plot(kind=\"bar\", ax=ax1,zorder=5, color=[color_prob, color_count], edgecolor=edgecolor )\n ax1.set_title(\"a) Temperature count in AR5 working group reports and special reports until 2020\")\n \n #### Plot without the 1.5 special report\n compare_df_without_1_5.plot(kind=\"bar\", ax=ax2, legend=False,zorder=5, color=[color_prob, color_count], edgecolor=edgecolor)\n ax2.set_title(\"b) Excluding special report on 1.5°C warming\")\n plt.setp(ax2.xaxis.get_majorticklabels(), fontsize=6)\n \n #### Plot 3\n over_3.plot(kind=\"bar\", ax=ax3, width=0.1, legend=False,zorder=5, color=[color_prob, color_count], edgecolor=edgecolor)\n ax3.set_title(\"c) 3°C and above\")\n plt.setp(ax3.xaxis.get_majorticklabels(), color=\"white\")\n \n #### Plot only 6 degrees and above\n over_6.plot(kind=\"bar\", ax=ax4, width=0.1, legend=False,zorder=5, color=[color_prob, color_count], edgecolor=edgecolor)\n ax4.set_title(\"d) 6°C and above\")\n plt.setp(ax4.xaxis.get_majorticklabels(), color=\"white\")\n \n # make nicer\n i = 0\n for ax in [ax1, ax2, ax3, ax4]:\n ax.set_ylabel(\"Percentage [%]\")\n if i == 0:\n plot_nicer(ax)\n else: \n plot_nicer(ax, with_legend=False)\n plt.setp(ax.xaxis.get_majorticklabels(), rotation=0)\n i +=1\n \n fig=plt.gcf()\n fig.set_size_inches(12,6)\n fig.tight_layout()\n plt.savefig(\"Figures\" + os.sep + \"warming_count_\"+str(ppm)+\".png\",dpi=200, bbox_inches=\"tight\")\n plt.close()", "def plot_coefs(data, x_label, y_label, title, kind = 'barh', style = 'seaborn-darkgrid',\n figsize = (10, 8)):\n\n with plt.style.context(style):\n \n ax = data.plot(kind=kind, figsize = figsize, rot=45)\n \n if kind == 'barh':\n \n ax.xaxis.set_major_formatter(mpl.ticker.StrMethodFormatter('${x:,.0f}'))\n ax.set_yticklabels(ax.get_yticklabels(), ha='right')\n ax.axvline(color='k')\n ax.set(xlabel = x_label, ylabel = y_label, title = title)\n \n else:\n ax.yaxis.set_major_formatter(mpl.ticker.StrMethodFormatter('${x:,.0f}'))\n ax.set_xticklabels(ax.get_xticklabels(), ha='right')\n ax.axhline(color='k')\n ax.set(xlabel = x_label, ylabel = y_label, title = title)\n\n return ax", "def plot_axes(\n ax, y, x,\n interpolation=None,\n marker_size=30,\n title=None,\n legend_text='Area'):\n ax.scatter(x, y, marker='o', linewidths=0, s=marker_size, clip_on=False)\n # Show first and last points more 
visably\n ax.scatter([x[i] for i in [0, -1]], [y[i] for i in [0, -1]],\n marker='x', linewidths=2, s=100, clip_on=False)\n ax.set_xlim((-0.05, 1.05))\n ax.set_ylim((-0.08, 1.08))\n ax.set_xlabel('Recall')\n ax.set_ylabel('Precision')\n if title is not None:\n ax.set_title(title, fontsize=20)\n if interpolation is not None:\n if interpolation == 'linear':\n ax.plot(x, y)\n area = auc(x, y)\n ax.fill_between(x, 0, y, alpha=0.2,\n label='{} = {:5.4f}'.format(legend_text, area))\n leg = ax.legend()\n leg.get_frame().set_linewidth(0.0)\n elif interpolation == 'step':\n p_long = [v for v in y for _ in (0, 1)][:-1]\n r_long = [v for v in x for _ in (0, 1)][1:]\n ax.plot(r_long, p_long)\n area = auc_using_step(x, y)\n ax.fill_between(r_long, 0, p_long, alpha=0.2, \n label='{} = {:5.4f}'.format(legend_text, area))\n leg = ax.legend()\n leg.get_frame().set_linewidth(0.0)\n else:\n print(\"Interpolation value of '{}' not recognised. \"\n \"Choose from 'linear', 'quadrature'.\".format(interpolation))", "def _th_plot(self, y_true, y_pred_proba, pos_label, pos_label_ind,\n best_th_, q, tpr, fpr, th_, th_range, index, roc_auc_kwargs):\n fig, axs = plt.subplots(nrows=2, ncols=1)\n fig.set_size_inches(10, 10)\n # Roc score.\n y_type = sklearn.utils.multiclass.type_of_target(y_true)\n if y_type == \"binary\":\n roc_auc = sklearn.metrics.roc_auc_score(\n y_true, y_pred_proba[:, pos_label_ind], **roc_auc_kwargs)\n elif y_type == \"multiclass\":\n roc_auc = sklearn.metrics.roc_auc_score(\n y_true, y_pred_proba, **roc_auc_kwargs)\n else:\n assert False, f\"Unhandled y_type {y_type}\"\n # Roc curve.\n axs[0].plot(fpr, tpr, 'darkorange',\n label=f\"ROC curve (AUC = {roc_auc:.3f}).\")\n axs[0].scatter(fpr[index], tpr[index], c='b', marker=\"o\")\n axs[0].plot([0, 1], [0, 1], color='navy', linestyle='--')\n axs[0].set_xlabel('False Positive Rate')\n axs[0].set_ylabel('True Positive Rate')\n axs[0].set_title(f\"Receiver operating characteristic \"\n f\"(label '{pos_label}')\")\n axs[0].legend(loc=\"lower right\")\n # Metric q.\n axs[1].plot(th_, q, 'green')\n axs[1].vlines(best_th_, np.min(q), np.max(q))\n axs[1].vlines(th_range, np.min(q), np.max(q), colors='b',\n linestyles=':')\n axs[1].set_xlim([0.0, 1.0])\n axs[1].set_xlabel('Threshold')\n axs[1].set_ylabel('TPR/(TPR+FPR)')\n axs[1].set_title('Selected th values objective maximum')\n # plt.plot(th_, fpr, 'red')\n plt.show()", "def plot_koeppen(beck, ax):\n alpha=0.6\n # Create the right colros for Koeppen Geiger\n climate_dict = {\"1\": \"Tropical, rainforest\" , \"2\": \"Tropical, monsoon\", \"3\": \"Tropical, savannah\",\n \"4\": \"Arid, desert, hot\", \"5\": \"Arid, desert, cold\", \"6\": \"Arid, steppe, hot\",\n \"7\": \"Arid, steppe, cold\", \"8\": \"Temperate, dry summer, hot summer\", \n \"9\": \"Temperate, dry summer, warm summer\", \"10\": \"Temperate, dry summer, cold summer\",\n \"11\": \"Temperate, dry winter, hot summer\", \"12\": \"Temperate, dry winter, warm summer\",\n \"13\": \"Temperate, dry winter, cold summer\", \"14\": \"Temperate, no dry season, hot summer\",\n \"15\": \"Temperate, no dry season, warm summer\", \"16\": \"Temperate, no dry season, cold summer\",\n \"17\": \"Cold, dry summer, hot summer\", \"18\": \"Cold, dry summer, warm summer\",\n \"19\": \"Cold, dry summer, cold summer\", \"20\": \"Cold, dry summer, very cold winter\",\n \"21\": \"Cold, dry winter, hot summer\", \"22\": \"Cold, dry winter, warm summer\", \n \"23\": \"Cold, dry winter, cold summer\", \"24\": \"Cold, dry winter, very cold winter\",\n \"25\": \"Cold, no dry 
season, hot summer\", \"26\": \"Cold, no dry season, warm summer\",\n \"27\": \"Cold, no dry season, cold summer\", \"28\": \"Cold, no dry season, very cold winter\",\n \"29\": \"Polar, tundra\", \"30\": \"Polar, frost\"}\n color_dict = {\"1\": \"[0 0 255]\" , \"2\": \"[0 120 255]\", \"3\": \"[70 170 250]\",\n \"4\": \"[255 0 0]\", \"5\": \"[255 150 150]\", \"6\": \"[245 165 0]\",\n \"7\": \"[255 220 100]\", \"8\": \"[255 255 0]\", \n \"9\": \"[200 200 0]\", \"10\": \"[150 150 0]\",\n \"11\": \"[150 255 150]\", \"12\": \"[100 200 100]\",\n \"13\": \"[50 150 50]\", \"14\": \"[200 255 80]\",\n \"15\": \"[100 255 80]\", \"16\": \"[50 200 0]\",\n \"17\": \"[255 0 255]\", \"18\": \"[200 0 200]\",\n \"19\": \"[150 50 150]\", \"20\": \"[150 100 150]\",\n \"21\": \"[170 175 255]\", \"22\": \"[90 120 220]\", \n \"23\": \"[75 80 180]\", \"24\": \"[50 0 135]\",\n \"25\": \"[0 255 255]\", \"26\": \"[55 200 255]\",\n \"27\": \"[0 125 125]\", \"28\": \"[0 70 95]\",\n \"29\": \"[178 178 178]\", \"30\": \"[102 102 102]\"}\n for key in color_dict.keys():\n color = color_dict[key]\n color = color.replace(\"[\",\"\").replace(\"]\",\"\").split(\" \")\n color = [int(val)/255 for val in color] +[1]\n color_dict[key] = color\n \n climate_to_color = {climate_dict[key] : value for key, value in color_dict.items()}\n \n beck[\"Climatic Regions\"] = beck[\"RASTERVALU\"].astype(str)\n beck.replace({\"Climatic Regions\":climate_dict}, inplace=True)\n \n def color_for_label(label):\n return [climate_to_color[x] for x in label]\n \n # As percentag\n #df = beck.groupby([\"gauge_clus\",'Climatic Regions']).size().groupby(level=0).apply(\n # lambda x: 100 * x / x.sum()).unstack()\n # Group the dataframe\n df = beck.groupby([\"gauge_clus\",'Climatic Regions']).size().unstack()\n # Plotting\n ax = df.plot(kind=\"bar\", stacked=True, color=color_for_label(df.columns.values), alpha=1, ax = ax, zorder=4)\n # ax.set_title(\"a) Membership of Koeppen-Geiger clusters (Beck et al. 
(2018)) in the hydrological clusters\", loc=\"left\", alpha=alpha)\n ax.set_xlabel(\"Hydrological Cluster\", alpha=alpha)\n ax.set_ylabel(\"Number of Catchments\", alpha=alpha)\n legend = ax.legend(ncol=2, title=\"Climatic Cluster\")\n for text in legend.get_texts():\n text.set_color(\"grey\")\n legend.get_title().set_color(\"grey\")\n # Make it nicer\n for spine in ax.spines.values():\n spine.set_visible(False)\n ax.yaxis.grid(True, color=\"lightgrey\", zorder=0)\n plt.setp(ax.get_yticklabels(), alpha=alpha)\n plt.setp(ax.get_xticklabels(), alpha=alpha, rotation=0)\n ax.tick_params(axis=u'both', which=u'both',length=0)", "def rstyle(ax):\r\n #set the style of the major and minor grid lines, filled blocks\r\n ax.grid(True, 'major', color='w', linestyle='-', linewidth=1.4)\r\n ax.grid(True, 'minor', color='0.92', linestyle='-', linewidth=0.7)\r\n ax.patch.set_facecolor('0.85')\r\n ax.set_axisbelow(True)\r\n \r\n #set minor tick spacing to 1/2 of the major ticks\r\n ax.xaxis.set_minor_locator(pylab.MultipleLocator( (pylab.xticks()[0][1]-pylab.xticks()[0][0]) / 2.0 ))\r\n ax.yaxis.set_minor_locator(pylab.MultipleLocator( (pylab.yticks()[0][1]-pylab.yticks()[0][0]) / 2.0 ))\r\n \r\n #remove axis border\r\n for child in ax.get_children():\r\n if isinstance(child, matplotlib.spines.Spine):\r\n child.set_alpha(0)\r\n \r\n #restyle the tick lines\r\n for line in ax.get_xticklines() + ax.get_yticklines():\r\n line.set_markersize(5)\r\n line.set_color(\"gray\")\r\n line.set_markeredgewidth(1.4)\r\n \r\n #remove the minor tick lines \r\n for line in ax.xaxis.get_ticklines(minor=True) + ax.yaxis.get_ticklines(minor=True):\r\n line.set_markersize(0)\r\n \r\n #only show bottom left ticks, pointing out of axis\r\n pylab.rcParams['xtick.direction'] = 'out'\r\n pylab.rcParams['ytick.direction'] = 'out'\r\n ax.xaxis.set_ticks_position('bottom')\r\n ax.yaxis.set_ticks_position('left')\r\n \r\n \r\n if ax.legend_ <> None:\r\n lg = ax.legend_\r\n lg.get_frame().set_linewidth(0)\r\n lg.get_frame().set_alpha(0.5)", "def curve(self, data):\n x, y, y_smoothed = data\n\n curve_keys = ['color', 'linestyle', 'alpha', 'label']\n curve_config = self.config.filter(curve_keys, prefix='curve_')\n\n curves = self.ax.plot(x, y, **curve_config)\n\n if y_smoothed is not None:\n smoothed_color = scale_lightness(curve_config['color'], scale=.5)\n smoothed_label = self.config.get('smoothed_label')\n _ = self.ax.plot(x, y_smoothed, label=smoothed_label, color=smoothed_color, linestyle='--')\n\n return curves", "def plot_band( # pylint: disable=too-many-statements,too-many-locals,too-many-branches\n band: ty.Union[dict, orm.BandsData],\n ref_zero: float = 0,\n ax=None,\n):\n from matplotlib import rc\n\n if ref_zero is None:\n ref_zero = 0\n\n # Uncomment to change default font\n # rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})\n rc(\n \"font\",\n **{\n \"family\": \"serif\",\n \"serif\": [\n \"Computer Modern\",\n \"CMU Serif\",\n \"Times New Roman\",\n \"DejaVu Serif\",\n ],\n },\n )\n # To use proper font for, e.g., Gamma if usetex is set to False\n rc(\"mathtext\", fontset=\"cm\")\n\n rc(\"text\", usetex=True)\n # Deprecated\n # https://github.com/matplotlib/matplotlib/blob/main/doc/api/prev_api_changes/api_changes_3.3.0/deprecations.rst#textlatexpreview-rcparam\n # plt.rcParams.update({'text.latex.preview': True})\n\n print_comment = False\n\n all_data = get_band_dict(band)\n\n if not all_data.get(\"use_latex\", False):\n rc(\"text\", usetex=False)\n\n # x = all_data['x']\n # bands = all_data['bands']\n paths 
= all_data[\"paths\"]\n tick_pos = all_data[\"tick_pos\"]\n tick_labels = all_data[\"tick_labels\"]\n\n # Option for bands (all, or those of type 1 if there are two spins)\n further_plot_options1 = {}\n further_plot_options1[\"color\"] = all_data.get(\"bands_color\", \"k\")\n further_plot_options1[\"linewidth\"] = all_data.get(\"bands_linewidth\", 0.5)\n further_plot_options1[\"linestyle\"] = all_data.get(\"bands_linestyle\", None)\n further_plot_options1[\"marker\"] = all_data.get(\"bands_marker\", None)\n further_plot_options1[\"markersize\"] = all_data.get(\"bands_markersize\", None)\n further_plot_options1[\"markeredgecolor\"] = all_data.get(\n \"bands_markeredgecolor\", None\n )\n further_plot_options1[\"markeredgewidth\"] = all_data.get(\n \"bands_markeredgewidth\", None\n )\n further_plot_options1[\"markerfacecolor\"] = all_data.get(\n \"bands_markerfacecolor\", None\n )\n\n # Options for second-type of bands if present (e.g. spin up vs. spin down)\n further_plot_options2 = {}\n further_plot_options2[\"color\"] = all_data.get(\"bands_color2\", \"r\")\n # Use the values of further_plot_options1 by default\n further_plot_options2[\"linewidth\"] = all_data.get(\n \"bands_linewidth2\", further_plot_options1[\"linewidth\"]\n )\n further_plot_options2[\"linestyle\"] = all_data.get(\n \"bands_linestyle2\", further_plot_options1[\"linestyle\"]\n )\n further_plot_options2[\"marker\"] = all_data.get(\n \"bands_marker2\", further_plot_options1[\"marker\"]\n )\n further_plot_options2[\"markersize\"] = all_data.get(\n \"bands_markersize2\", further_plot_options1[\"markersize\"]\n )\n further_plot_options2[\"markeredgecolor\"] = all_data.get(\n \"bands_markeredgecolor2\", further_plot_options1[\"markeredgecolor\"]\n )\n further_plot_options2[\"markeredgewidth\"] = all_data.get(\n \"bands_markeredgewidth2\", further_plot_options1[\"markeredgewidth\"]\n )\n further_plot_options2[\"markerfacecolor\"] = all_data.get(\n \"bands_markerfacecolor2\", further_plot_options1[\"markerfacecolor\"]\n )\n\n if ax is None:\n fig = plt.figure()\n p = fig.add_subplot(1, 1, 1) # pylint: disable=invalid-name\n else:\n p = ax # pylint: disable=invalid-name\n\n first_band_1 = True\n first_band_2 = True\n\n for path in paths:\n if path[\"length\"] <= 1:\n # Avoid printing empty lines\n continue\n x = path[\"x\"]\n # for band in bands:\n # pylint: disable=redefined-argument-from-local\n for band, band_type in zip(path[\"values\"], all_data[\"band_type_idx\"]):\n # For now we support only two colors\n if band_type % 2 == 0:\n further_plot_options = further_plot_options1\n else:\n further_plot_options = further_plot_options2\n\n # Put the legend text only once\n label = None\n if first_band_1 and band_type % 2 == 0:\n first_band_1 = False\n label = all_data.get(\"legend_text\", None)\n elif first_band_2 and band_type % 2 == 1:\n first_band_2 = False\n label = all_data.get(\"legend_text2\", None)\n\n p.plot(x, [_ - ref_zero for _ in band], label=label, **further_plot_options)\n\n p.set_xticks(tick_pos)\n p.set_xticklabels(tick_labels)\n p.set_xlim([all_data[\"x_min_lim\"], all_data[\"x_max_lim\"]])\n p.set_ylim([all_data[\"y_min_lim\"] - ref_zero, all_data[\"y_max_lim\"] - ref_zero])\n p.xaxis.grid(True, which=\"major\", color=\"#888888\", linestyle=\"-\", linewidth=0.5)\n\n if all_data.get(\"plot_zero_axis\", False):\n p.axhline(\n 0.0,\n color=all_data.get(\"zero_axis_color\", \"#888888\"),\n linestyle=all_data.get(\"zero_axis_linestyle\", \"--\"),\n linewidth=all_data.get(\"zero_axis_linewidth\", 0.5),\n )\n if 
all_data[\"title\"]:\n p.set_title(all_data[\"title\"])\n if all_data[\"legend_text\"]:\n p.legend(loc=\"best\")\n p.set_ylabel(all_data[\"yaxis_label\"])\n\n try:\n if print_comment:\n print(all_data[\"comment\"])\n except KeyError:\n pass\n\n if ax is None:\n plt.show()", "def powerpoint_style2(Axe_tick_size=15,Line_size=3) : \n fig = plt.gcf()\n def myfunc(x):\n return hasattr(x, 'set_linewidth')\n for o in fig.findobj(myfunc):\n o.set_linewidth(Line_size)\n \n def myfunc(x):\n return hasattr(x, 'set_markersize')\n for o in fig.findobj(myfunc):\n o.set_markersize(Line_size+4)\n def myfunc(x):\n return hasattr(x, 'set_markeredgewidth')\n for o in fig.findobj(myfunc):\n o.set_markeredgewidth(Line_size)\n for ax in fig.axes:\n \n # trouve tous les trucs avec linewidth et les modifie\n for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +ax.get_xticklabels() + ax.get_yticklabels()):\n item.set_fontsize(Axe_tick_size)\n \n #for item in ([ax.xaxis.label, ax.yaxis.label]):\n # item.set_fontsize(Axe_tick_size+5)\n for line in ax.get_xticklines() + ax.get_yticklines():\n line.set_markersize(Line_size+2)\n line.set_markeredgewidth(Line_size)", "def plot_ARD(self, fignum=None, ax=None, title='', legend=False):\r\n if ax is None:\r\n fig = pb.figure(fignum)\r\n ax = fig.add_subplot(111)\r\n else:\r\n fig = ax.figure\r\n from GPy.util import Tango\r\n from matplotlib.textpath import TextPath\r\n Tango.reset()\r\n xticklabels = []\r\n bars = []\r\n x0 = 0\r\n for p in self.parts:\r\n c = Tango.nextMedium()\r\n if hasattr(p, 'ARD') and p.ARD:\r\n if title is None:\r\n ax.set_title('ARD parameters, %s kernel' % p.name)\r\n else:\r\n ax.set_title(title)\r\n if p.name == 'linear':\r\n ard_params = p.variances\r\n else:\r\n ard_params = 1. / p.lengthscale\r\n\r\n x = np.arange(x0, x0 + len(ard_params))\r\n bars.append(ax.bar(x, ard_params, align='center', color=c, edgecolor='k', linewidth=1.2, label=p.name))\r\n xticklabels.extend([r\"$\\mathrm{{{name}}}\\ {x}$\".format(name=p.name, x=i) for i in np.arange(len(ard_params))])\r\n x0 += len(ard_params)\r\n x = np.arange(x0)\r\n transOffset = offset_copy(ax.transData, fig=fig,\r\n x=0., y= -2., units='points')\r\n transOffsetUp = offset_copy(ax.transData, fig=fig,\r\n x=0., y=1., units='points')\r\n for bar in bars:\r\n for patch, num in zip(bar.patches, np.arange(len(bar.patches))):\r\n height = patch.get_height()\r\n xi = patch.get_x() + patch.get_width() / 2.\r\n va = 'top'\r\n c = 'w'\r\n t = TextPath((0, 0), \"${xi}$\".format(xi=xi), rotation=0, ha='center')\r\n transform = transOffset\r\n if patch.get_extents().height <= t.get_extents().height + 3:\r\n va = 'bottom'\r\n c = 'k'\r\n transform = transOffsetUp\r\n ax.text(xi, height, \"${xi}$\".format(xi=int(num)), color=c, rotation=0, ha='center', va=va, transform=transform)\r\n # for xi, t in zip(x, xticklabels):\r\n # ax.text(xi, maxi / 2, t, rotation=90, ha='center', va='center')\r\n # ax.set_xticklabels(xticklabels, rotation=17)\r\n ax.set_xticks([])\r\n ax.set_xlim(-.5, x0 - .5)\r\n if legend:\r\n if title is '':\r\n mode = 'expand'\r\n if len(bars) > 1:\r\n mode = 'expand'\r\n ax.legend(bbox_to_anchor=(0., 1.02, 1., 1.02), loc=3,\r\n ncol=len(bars), mode=mode, borderaxespad=0.)\r\n fig.tight_layout(rect=(0, 0, 1, .9))\r\n else:\r\n ax.legend()\r\n return ax", "def plot(\n self,\n color_map={\n \"ex\": (1, 0.2, 0.2),\n \"ey\": (1, 0.5, 0),\n \"hx\": (0, 0.5, 1),\n \"hy\": (0.5, 0.2, 1),\n \"hz\": (0.2, 1, 1),\n },\n channel_order=None,\n ):\n\n if channel_order is not None:\n ch_list = 
channel_order()\n else:\n ch_list = self.channels\n\n n_channels = len(self.channels)\n\n fig = plt.figure()\n fig.subplots_adjust(hspace=0)\n ax_list = []\n for ii, comp in enumerate(ch_list, 1):\n try:\n color = color_map[comp]\n except KeyError:\n color = (0, 0.4, 0.8)\n if ii == 1:\n ax = plt.subplot(n_channels, 1, ii)\n else:\n ax = plt.subplot(n_channels, 1, ii, sharex=ax_list[0])\n self.dataset[comp].plot.line(ax=ax, color=color)\n ax.grid(which=\"major\", color=(0.65, 0.65, 0.65), ls=\"--\", lw=0.75)\n ax.grid(which=\"minor\", color=(0.85, 0.85, 0.85), ls=\"--\", lw=0.5)\n ax.set_axisbelow(True)\n if ii != len(ch_list):\n plt.setp(ax.get_xticklabels(), visible=False)\n\n ax_list.append(ax)", "def plot_coverage(ax, numstages, data, styles=['r-','b*','go']):\n plot_data(ax,numstages,data,styles)", "def gen_plot(fpr, tpr):\n plt.figure()\n plt.xlabel(\"FPR\", fontsize=14)\n plt.ylabel(\"TPR\", fontsize=14)\n plt.title(\"ROC Curve\", fontsize=14)\n plot = plt.plot(fpr, tpr, linewidth=2)\n buf = io.BytesIO()\n plt.savefig(buf, format='jpeg')\n buf.seek(0)\n plt.close()\n # plt.show()\n return buf", "def plot(self):\n h = .02\n i=1\n bags_X = self.bags_X\n bags_y = self.bags_y\n fig1 = plt.figure(figsize=(45, 9))\n\n \n cm = plt.cm.RdBu\n cm_bright = ListedColormap(['#FF0000', '#0000FF'])\n \n for model in self.models:\n ax = plt.subplot(1, len(self.models) , i)\n X = pd.DataFrame(bags_X[i-1])\n y = pd.Series(bags_y[i-1])\n x_min, x_max = X[X.columns[0]].min() - .5, X[X.columns[0]].max() + .5\n y_min, y_max = X[X.columns[1]].min() - .5, X[X.columns[1]].max() + .5\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n Z = np.array(model.predict(pd.DataFrame(np.c_[xx.ravel(), yy.ravel()], columns=X.columns)))\n # print(Z[12])\n Z = Z.reshape(xx.shape)\n ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)\n ax.scatter(X[X.columns[0]], X[X.columns[1]], c=y, cmap=cm_bright, edgecolors='k')\n # size=[1000*w for w in self.weights[i-1]]\n ax.set_xlim(xx.min(), xx.max())\n ax.set_ylim(yy.min(), yy.max())\n ax.set_xlabel(str(X.columns[0]))\n ax.set_ylabel(str(X.columns[1]))\n plt.title(\"Estimator \"+str(i))\n i+=1\n \n fig2 = plt.figure(figsize=(9,9))\n X = self.X\n y = self.y\n ax2 = plt.subplot(1,1,1)\n x_min, x_max = X[X.columns[0]].min() - .5, X[X.columns[0]].max() + .5\n y_min, y_max = X[X.columns[1]].min() - .5, X[X.columns[1]].max() + .5\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n Z = np.array(self.predict(pd.DataFrame(np.c_[xx.ravel(), yy.ravel()], columns=X.columns)))\n Z = Z.reshape(xx.shape)\n ax2.contourf(xx, yy, Z, cmap=cm, alpha=.8)\n # size=[1000*w for w in self.weights[i-2]]\n ax2.scatter(X[X.columns[0]], X[X.columns[1]], c=y, cmap=cm_bright, edgecolors='k')\n ax2.set_xlim(xx.min(), xx.max())\n ax2.set_ylim(yy.min(), yy.max())\n plt.title(\"Combined Decision Surface\")\n \n plt.tight_layout()\n plt.show()\n\n return [fig1,fig2]", "def _plot_rfs(ax, xe, ye, de, legend, alpha=0.5):\n # ax = plt.axes()\n ax.set_aspect('equal')\n # FIXME: HARD CODED 2x\n r = 0.203 * de\n for i, (x, y) in enumerate(zip(xe, ye)):\n if i == 0:\n label = None # 'One SDev of Neuron RF'\n else:\n label = None\n ax.add_patch(plt.Circle((x, -y), r, color='red', fill=True,\n alpha=alpha, label=label))\n\n if legend:\n plt.legend()\n ax.set_xlabel('x (arcmin)')\n ax.set_ylabel('y (arcmin)')", "def powerpoint_style3(fig,Axe_tick_size=15,Line_size=3) : \n #fig = plt.gcf()\n def myfunc(x):\n return hasattr(x, 'set_linewidth')\n for o in fig.findobj(myfunc):\n 
o.set_linewidth(Line_size)\n \n def myfunc(x):\n return hasattr(x, 'set_markersize')\n for o in fig.findobj(myfunc):\n o.set_markersize(Line_size+4)\n def myfunc(x):\n return hasattr(x, 'set_markeredgewidth')\n for o in fig.findobj(myfunc):\n o.set_markeredgewidth(Line_size)\n for ax in fig.axes:\n \n # trouve tous les trucs avec linewidth et les modifie\n for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +ax.get_xticklabels() + ax.get_yticklabels()):\n item.set_fontsize(Axe_tick_size)\n \n #for item in ([ax.xaxis.label, ax.yaxis.label]):\n # item.set_fontsize(Axe_tick_size+5)\n for line in ax.get_xticklines() + ax.get_yticklines():\n line.set_markersize(Line_size+2)\n line.set_markeredgewidth(Line_size) \n \n \n \n #fig.show()", "def plot_curves_lenstool(label = ' ', courves_file = 'ce.dat', marker = 'm.', \\\n plt_show = False):\n# x_ca, y_ca = np.loadtxt( courves_file, usecols=(3, 4), unpack=True )\n# x_cc, y_cc = np.loadtxt( courves_file, usecols=(1, 2), unpack=True )\n\n x_cc, y_cc, x_ca, y_ca = \\\n np.loadtxt( courves_file, usecols=(1, 2, 3, 4), unpack=True )\n\n plt.figure(1, figsize=(16, 8))\n\n plt.subplot(1, 2, 1).set_aspect(1)\n plt.plot(x_ca, y_ca, marker, linewidth=3, label = label)\n axis_limit = max( 1.075*max(np.absolute(x_ca)), \\\n 1.075*max(np.absolute(y_ca)) )\n plt.axis([-axis_limit, axis_limit, -axis_limit, axis_limit])\n\n plt.subplot (1, 2, 2).set_aspect(1)\n plt.plot(x_cc, y_cc, marker, linewidth=3, label = label)\n axis_limit = max( 1.075*max(np.absolute(x_cc)), \\\n 1.075*max(np.absolute(y_cc)) )\n plt.axis([-axis_limit, axis_limit, -axis_limit, axis_limit])\n plt.legend()\n\n if plt_show:\n plt.show()\n plt.close()", "def making_plot(sample_points_x_y_nonZero, gauge_volume, y_upper_imit, y_lower_limit,\n sample_height=10, sample_width=5., min_color=None, max_color = None):\n if sample_points_x_y_nonZero.size==0:\n print \"the array does not have a non zero gauge volume\"\n\n\n else:\n\n xS, yS=sample_points_x_y_nonZero\n X,Y= np.meshgrid(xS,yS)\n\n gauge_volume=np.array(gauge_volume)\n\n Z = griddata((xS,yS), gauge_volume, (X,Y), method='nearest')\n\n plt.figure()\n # r=plt.contour( X, Y,Z)\n # plt.clabel(r, inline=1, fontsize=10)\n plt.pcolormesh(X, Y, Z, cmap = plt.get_cmap('rainbow'),vmin=min_color, vmax=max_color )\n plt.xlabel('points along sample width (mm)')\n plt.ylabel('points along sample height (mm)')\n plt.ylim(y_lower_limit,y_upper_imit)\n plt.colorbar()\n plt.axhline(y=-sample_height/2., color='r', linestyle='-')\n plt.axhline(y=sample_height/2., color='r', linestyle='-')\n plt.axvline(x=- sample_width/2., color='r', linestyle='-')\n plt.axvline(x= sample_width/2., color='r', linestyle='-')\n # plt.scatter(xS,yS ,marker = 'o', c = 'b', s = 5, zorder = 10)\n plt.savefig(os.path.join(thisdir, '../figures/{sample}.png'.format(sample='gauge_volume')))\n plt.show()", "def parameter_forecast_plot(model_obj,time_index,start,end,num_samples = 100,cached_samples=None,col_labels = ['P','PET','Lag-1 Q','Lag-1 P','Seasonal','P$^2$','Constant']):\n \n f = plt.figure(figsize = (8,10))\n num_components = len(col_labels)\n gs = gridspec.GridSpec(8+2*num_components,6)\n ax0 = plt.subplot(gs[-8:-6,:])\n ax1 = plt.subplot(gs[-6::,:])\n col_labels = ['P','PET','Lag-1 Q','Lag-1 P','Seasonal','P$^2$','Constant']\n ffbs = model_obj # 120 is French Broad River at Blantyre, NC\n if cached_samples is None:\n samples = ffbs.backward_sample(num_samples=num_samples)\n else: \n samples = cached_samples\n for i in range(7):\n ax_new = plt.subplot(gs[2*i:2*i+2,:])\n\n 
upper = np.percentile(samples[start:end,i,:],75,axis = 1)\n mid = np.percentile(samples[start:end,i,:],50,axis = 1)\n lower = np.percentile(samples[start:end,i,:],25,axis = 1)\n\n ax_new.plot(time_index[start:end],mid,color='k')\n ax_new.fill_between(time_index[start:end],upper,lower,color='0.8')\n ax_new.tick_params(labelbottom=False,direction='in')\n ax_new.text(0.02, 0.82,col_labels[i],\n horizontalalignment='left',\n verticalalignment='center',transform=ax_new.transAxes)\n\n ax1.plot(time_index[start:end],ffbs.f[start:end],color='k',label='1-step forecast')\n ax1.plot(time_index[start:end],ffbs.Y[start:end],color='k',linestyle='',marker='+',\n markersize = 10,label='Observed streamflow')\n\n ax1.fill_between(time_index[start:end],\n np.squeeze(ffbs.f[start:end] + 2*ffbs.Q[start:end,0]),\n np.squeeze(ffbs.f[start:end] - 2*ffbs.Q[start:end,0]),color='0.8',\n label = 'Forecast $\\pm 2V_t$')\n ax1.tick_params(direction='in')\n ax1.legend(loc='upper right',ncol=1,frameon=True)\n #ax1.set_ylabel('Standardized streamflow')\n ax1.set_xlabel('Date',fontsize=16)\n ax1.get_yaxis().set_label_coords(-0.1,0.5)\n ax1.text(0.02, 0.92,'Standardized streamflow',\n horizontalalignment='left',\n verticalalignment='center',transform=ax1.transAxes,)\n ax0.plot(time_index[start:end],ffbs.s[start:end],color='k')\n ax0.text(0.02, 0.82,'$E[V_t]$',\n horizontalalignment='left',\n verticalalignment='center',transform=ax0.transAxes,)\n ax0.get_yaxis().set_label_coords(-0.1,0.5)\n return f,samples", "def create_bk_fig(x=None, xlab=None, x_min=None, x_max=None,\n ylab=None, fh=None, fw=None,\n title=None, pw=None, ph=None, x_axis_type=\"linear\",\n y_axis_type=\"linear\", x_name=None, y_name=None, **kwargs):\n\n add_grid = kwargs.pop(\"add_grid\", False)\n add_title = kwargs.pop(\"add_title\", True)\n add_xaxis = kwargs.pop(\"add_xaxis\", False)\n add_yaxis = kwargs.pop(\"add_yaxis\", False)\n fix_plotsize = kwargs.pop(\"fix_plotsize\", True)\n # addition plot specs\n pl_specs = kwargs.pop(\"pl_specs\", {})\n # additional axis specs\n ax_specs = kwargs.pop(\"ax_specs\", {})\n # ticker specs\n ti_specs = kwargs.pop(\"ti_specs\", {})\n\n plot_specs = dict(background=\"white\", border_fill_alpha=0.1,\n border_fill_color=\"white\", min_border=3,\n name=\"plot\", outline_line_dash=\"solid\",\n outline_line_width=2, outline_line_color=\"#017afe\",\n outline_line_alpha=0.4, output_backend=\"canvas\",\n sizing_mode=\"stretch_width\", title_location=\"above\",\n toolbar_location=\"above\")\n plot_specs.update(pl_specs)\n\n axis_specs = dict(minor_tick_line_alpha=0, axis_label_text_align=\"center\",\n axis_label_text_font=\"monospace\",\n axis_label_text_font_size=\"10px\",\n axis_label_text_font_style=\"normal\",\n major_label_orientation=\"horizontal\")\n axis_specs.update(ax_specs)\n\n tick_specs = dict(desired_num_ticks=5)\n tick_specs.update(ti_specs)\n\n # Define frame width and height\n # This is the actual size of the plot without the titles et al\n if fix_plotsize and not(fh or fw):\n fw = int(0.98 * pw)\n fh = int(0.93 * ph)\n\n # define the axes ranges\n x_range = DataRange1d(name=\"p_x_range\", only_visible=True)\n\n y_range = DataRange1d(name=\"p_y_range\", only_visible=True)\n\n if x_min is not None and x_max is not None and x_name.lower() in [\"channel\", \"frequency\"]:\n x_range = Range1d(name=\"p_x_range\", start=x_min, end=x_max)\n y_range.only_visible = False\n\n # define items to add on the plot\n p_htool = HoverTool(tooltips=[(x_name, \"$x\"),\n (y_name, \"$y\")],\n name=\"p_htool\", 
point_policy=\"snap_to_data\")\n\n if x_name.lower() == \"time\":\n p_htool.tooltips[0] = (x_name, \"$x{%d-%m-%Y %H:%M}\")\n p_htool.formatters = {\"$x\": \"datetime\"}\n\n p_toolbar = Toolbar(name=\"p_toolbar\",\n tools=[p_htool, BoxSelectTool(), BoxZoomTool(),\n # EditTool(), # BoxEditTool(), # RangeTool(),\n LassoSelectTool(), PanTool(), ResetTool(),\n SaveTool(), UndoTool(), WheelZoomTool()])\n p_ticker = BasicTicker(name=\"p_ticker\", **tick_specs)\n\n # select the axis scales for x and y\n if x_axis_type == \"linear\":\n x_scale = LinearScale(name=\"p_x_scale\")\n # define the axes and tickers\n p_x_axis = LinearAxis(axis_label=xlab, name=\"p_x_axis\",\n ticker=p_ticker, **axis_specs)\n elif x_axis_type == \"datetime\":\n x_scale = LinearScale(name=\"p_x_scale\")\n # define the axes and tickers\n p_x_axis = DatetimeAxis(axis_label=xlab, name=\"p_x_axis\",\n ticker=p_ticker, **axis_specs)\n elif x_axis_type == \"log\":\n x_scale = LogScale(name=\"p_x_scale\")\n p_x_axis = LogAxis(axis_label=xlab, name=\"p_x_axis\",\n ticker=p_ticker, **axis_specs)\n\n if y_axis_type == \"linear\":\n y_scale = LinearScale(name=\"p_y_scale\")\n # define the axes and tickers\n p_y_axis = LinearAxis(axis_label=ylab, name=\"p_y_axis\",\n ticker=p_ticker, **axis_specs)\n elif x_axis_type == \"datetime\":\n y_scale = LinearScale(name=\"p_y_scale\")\n # define the axes and tickers\n p_y_axis = DatetimeAxis(axis_label=xlab, name=\"p_y_axis\",\n ticker=p_ticker, **axis_specs)\n elif y_axis_type == \"log\":\n y_scale = LogScale(name=\"p_y_scale\")\n # define the axes and tickers\n p_y_axis = LogAxis(axis_label=ylab, name=\"p_y_axis\",\n ticker=p_ticker, **axis_specs)\n\n # Create the plot object\n p = Plot(plot_width=pw, plot_height=ph, frame_height=fh, frame_width=fw,\n toolbar=p_toolbar, x_range=x_range, x_scale=x_scale,\n y_range=y_range, y_scale=y_scale, **plot_specs)\n\n if add_title:\n p_title = Title(align=\"center\", name=\"p_title\", text=title,\n text_font_size=\"24px\",\n text_font=\"monospace\", text_font_style=\"bold\",)\n p.add_layout(p_title, \"above\")\n\n if add_xaxis:\n p.add_layout(p_x_axis, \"below\")\n\n if add_yaxis:\n p.add_layout(p_y_axis, \"left\")\n\n if add_grid:\n p_x_grid = Grid(dimension=0, ticker=p_ticker)\n p_y_grid = Grid(dimension=1, ticker=p_ticker)\n p.add_layout(p_x_grid)\n p.add_layout(p_y_grid)\n\n return p", "def plot(self):\n # -- plotting\n fig = plt.figure(figsize=figsize)\n ax = fig.add_axes([0.08, 0.12, 0.55, 0.85])\n ax.plot(self.raw['stress'][1:], self.raw['e'][1:], ls=(0, (1, 1)),\n marker='o', lw=1.5, c='k', mfc='w', label='Experimental data')\n ax.plot(self.sigmaV, self.eSigmaV, ls='', marker='|', c='r', ms=15,\n mfc='w', mew=1.5,\n label=str().join([r'$\\sigma^\\prime_\\mathrm{v_0}=$ ',\n f'{self.sigmaV:.0f} kPa']))\n # Compression index\n x4Cc = np.linspace(\n self.cleaned['stress'].iloc[-4], self.cleaned['stress'].iloc[-1])\n y4Cc = -self.idxCc * np.log10(x4Cc) + self.idxCcInt\n ax.plot(x4Cc, y4Cc, ls='-', lw=1.125, color=colors[1],\n label=str().join([r'$C_\\mathrm{c}=$', f'{self.idxCc:.3f}']))\n if self.fitCc:\n ax.plot(self.cleaned['stress'].iloc[self.maskCc],\n self.cleaned['e'].iloc[self.maskCc], ls='', marker='x',\n color=colors[1],\n label=f'Data for linear fit\\n(R$^2={self.r2Cc:.3f}$)')\n # Recompression index\n x4Cr = np.linspace(self.raw['stress'].iloc[self.maskCr].min(),\n self.raw['stress'].iloc[self.maskCr].max())\n y4Cr = -self.idxCr * np.log10(x4Cr) + self.idxCrInt\n ax.plot(x4Cr, y4Cr, ls='-', lw=1.125, color=colors[2],\n 
label=str().join([r'$C_\\mathrm{r}=$', f'{self.idxCr:.3f}']))\n ax.plot(self.raw['stress'].iloc[self.maskCr],\n self.raw['e'].iloc[self.maskCr], ls='', marker='+',\n color=colors[2],\n label=f'Data for linear fit\\n(R$^2={self.r2Cr:.3f}$)')\n # other details\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.set(xscale='log', ylabel='Void ratio, $e$',\n xlabel=str().join(['Effective vertical stress, ',\n r'$\\sigma^\\prime_\\mathrm{v}$ [kPa]']))\n ax.xaxis.set_major_formatter(mtick.ScalarFormatter())\n ax.yaxis.set_minor_locator(mtick.AutoMinorLocator())\n ax.grid(False)\n ax.legend(bbox_to_anchor=(1.125, 0.5), loc=6,\n title=r\"\\textbf{Compressibility curve}\")\n return fig", "def plot_train_and_valid_curves(ax, train_points, valid_points, learning_rate_updates_epoch, best_per_lr, mode=\"loss\"):\n if mode==\"loss\":\n name = \"Loss\"\n names = \"losses\"\n factor = [1.2, 1.22]\n loc_legend = 1\n elif mode ==\"acc\":\n name = \"Accuracy\"\n names = \"acc\"\n factor = [0.9, 0.88]\n loc_legend = 4\n else:\n print \"Mode not understood. Available modes : 'loss' and 'acc'\"\n return\n\n #ax = plt.subplot(1,1,1)#\n # Plot training and valid loss curves\n ax.plot(np.arange(len(train_points)),train_points, c=\"k\", zorder=1)\n ax.plot(np.arange(len(valid_points)),valid_points, c=\"k\", zorder=1)\n ax.scatter(np.arange(len(train_points)),train_points, c=\"b\", label=\"Train %s\"%names, zorder=2)\n ax.scatter(np.arange(len(valid_points)),valid_points, c=\"r\", label=\"Valid %s\"%names, zorder=2)\n # Plot vertical line when the learning rate was updated\n first = True\n for elem in learning_rate_updates_epoch:\n if first:\n plt.plot([elem-.5,elem-.5], [1.4*valid_points[elem],train_points[elem]*0.6], c=\"k\", label=\"LR updates\", linestyle=\"--\")\n first = False\n else:\n plt.plot([elem-.5,elem-.5], [1.4*valid_points[elem],train_points[elem]*0.6], c=\"k\", linestyle=\"--\")\n # Plot best model in each region\n first = True\n for i,elem in enumerate(best_per_lr):\n if first:\n x = elem[0]\n y = elem[1]\n plt.scatter(x,y, c=\"g\", label=\"Best models\", marker=\"*\", zorder=3, s=100)\n plt.plot([x,x],[y,factor[0]*y], c=\"g\")\n plt.text(x,factor[1]*y, \"Epoch %d\"%(x), fontsize=8)\n first = False\n else:\n x = elem[0]+learning_rate_updates_epoch[i-1]\n y = elem[1]\n plt.scatter(x,y, c=\"g\", marker=\"*\", zorder=3, s=100)\n plt.plot()\n plt.plot([x,x],[y,factor[0]*y], c=\"g\")\n plt.text(x,factor[1]*y, \"Epoch %d\"%(x), fontsize=8)\n # Xlim, Ylim, labels, legend...\n ax.set_ylim([0,1])\n ax.set_xlim([0,len(train_points)+5])\n ax.set_xlabel(\"Epochs\")\n ax.set_ylabel(name)\n handles,labels = ax.get_legend_handles_labels()\n sorted_zip = sorted(zip([2,0,1,3],handles, labels))\n index, handles, labels = zip(*sorted_zip)\n ax.legend(handles,labels, loc=loc_legend, prop={'size':10})", "def sample_and_plot(S0, K, B, T, N, u, d, q, M, barrier_type):\n paths = sample_paths(S0, N, u, d, q, M)\n p_valid, p_invalid, p_counts = split_paths(paths, B, K,\n barrier_type, option)\n\n times = np.linspace(0, T, N + 1)\n\n plt.figure(figsize=(10, 7))\n ax1 = plt.subplot2grid((1, 1), (0, 0))\n ax1.set_ylabel('Stock price (log-scale)')\n ax1.set_xlabel('time')\n for path in p_invalid:\n ax1.plot(times, path, c='lightcoral')\n for path in p_valid:\n ax1.plot(times, path, c='grey')\n for path in p_counts:\n ax1.plot(times, path, c='blue')\n\n custom_lines = [Line2D([0], [0], c='lightcoral', lw=2),\n Line2D([0], [0], c='grey', lw=2),\n Line2D([0], [0], c='blue', lw=2),\n Line2D([0], [0], 
c='red', ls=':', lw=2),\n Line2D([0], [0], c='navy', ls=':', lw=2)]\n\n ax1.axhline(y=K, lw=4, c='navy', ls=':', label='Strike Price')\n ax1.axhline(y=B, lw=4, c='red', ls=':', label='Barrier')\n\n plt.yscale('log')\n ax1.legend(custom_lines, ['invalid (barrier)', 'invalid (option)', 'valid',\n 'barrier', 'strike price'])\n # plt.savefig('up-and-out_call.png', transparent=True)\n plt.show()", "def plot_rbf_control_points(parameters, save_fig=False):\n fig = plt.figure(1)\n axes = fig.add_subplot(111, projection='3d')\n orig = axes.scatter(parameters.original_control_points[:, 0], \\\n parameters.original_control_points[:, 1], \\\n parameters.original_control_points[:, 2], c='blue', marker='o')\n defor = axes.scatter(parameters.deformed_control_points[:, 0], \\\n parameters.deformed_control_points[:, 1], \\\n parameters.deformed_control_points[:, 2], c='red', marker='x')\n\n axes.set_xlabel('X axis')\n axes.set_ylabel('Y axis')\n axes.set_zlabel('Z axis')\n\n plt.legend((orig, defor), \\\n ('Original', 'Deformed'), \\\n scatterpoints=1, \\\n loc='lower left', \\\n ncol=2, \\\n fontsize=10)\n\n # Show the plot to the screen\n if not save_fig:\n plt.show()\n else:\n fig.savefig('RBF_control_points.png')", "def plot_ave(pulse, trap, ToP):\n time_array = np.linspace(0, pulse.t * trap.N, trap.N + 1)\n # fig, ax = plt.subplots()\n all_trial_n, all_trial_n_ave = trap.sideband_cool_sch(pulse, ave = True)\n if ToP == 'a':\n plt.plot(time_array * 1e3, all_trial_n_ave, label = pulse.t)\n if ToP == 'b':\n plt.plot(time_array * 1e3, all_trial_n_ave, color = 'magenta', linewidth = 3, label = 'Monte Carlo')\n if ToP == 'c':\n plt.plot(time_array * 1e3, all_trial_n_ave, color = 'b')\n if ToP == 'd':\n if trap.no_decay == True and trap.off_resonant_excite == False:\n plt.plot(time_array * 1e3, all_trial_n_ave, label = 'Decay to carrier')\n if trap.no_decay == False and trap.off_resonant_excite == False:\n plt.plot(time_array * 1e3, all_trial_n_ave, \n label = 'Decay to %s sideband'%(trap.sideband))\n if trap.no_decay == False and trap.off_resonant_excite == True:\n plt.plot(time_array * 1e3, all_trial_n_ave, \n label = 'Decay to %s sideband, off-resonant excite'%(trap.sideband))\n # plt.xlabel('time / ms')\n # plt.ylabel('Phonon State')\n # plt.legend()", "def plot_budget_analyais_results(df, fs=8, fs_title=14, lw=3, fontsize=20, colors=['#AA3377', '#009988', '#EE7733', '#0077BB', '#BBBBBB', '#EE3377', '#DDCC77']):\n df_decomposed = df.loc[df['block'] == 'decomposed']\n df_joint = df.loc[df['block'] == 'joint']\n ticklabels = []\n num_sweeps = df_decomposed['num_sweeps'].to_numpy()\n sample_sizes = df_decomposed['sample_sizes'].to_numpy()\n for i in range(len(num_sweeps)):\n ticklabels.append('K=%d\\nL=%d' % (num_sweeps[i], sample_sizes[i]))\n fig = plt.figure(figsize=(fs*2.5, fs))\n ax1 = fig.add_subplot(1, 2, 1)\n ax1.plot(num_sweeps, df_decomposed['density'].to_numpy(), 'o-', c=colors[0], linewidth=lw, label=r'$\\{\\mu, \\tau\\}, \\{c\\}$')\n ax1.plot(num_sweeps, df_joint['density'].to_numpy(), 'o-', c=colors[1], linewidth=lw,label=r'$\\{\\mu, \\tau, c\\}$')\n ax1.set_xticks(num_sweeps)\n ax1.set_xticklabels(ticklabels)\n ax1.tick_params(labelsize=fontsize)\n ax1.grid(alpha=0.4)\n ax2 = fig.add_subplot(1, 2, 2)\n ax2.plot(num_sweeps, df_decomposed['ess'].to_numpy(), 'o-', c=colors[0], linewidth=lw,label=r'$\\{\\mu, \\tau\\}, \\{c\\}$')\n ax2.plot(num_sweeps, df_joint['ess'].to_numpy(), 'o-', c=colors[1], linewidth=lw,label=r'$\\{\\mu, \\tau, c\\}$')\n ax2.set_xticks(num_sweeps)\n 
ax2.set_xticklabels(ticklabels)\n ax2.tick_params(labelsize=fontsize)\n ax2.grid(alpha=0.4)\n ax2.legend(fontsize=fontsize)\n ax1.legend(fontsize=fontsize)\n ax1.set_ylabel(r'$\\log \\: p_\\theta(x, \\: z)$', fontsize=35)\n ax2.set_ylabel('ESS / L', fontsize=35)", "def __init__(self, refstd, fig=None, rect=111, label='_', srange=(0, 1.5)):\n\n from matplotlib.projections import PolarAxes\n import mpl_toolkits.axisartist.floating_axes as FA\n import mpl_toolkits.axisartist.grid_finder as GF\n\n self.refstd = refstd # Reference standard deviation\n\n tr = PolarAxes.PolarTransform()\n\n # Correlation labels\n rlocs = NP.concatenate((NP.arange(10)/10., [0.95, 0.99]))\n tlocs = NP.arccos(rlocs) # Conversion to polar angles\n gl1 = GF.FixedLocator(tlocs) # Positions\n tf1 = GF.DictFormatter(dict(zip(tlocs, map(str, rlocs))))\n\n # Standard deviation axis extent (in units of reference stddev)\n self.smin = srange[0]*self.refstd\n self.smax = srange[1]*self.refstd\n\n ghelper = FA.GridHelperCurveLinear(tr,\n extremes=(0, NP.pi/2, # 1st quadrant\n self.smin, self.smax),\n grid_locator1=gl1,\n tick_formatter1=tf1)\n\n if fig is None:\n fig = PLT.figure()\n\n ax = FA.FloatingSubplot(fig, rect, grid_helper=ghelper)\n fig.add_subplot(ax)\n\n # Adjust axes\n ax.axis[\"top\"].set_axis_direction(\"bottom\") # \"Angle axis\"\n ax.axis[\"top\"].toggle(ticklabels=True, label=True)\n ax.axis[\"top\"].major_ticklabels.set_axis_direction(\"top\")\n ax.axis[\"top\"].label.set_axis_direction(\"top\")\n ax.axis[\"top\"].label.set_text(\"Correlation\")\n\n ax.axis[\"left\"].set_axis_direction(\"bottom\") # \"X axis\"\n ax.axis[\"left\"].label.set_text(\"Standard deviation\")\n\n ax.axis[\"right\"].set_axis_direction(\"top\") # \"Y axis\"\n ax.axis[\"right\"].toggle(ticklabels=True)\n ax.axis[\"right\"].major_ticklabels.set_axis_direction(\"left\")\n #ax.axis[\"right\"].label.set_text(\"Standard deviation\")\n\n ax.axis[\"bottom\"].set_visible(False) # Useless\n\n self._ax = ax # Graphical axes\n self.ax = ax.get_aux_axes(tr) # Polar coordinates\n\n # Add reference point and stddev contour\n l, = self.ax.plot([0], self.refstd, 'k*',\n ls='', ms=10, label=label)\n t = NP.linspace(0, NP.pi/2)\n r = NP.zeros_like(t) + self.refstd\n self.ax.plot(t, r, 'k--', label='_')\n\n # Collect sample points for latter use (e.g. 
legend)\n self.samplePoints = [l]", "def __init__(self, refstd, fig=None, rect=111, label='_', srange=(0, 1.5)):\n\n from matplotlib.projections import PolarAxes\n import mpl_toolkits.axisartist.floating_axes as FA\n import mpl_toolkits.axisartist.grid_finder as GF\n\n self.refstd = refstd # Reference standard deviation\n\n tr = PolarAxes.PolarTransform()\n\n # Correlation labels\n rlocs = NP.concatenate((NP.arange(10)/10., [0.95, 0.99]))\n tlocs = NP.arccos(rlocs) # Conversion to polar angles\n gl1 = GF.FixedLocator(tlocs) # Positions\n tf1 = GF.DictFormatter(dict(zip(tlocs, map(str, rlocs))))\n\n # Standard deviation axis extent (in units of reference stddev)\n self.smin = srange[0]*self.refstd\n self.smax = srange[1]*self.refstd\n\n ghelper = FA.GridHelperCurveLinear(tr,\n extremes=(0, NP.pi, # 1st quadrant\n self.smin, self.smax),\n grid_locator1=gl1,\n tick_formatter1=tf1)\n\n if fig is None:\n fig = PLT.figure()\n\n ax = FA.FloatingSubplot(fig, rect, grid_helper=ghelper)\n fig.add_subplot(ax)\n\n # Adjust axes\n ax.axis[\"top\"].set_axis_direction(\"bottom\") # \"Angle axis\"\n ax.axis[\"top\"].toggle(ticklabels=True, label=True)\n ax.axis[\"top\"].major_ticklabels.set_axis_direction(\"top\")\n ax.axis[\"top\"].label.set_axis_direction(\"top\")\n ax.axis[\"top\"].label.set_text(\"Correlation\")\n\n ax.axis[\"left\"].set_axis_direction(\"bottom\") # \"X axis\"\n ax.axis[\"left\"].label.set_text(\"Standard deviation\")\n\n ax.axis[\"right\"].set_axis_direction(\"top\") # \"Y axis\"\n ax.axis[\"right\"].toggle(ticklabels=True)\n ax.axis[\"right\"].major_ticklabels.set_axis_direction(\"left\")\n #ax.axis[\"right\"].label.set_text(\"Standard deviation\")\n\n ax.axis[\"bottom\"].set_visible(False) # Useless\n\n self._ax = ax # Graphical axes\n self.ax = ax.get_aux_axes(tr) # Polar coordinates\n\n # Add reference point and stddev contour\n l, = self.ax.plot([0], self.refstd, 'k*',\n ls='', ms=10, label=label)\n t = NP.linspace(0, NP.pi/2)\n r = NP.zeros_like(t) + self.refstd\n self.ax.plot(t, r, 'k--', label='_')\n\n # Collect sample points for latter use (e.g. 
legend)\n self.samplePoints = [l]", "def rplot(Qz, R, format):\n # plt.hold(True)\n plt.figure()\n for name,xs in zip(('++','+-','-+','--'),R):\n rr = xs.real\n if (rr>1e-8).any():\n plt.plot(Qz, rr, format, label=name + 'r')\n plt.legend()\n plt.figure()\n for name,xs in zip(('++','+-','-+','--'),R):\n ri = xs.imag\n if (ri>1e-8).any():\n plt.plot(Qz, ri, format, label=name + 'i')\n plt.legend()\n\n plt.figure()\n for name,xs in zip(('++','+-','-+','--'),R):\n phi = np.arctan2(xs.imag, xs.real)\n if (ri>1e-8).any():\n plt.plot(Qz, phi, format, label=name + 'i')\n plt.legend()", "def plot(self,displayplt = True,saveplt = False,savepath='',polarplt=True, dbdown = False):\n plt.figure()\n\n #legacy beamprofile data is a 1-D array of the peak negative pressure\n if len(self.hydoutput.shape)<2:\n pnp = self.hydoutput\n else:\n sensitivity = hyd_calibration(self.cfreq)\n pnp = -1*np.min(self.hydoutput,1)/sensitivity\n\n if dbdown:\n pnp = 20.0*np.log10(pnp/np.max(pnp))\n else:\n pnp = pnp*1e-6\n\n figure1 = plt.plot(self.depth, pnp)\n #the latest beamprofile data should be a 2-D array of the hydrophone output\n plt.xlabel('Depth (mm)')\n if dbdown:\n plt.ylabel('Peak Negative Pressure (dB Max)')\n else:\n plt.ylabel('Peak Negative Pressure (MPa)')\n plt.title(self.txdr)\n if displayplt:\n plt.show()\n if saveplt:\n if savepath=='':\n #prompt for a save path using a default filename\n defaultfn = self.txdr+'_'+self.collectiondate+'_'+self.collectiontime+'_depthprofile.png'\n savepath = tkinter.filedialog.asksaveasfilename(initialfile=defaultfn, defaultextension='.png')\n plt.savefig(savepath)\n return figure1, savepath" ]
[ "0.7304228", "0.6550803", "0.6549476", "0.63300985", "0.631119", "0.6244181", "0.614794", "0.61184627", "0.611656", "0.6109644", "0.60801786", "0.6004628", "0.59819883", "0.595841", "0.5869724", "0.58628213", "0.5857794", "0.5838128", "0.5826333", "0.58193934", "0.5785737", "0.5783892", "0.5718445", "0.57179666", "0.57162356", "0.5686101", "0.5677497", "0.56677884", "0.5615513", "0.5614723", "0.56122214", "0.56111014", "0.5609918", "0.56089014", "0.5607785", "0.5605715", "0.56032014", "0.56018186", "0.559122", "0.5588702", "0.5584633", "0.5577609", "0.5575839", "0.5573395", "0.5553148", "0.55493116", "0.5543621", "0.55412424", "0.55396307", "0.55334485", "0.55325747", "0.55307144", "0.55230904", "0.5522842", "0.55195856", "0.55180085", "0.5509136", "0.55081195", "0.550563", "0.5500188", "0.54985046", "0.54981136", "0.54973143", "0.54893005", "0.54892427", "0.54836875", "0.54803616", "0.54793996", "0.5475754", "0.54740745", "0.5469678", "0.5467412", "0.54634285", "0.54632354", "0.54627055", "0.5452122", "0.5451731", "0.54513943", "0.54476553", "0.54248977", "0.5414887", "0.54136395", "0.54114807", "0.54111826", "0.5405079", "0.54042566", "0.5404173", "0.54030275", "0.54017127", "0.539831", "0.5397994", "0.53952783", "0.5391211", "0.539095", "0.53883445", "0.5387356", "0.5384698", "0.53827596", "0.5382494", "0.5377159" ]
0.6976897
1
Main routine for plotting a single roccurve
def plot_single_roccurve(signals, bkgs, cut_function, cut_values, ax=None):
    # Get a default ax if none is given
    if ax is None:
        import matplotlib.pyplot as plt
        fig = plt.figure(figsize=(8,8))
        ax = fig.gca()
    # Plot the base line
    ax.plot([0.0,1.0], [0.0,1.0], linestyle='--', color='xkcd:gray')
    # Plot the single roccurve
    line = plot_roccurve(signals, bkgs, cut_function, cut_values, ax=ax)
    line.set_label(bkgs[0].get_category())
    # Plot settings
    ax.set_xlim(0.0, 1.05)
    ax.set_ylim(0.0, 1.05)
    ax.set_ylabel('Signal eff.', fontsize=DEFAULT_FONTSIZE)
    ax.set_xlabel('Bkg eff.', fontsize=DEFAULT_FONTSIZE)
    ax.legend(fontsize=DEFAULT_FONTSIZE)
    return ax
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_roccurve(signals, bkgs, cut_function, cut_values, ax):\n eff_sig, eff_bkg, n_pass_sig, n_pass_bkg, n_total_sig, n_total_bkg = roccurve(signals, bkgs, cut_function, cut_values)\n return _draw_roccurve(eff_sig, eff_bkg, cut_values, ax)", "def roc_curve(model, X_train, y_train, X_test, y_test, train=True):\n from sklearn.metrics import roc_curve\n if train==True:\n ypredTrain = model.predict(X_train)\n fpr, tpr, thresholds = roc_curve(y_train, ypredTrain)\n plt.plot(fpr, tpr, linewidth=3, label=None, color='r', linestyle='-')\n plt.rc('xtick', labelsize=10) \n plt.rc('ytick', labelsize=10) \n plt.xlabel('False Positive Rate', size=12)\n plt.ylabel('True Positive Rate', size=12)\n plt.grid()\n plt.rcParams['figure.facecolor'] = '#F2F3F4' \n plt.rcParams['axes.facecolor'] = '#F2F3F4' \n plt.title(\"ROC Curve: Sensitivity/Specificity Trade-off\\n\\n(Train)\\n\", size=14)\n plt.show()\n elif train==False:\n ypredTest = model.predict(X_test)\n fpr, tpr, thresholds = roc_curve(y_test, ypredTest)\n plt.plot(fpr, tpr, linewidth=3, label=None, color='b', linestyle='-')\n plt.rc('xtick', labelsize=10) \n plt.rc('ytick', labelsize=10) \n plt.xlabel('False Positive Rate', size=12)\n plt.ylabel('True Positive Rate', size=12)\n plt.grid()\n plt.rcParams['figure.facecolor'] = '#F2F3F4' \n plt.rcParams['axes.facecolor'] = '#F2F3F4'\n plt.title('ROC Curve: Sensitivity/Specificity Trade-off\\n\\n(Test)\\n', size=14)\n plt.show()", "def _roc_plot(self, roc_curves):\n # figure\n p = default_figure(\n {\n \"x_range\": (-0.01, 1.1),\n \"y_range\": (-0.01, 1.1),\n \"tools\": \"pan,wheel_zoom,box_zoom,reset\",\n \"toolbar_location\": \"right\"\n }\n )\n\n # main lines added to the plot\n self._default_models_lines(p, roc_curves)\n\n # baseline comparison\n p.line(\n [0, 1], # line x=y\n [0, 1],\n line_dash=\"dashed\",\n line_width=1,\n color=self.plot_design.models_dummy_color,\n legend_label=\"Random Baseline\",\n muted_alpha=0.5 # clicked line in the Legend will be muted\n )\n\n # plot specific styling\n p.legend.location = \"bottom_right\"\n p.xaxis.axis_label = \"False Positive Rate\"\n p.yaxis.axis_label = \"True Positive Rate\"\n\n return p", "def plot_roc_curve(x_data, labels, net, plotfile,\n title=''):\n\n # Have the net predict, then split the scores by ground truth\n scores = net.predict(x_data)\n\n distfile = PLOTDIR / plotfile.replace('roc', 'dist')\n\n fig, ax = plt.subplots(1, 1, figsize=(12, 12))\n\n df = pd.DataFrame({'Condition': ['Positive' if int(i) == 1 else 'Negative'\n for i in labels[0, :]],\n 'Score': scores[0, :]})\n sns.violinplot(x='Condition', y='Score', data=df, ax=ax)\n ax.set_title('{} Dist for Rap1 Identification'.format(title))\n\n fig.savefig(str(distfile))\n\n plt.close()\n\n fp_rate, tp_rate = calc_roc(scores[labels], scores[~labels])\n\n # Make the plot\n plotfile = PLOTDIR / plotfile\n\n fig, ax = plt.subplots(1, 1, figsize=(12, 12))\n\n ax.plot(fp_rate, tp_rate, '-o', linewidth=3)\n\n # Plot the line for perfect confusion\n ax.plot([0, 1], [0, 1], '--', linewidth=3)\n\n ax.set_title('{} ROC for Rap1 Identification'.format(title))\n ax.set_xlabel('False Positive Rate')\n ax.set_ylabel('True Positive Rate')\n ax.set_xlim([-0.01, 1.01])\n ax.set_ylim([-0.01, 1.01])\n\n fig.savefig(str(plotfile))\n plt.close()", "def roc2(fpr, tpr, roc_auc):\n plt.figure()\n plt.plot(fpr, tpr, color='darkorange', lw=2, label='ROC curve (area = %0.2f)' % roc_auc)\n plt.plot([0, 1], [0, 1], color='navy', lw=1, linestyle='--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n 
plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('Receiver operating characteristic')\n plt.legend(loc=\"lower right\")\n plt.show()", "def plot_ROC():\r\n fpr = dict()\r\n tpr = dict()\r\n roc_auc = dict()\r\n threshold = dict()\r\n \r\n for i in range(n_classes):\r\n \r\n fpr[i], tpr[i], threshold[i] = roc_curve(y_test[:, i], y_pred[:, i])\r\n \r\n roc_auc[i] = auc(fpr[i], tpr[i])\r\n\r\n fig = plt.figure()\r\n ax = fig.add_subplot(1, 1, 1)\r\n ax.set_aspect('equal')\r\n colors = cycle(['aqua', 'red', 'purple', 'royalblue', 'black'])\r\n \r\n for i, color in zip(range(n_classes), colors):\r\n \r\n plt.plot(\r\n fpr[i],\r\n tpr[i],\r\n color=color,\r\n linewidth=3,\r\n #label='Class {0} (AUC {1:0.3f})'\r\n label='AUC {1:0.2f}' \r\n ''.format(i+1, roc_auc[i])\r\n )\r\n\r\n #plt.plot([0, 1], [0, 1], 'k--', linewidth=3)\r\n plt.xlim([-0.03, 1])\r\n plt.ylim([0, 1.03])\r\n ax.axhline(y=0, color='k', linewidth=4)\r\n ax.axhline(y=1.03, color='k', linewidth=4)\r\n ax.axvline(x=-0.03, color='k', linewidth=4)\r\n ax.axvline(x=1, color='k', linewidth=4) \r\n plt.xticks([0, 0.2, 0.4, 0.6, 0.8, 1.0], fontsize=16, fontweight='bold')\r\n plt.yticks([0, 0.2, 0.4, 0.6, 0.8, 1.0], fontsize=16, fontweight='bold')\r\n #plt.xlabel('False Positive Rate', fontweight='bold', fontsize=16)\r\n #plt.ylabel('True Positive Rate', fontweight='bold', fontsize=16)\r\n plt.legend(loc='lower right', prop={'size': 14, 'weight': 'bold'}) \r\n plt.grid(True)\r\n\r\n ROC_filename = 'ROC' + '_' + \\\r\n str(DNN_Model) + '_' + \\\r\n strftime(\"%d-%b-%Y-%H-%M-%S\", gmtime()) + '.png'\r\n \r\n plt.savefig(\r\n os.path.join(result_dir, ROC_filename),\r\n format='png',\r\n dpi=600\r\n )\r\n\r\n plt.show()\r\n plt.close()", "def plot():\n pass", "def roc_plot(label, fpr, tpr, roc_auc):\n plt.figure()\n for i in range(len(label)):\n plt.plot(fpr[i], tpr[i], label=label[i] + ' AUC = %0.2f' % roc_auc[i], alpha=0.75)\n plt.plot([0, 1], [0, 1], 'r--')\n plt.xlim([-0.01, 1.01])\n plt.ylim([-0.01, 1.01])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('ROC Curve')\n plt.legend(loc='lower right')\n plt.show()", "def plotCaliCurve(constants, data, outName):\n x=np.linspace(min(data[:,0]),max(data[:,0]),1000)\n plt.figure()\n plt.rcParams.update({'font.size' : 16})\n plt.scatter(data[:,0],data[:,1])\n plt.plot(x,LangmuirCurve(x,constants[0],constants[1],constants[2],constants[3]))\n #plt.xlabel(\"MG Concentration (nM)\")\n #plt.ylabel(\"Relative SHS signal (Arb. 
Units)\")\n plt.savefig(outName + \"_cali_model_plot.png\")\n plt.show()", "def _plot_robot(self):\n try:\n x = 200\n y = 200\n self.ax1.plot(x, y, marker='o', markersize=10, linestyle='None')\n except Exception as err:\n rospy.loginfo(err)", "def plot(self):\n\t\tself.plotOfCos1().plot()", "def plot(self):\n\t\t\t\n\t\tfig,p1=_plt.subplots(4,sharex=True)\n\t\tp1[0].plot(self.time*1e3,self.eRogA,label='Rogowski A')\n\t\tp1[1].plot(self.time*1e3,self.eRogB,label='Rogowski B')\n\t\tp1[2].plot(self.time*1e3,self.eRogC,label='Rogowski C')\n\t\tp1[3].plot(self.time*1e3,self.eRogD,label='Rogowski D')\n\t\t_plot.finalizeSubplot(p1,xlabel='Time (ms)',ylabel='Current (A)')\n\t\t_plot.finalizeFigure(fig,title=self.title)\n\t\t\n\t\treturn p1", "def plotCurves(self, dataByModel):\n prFigure = pyplot.figure()\n self.configChart()\n prAx = prFigure.add_subplot(111)\n prAx.set_xlabel('Recall')\n prAx.set_ylabel('Precision')\n prAx.set_title('PR Curve')\n prAx.grid(True)\n\n rocFigure = pyplot.figure()\n self.configChart()\n rocAx = rocFigure.add_subplot(111)\n rocAx.set_xlabel('Fallout / FPR')\n rocAx.set_ylabel('Recall')\n rocAx.set_title('ROC Curve')\n rocAx.grid(True)\n\n corrFigure = pyplot.figure()\n self.configChart()\n corrAx = corrFigure.add_subplot(111)\n corrAx.set_xlabel('predict score')\n corrAx.set_ylabel('real score')\n corrAx.set_title('Correlation Curve')\n corrAx.grid(True)\n\n precisionFigure = pyplot.figure()\n self.configChart()\n precisionAx = precisionFigure.add_subplot(111)\n precisionAx.set_xlabel('score')\n precisionAx.set_ylabel('Precision')\n precisionAx.set_title('Threshold score vs precision')\n precisionAx.grid(True)\n\n recallFigure = pyplot.figure()\n self.configChart()\n recallAx = recallFigure.add_subplot(111)\n recallAx.set_xlabel('score')\n recallAx.set_ylabel('Recall')\n recallAx.set_title('Threshold score vs recall')\n recallAx.grid(True)\n\n falloutFigure = pyplot.figure()\n self.configChart()\n falloutAx = falloutFigure.add_subplot(111)\n falloutAx.set_xlabel('score')\n falloutAx.set_ylabel('Fallout (False Positive Rate)')\n falloutAx.set_title('Threshold score vs fallout')\n falloutAx.grid(True)\n\n for (model, data) in list(dataByModel.items()):\n (recalls, precisions) = list(zip(*(data['PR'])))\n prAx.plot(recalls, precisions, marker='o', linestyle='--', label=model)\n\n (fallouts, recalls) = list(zip(*(data['ROC'])))\n rocAx.plot(fallouts, recalls, marker='o', linestyle='--', label=model)\n\n (pCtrs, eCtrs) = list(zip(*(data['CORR'])))\n corrAx.plot(pCtrs, eCtrs, label=model)\n\n (score, recall, precision, fallout) = list(zip(*(data['cutoff'])))\n\n recallAx.plot(score, recall, label=model + '_recall')\n precisionAx.plot(score, precision, label=model + '_precision')\n falloutAx.plot(score, fallout, label=model + '_fallout')\n\n # saving figures\n ensure_dir(self.output_dir)\n prAx.legend(loc='upper right', shadow=True)\n prFigure.savefig('%s/pr_curve.png' % self.output_dir)\n\n rocAx.legend(loc='lower right', shadow=True)\n rocFigure.savefig('%s/roc_curve.png' % self.output_dir)\n\n corrAx.legend(loc='upper left', shadow=True)\n corrFigure.savefig('%s/corr_curve.png' % self.output_dir)\n\n precisionAx.legend(loc='upper left', shadow=True)\n precisionFigure.savefig('%s/precision.png' % self.output_dir)\n\n recallAx.legend(loc='lower left', shadow=True)\n recallFigure.savefig('%s/recall.png' % self.output_dir)\n\n falloutAx.legend(loc='upper right', shadow=True)\n falloutFigure.savefig('%s/fallout.png' % self.output_dir)\n\n pyplot.close()\n pngs = 
'{result}/pr_curve.png {result}/roc_curve.png {result}/corr_curve.png {result}/precision.png {result}/recall.png {result}/fallout.png'.format(result=self.output_dir)\n print('png: ', pngs)", "def pr_curve(model, X_train, y_train, X_test, y_test, train=True):\n from sklearn.metrics import precision_recall_curve\n if train==True:\n ypredTrain = model.predict(X_train) \n precisions, recalls, thresholds = precision_recall_curve(y_train, ypredTrain)\n plt.plot(precisions, recalls, linewidth=3, color='r', linestyle='-')\n plt.rc('xtick', labelsize=10) \n plt.rc('ytick', labelsize=10) \n plt.xlabel(\"Precision\", size=12)\n plt.ylabel(\"Recall\", size=12)\n plt.grid()\n plt.rcParams['figure.facecolor'] = '#F2F3F4' \n plt.rcParams['axes.facecolor'] = '#F2F3F4' \n plt.title(\"PR Curve: Precision/Recall Trade-off\\n\\n(Train)\\n\", size=14) \n plt.show()\n elif train==False:\n ypredTest = model.predict(X_test)\n precisions, recalls, thresholds = precision_recall_curve(y_test, ypredTest)\n plt.plot(precisions, recalls, linewidth=3, color='b', linestyle='-')\n plt.rc('xtick', labelsize=10) \n plt.rc('ytick', labelsize=10) \n plt.xlabel(\"Precision\", size=12)\n plt.ylabel(\"Recall\", size=12)\n plt.grid()\n plt.rcParams['figure.facecolor'] = '#F2F3F4' \n plt.rcParams['axes.facecolor'] = '#F2F3F4'\n plt.title(\"PR Curve: Precision/Recall Trade-off\\n\\n(Test)\\n\", size=14)\n plt.show()", "def rocs(test_set_y_org,test_set_y_pred_prob,methods,linestyles,classes_unique,plot_curve=False,filename=\"./fig_roc.pdf\",colors=None,positive_class_for_two_classes=None,figwidth=5,figheight=5):\n from sklearn.metrics import roc_curve\n #from sklearn.metrics import auc\n from sklearn.metrics import roc_auc_score\n from scipy import interp\n import matplotlib as mpl\n mpl.use(\"pdf\")\n import matplotlib.pyplot as plt\n \n n_classes=len(classes_unique)\n test_set_Y_org,test_set_y_org_unique=membership_vector_to_indicator_matrix(test_set_y_org)\n\n num_methods=len(methods)\n roc_aucs=[0]*num_methods\n names=[None]*num_methods\n for m in range(num_methods):\n fpr = dict()\n tpr = dict()\n roc_auc = dict()\n for c in range(n_classes):\n fpr[c], tpr[c], _ = roc_curve(test_set_Y_org[:, c], test_set_y_pred_prob[m][:, c])\n roc_auc[c] = roc_auc_score(test_set_Y_org[:, c], test_set_y_pred_prob[m][:, c])\n\n # Compute macro-average ROC curve and AUROC area\n # First aggregate all false positive rates\n all_fpr = np.unique(np.concatenate([fpr[c] for c in range(n_classes)]))\n # Then interpolate all ROC curves at this points\n mean_tpr = np.zeros_like(all_fpr)\n for c in range(n_classes):\n mean_tpr += interp(all_fpr, fpr[c], tpr[c])\n # Finally average it and compute AUC\n mean_tpr /= n_classes\n fpr[\"macro\"] = all_fpr\n tpr[\"macro\"] = mean_tpr\n #roc_auc[\"macro\"] = auc(fpr[\"macro\"], tpr[\"macro\"])\n\n # Compute micro-average PRC curve and PRC areas\n fpr[\"micro\"], tpr[\"micro\"], _ = roc_curve(test_set_Y_org.ravel(), test_set_y_pred_prob[m].ravel())\n roc_auc[\"macro\"] = roc_auc_score(test_set_Y_org, test_set_y_pred_prob[m], average=\"macro\") # micro macro, weighted, or samples\n roc_auc[\"micro\"] = roc_auc_score(test_set_Y_org, test_set_y_pred_prob[m],average=\"micro\") # micro macro, weighted, or samples\n roc_auc[\"weighted\"] = roc_auc_score(test_set_Y_org, test_set_y_pred_prob[m], average=\"weighted\") # micro macro, weighted, or samples\n roc_auc[\"samples\"] = roc_auc_score(test_set_Y_org, test_set_y_pred_prob[m], average=\"samples\") # micro macro, weighted, or samples\n\n if plot_curve:\n if m==0:\n 
fig=plt.figure(num=1,figsize=(figwidth,figheight))\n ax=fig.add_subplot(1,1,1)\n ax.plot([0, 1], [0, 1], 'k--')\n if n_classes>2 or positive_class_for_two_classes is None:\n ax.plot(fpr[\"macro\"], tpr[\"macro\"], linestyle=linestyles[m],linewidth=1,color=colors[n_classes],label='{0}: macro-avg ROC (area={1:0.4f})'.format(methods[m], roc_auc[\"macro\"]))\n\n for c in range(n_classes):\n if positive_class_for_two_classes is None or (n_classes==2 and positive_class_for_two_classes==c):\n ax.plot(fpr[c], tpr[c],linestyle=linestyles[m],linewidth=1,color=colors[c],label='{0}: ROC of {1} (area={2:0.4f})'.format(methods[m], classes_unique[c], roc_auc[c]))\n\n # add some text for labels, title and axes ticks\n if m==num_methods-1:\n ax.set_ylim(0.0,1.0)\n ax.set_xlim(0.0,1.0)\n ax.set_ylabel(\"True Positive Rate\",fontsize=12)\n ax.set_xlabel(\"False Positive Rate\",fontsize=12) \n #ax.set_title(\"\",fontsize=15)\n ax.legend(loc=\"lower right\",fontsize=8)\n #plt.subplots_adjust(bottom=0.12) # may this is not working because of the following setting\n fig.savefig(filename,bbox_inches='tight')\n plt.close(fig)\n\n roc_auc_list=[roc_auc[c] for c in range(n_classes)]\n roc_auc_list.extend([roc_auc[\"macro\"],roc_auc[\"micro\"],roc_auc[\"weighted\"],roc_auc[\"samples\"]])\n roc_auc=np.array(roc_auc_list)\n name=[methods[m]+\"_AUROC_\" + c for c in classes_unique]\n name.extend([\"macro\",\"micro\",\"weighted\",\"samples\"])\n name=np.array(name)\n\n roc_aucs[m]=roc_auc\n names[m]=name\n \n return roc_aucs,names", "def plot_roc(X,y,test_preds,fname=\"res/roc.png\"):\n\t#Retrieve multiple fpr and tpr values for different thresholds\n\tfpr, tpr, thresholds = roc_curve(y,test_preds)\n\tplt.plot(fpr, tpr)\n\tplt.title(auc(fpr, tpr))\n\tplt.savefig(fname, bbox_inches='tight')\n\tplt.close()", "def plotPsCurve(mcoolsPath:list,celltypeNames:list,chroms:list,resolution=100000,title=\"P(s) curve\",plotType=\"interaction\",base=1.1,log_x=True,log_y=True):\n import plotly.express as px\n from IPython.display import Image\n\n #Calculate P(s) data, get a 3 column pd.DataFrame with (bin,resolution,celltype)\n psDataAll = []\n for i in range(len(mcoolsPath)):\n psDataAll.append(compartment.getPsData(mcoolsPath[i],[\"chr\"+str(i+1) for i in range(len(chroms))],resolution=resolution,celltype=celltypeNames[i],base=base)) \n merged = pd.concat(psDataAll)\n\n data = pd.merge(merged,merged.groupby(\"celltype\").sum(),how=\"left\",on=\"celltype\").assign(prob= lambda df: df.aveCount_x/df.aveCount_y)\n\n fig = px.line(x=data[\"bin_x\"]*resolution,y=data[\"prob\"],color=data[\"celltype\"],title=title,log_x=log_x,log_y=log_y).update_layout(template='simple_white')\n fig.update_layout(width=800,height=600)\n fig.update_layout(xaxis_title=\"Genomic Distance(bp)\",\n yaxis_title=\"Contact Probability\")\n if(plotType == \"interaction\"):\n return fig\n else : return Image(fig.to_image(format=\"png\", engine=\"kaleido\"))", "def gen_plot(fpr, tpr):\n plt.figure()\n plt.xlabel(\"FPR\", fontsize=14)\n plt.ylabel(\"TPR\", fontsize=14)\n plt.title(\"ROC Curve\", fontsize=14)\n plot = plt.plot(fpr, tpr, linewidth=2)\n buf = io.BytesIO()\n plt.savefig(buf, format='jpeg')\n buf.seek(0)\n plt.close()\n # plt.show()\n return buf", "def plot_r(self):\n for k, v, o in self.data:\n self.plot_r1(k, v, o)", "def plot_roc_distributions(self, model_str, resampling_number, roc_curve_steps, roc_plot_path):\n sampling_types = ['Normal', 'Oversampling', 'Undersampling']\n\n PLOT_MARGIN = 0.05\n plt.rcParams[\"figure.figsize\"] = (16, 9)\n 
plt.subplots_adjust(wspace=0.2, hspace=0.4)\n sub_plot_index = 1\n\n for sampling_type in sampling_types:\n mean_fpr, mean_tpr, mean_threshold, mean_auc, std_auc = self._compute_mean_auc_data(sampling_type, model_str, resampling_number, roc_curve_steps)\n\n plt.subplot(int('22' + str(sub_plot_index)))\n\n sub_plot_index += 1\n\n plt.plot(mean_fpr, mean_tpr, color='g', label='AUC:{0}, STD:{1}'.format(round(mean_auc, 2), round(std_auc, 2)))\n plt.plot(mean_fpr, mean_threshold, linestyle='--', lw=2, color='b', label='Thresholds')\n plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r', label='Chance')\n\n plt.xlim([0 - PLOT_MARGIN, 1 + PLOT_MARGIN])\n plt.ylim([0 - PLOT_MARGIN, 1 + PLOT_MARGIN])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title(sampling_type + ' ROC Distribution')\n plt.legend(loc=\"lower right\")\n\n plt.savefig(roc_plot_path)\n plt.clf()", "def plot_roc_curve(y_true, y_pred_prob, show_threshold=False, **params):\n\n figure = plt.figure(figsize=params.get('figsize', (17, 10)))\n fpr, tpr, thresholds = roc_curve(y_true, y_pred_prob)\n roc_auc = auc(fpr, tpr)\n plt.plot(fpr, tpr, label='ROC curve (area = %0.5f)' % roc_auc)\n plt.plot([0, 1], [0, 1], 'k--')\n plt.xlim([-0.05, 1.05])\n plt.ylim([-0.05, 1.05])\n plt.xticks(np.arange(0.0, 1.1, step=0.1))\n plt.yticks(np.arange(0.0, 1.1, step=0.1))\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('ROC curve')\n plt.legend(loc=\"lower right\")\n if show_threshold:\n ax2 = plt.gca().twinx()\n ax2.plot(fpr, thresholds, markeredgecolor='r',\n linestyle='dashed', color='r')\n ax2.set_ylabel('Threshold', color='r')\n ax2.set_ylim([0.0, 1.0])\n ax2.set_xlim([0.0, 1.0])\n\n plt.show()\n\n return figure, roc_auc", "def _roc_plot_single(metrics, save_name):\n plt.figure()\n plt.plot([0, 1], [0, 1], \"k--\")\n plt.plot(metrics[\"fpr\"], metrics[\"tpr\"], \"r\", linewidth=2)\n plt.xscale(\"log\")\n plt.yscale(\"log\")\n plt.grid()\n plt.xlabel(\"True Positive Rate\")\n plt.ylabel(\"False Positive Rate\")\n plt.tight_layout()\n plt.savefig(save_name)", "def plot_final_roc(prediction_matrix, model_names, y_test, PATH = None):\n plt.figure(figsize=(10, 8))\n for i, model in enumerate(model_names): \n predictions = prediction_matrix[:,i]\n fpr, tpr, threshholds = roc_curve(y_test, predictions)\n sns.set_style('darkgrid', {'axes.facecolor': '0.9'})\n lw = 2\n plt.plot(fpr, tpr,\n lw=lw, label=f'{model_names[i]} AUC: {round(auc(fpr, tpr), 3)}')\n plt.plot([0, 1], [0, 1], lw=lw, linestyle='--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.yticks([i/20.0 for i in range(21)], size = 14)\n plt.xticks([i/20.0 for i in range(21)], rotation = 45, size = 14)\n plt.xlabel('False Positive Rate', size =16)\n plt.ylabel('True Positive Rate', size =16)\n plt.title('ROC Curve', size = 20)\n plt.legend(loc='lower right', prop = {\"size\" : 20})\n if PATH:\n plt.savefig(PATH, bbox_inches='tight', transparent = True)\n plt.show()", "def rplot(Qz, R, format):\n # plt.hold(True)\n plt.figure()\n for name,xs in zip(('++','+-','-+','--'),R):\n rr = xs.real\n if (rr>1e-8).any():\n plt.plot(Qz, rr, format, label=name + 'r')\n plt.legend()\n plt.figure()\n for name,xs in zip(('++','+-','-+','--'),R):\n ri = xs.imag\n if (ri>1e-8).any():\n plt.plot(Qz, ri, format, label=name + 'i')\n plt.legend()\n\n plt.figure()\n for name,xs in zip(('++','+-','-+','--'),R):\n phi = np.arctan2(xs.imag, xs.real)\n if (ri>1e-8).any():\n plt.plot(Qz, phi, format, label=name + 'i')\n plt.legend()", "def gen_plot(fpr, 
tpr):\n plt.figure()\n plt.xlabel(\"FPR\", fontsize=14)\n plt.ylabel(\"TPR\", fontsize=14)\n plt.title(\"ROC Curve\", fontsize=14)\n plot = plt.plot(fpr, tpr, linewidth=2)\n buf = io.BytesIO()\n plt.savefig(buf, format='jpeg')\n buf.seek(0)\n plt.close()\n return buf", "def plot_curve(self, true_values, predictions, ax=None, title='ROC', label='ROC', lw=1, add_auc=True, **kwargs):\n fpr, tpr, _ = roc_curve(true_values, predictions)\n roc_auc = auc(fpr, tpr)\n label_auc = label + ': {:.3f} AUC'.format(roc_auc)\n logging.info('ROC result: %s', label_auc)\n ax.plot(fpr, tpr, lw=lw, label=label_auc if add_auc else label, **kwargs)\n ax.set_title(title)\n ax.set_xlabel('FPR')\n ax.set_ylabel('TPR')\n ax.legend(loc='lower right', frameon=False)\n return ax", "def main():\n ### read parameters from input file\n charfile, headerfile, ofname, nfil, nskip, nbin, rmax, ntotal = read_settings()\n ### loop over the entire trajectory and compute the center of mass, correct for pbc\n r, pol_corr = compute_polarity_correlation(charfile, headerfile, nfil, nskip, nbin, rmax, ntotal)\n\n ### write results to file and generage a plot\n # generate folder structure\n os.system('mkdir ' + ofname)\n # write data of the averaged polarity correlation\n ofile = open(ofname + '/pol_correlation.data', 'w')\n ofile.write('polarity correlation function\\n\\n')\n ofile.write('r_min\\tr_max\\tpol_corrlation\\n')\n for i in range(nbin):\n ofile.write(str(r[i]) + '\\t' + str(r[i+1]) + '\\t' + str(pol_corr[i]) + '\\n')\n ofile.close()\n # gen figure of the averaged polarity correlation\n fig = plt.figure()\n ax = plt.subplot(111)\n ax.plot(0.5*(r[:-1]+r[1:]), pol_corr)\n ax.set_xlabel(r'r [$\\sigma]')\n ax.set_ylabel(r'g_p(r)')\n ax.set_title('Polarity Correlation Function')\n plt.savefig(ofname + '/pol_correlation.png')\n plt.close()\n return", "def plot_curve(self, fig, ax, linewidth=1.5, linestyle='-', color='black', u1=0.00, u2=1.00):\n\n # One dimension (law of evolution)\n if self.ndim == 1:\n u = np.linspace(u1, u2, 501)\n X = np.real(self.get_value(u))\n line, = ax.plot(u, X[0,:])\n line.set_linewidth(linewidth)\n line.set_linestyle(linestyle)\n line.set_color(color)\n line.set_marker(' ')\n # line.set_label(' ')\n\n # Two dimensions (plane curve)\n elif self.ndim == 2:\n u = np.linspace(u1, u2, 501)\n X, Y = np.real(self.get_value(u))\n line, = ax.plot(X, Y)\n line.set_linewidth(linewidth)\n line.set_linestyle(linestyle)\n line.set_color(color)\n line.set_marker(' ')\n # line.set_label(' ')\n\n # Three dimensions (space curve)\n elif self.ndim == 3:\n u = np.linspace(u1, u2, 501)\n X, Y, Z = np.real(self.get_value(u))\n line, = ax.plot(X, Y, Z)\n line.set_linewidth(linewidth)\n line.set_linestyle(linestyle)\n line.set_color(color)\n line.set_marker(' ')\n # line.set_label(' ')\n\n else: raise Exception('The number of dimensions must be 1, 2 or 3')\n\n return fig, ax", "def plot(self):\n pass", "def plot_ROC(model, x_test, y_test, save_folder): \n predicted = model.predict(x_test).ravel()\n actual = y_test.ravel()\n fpr, tpr, thresholds = roc_curve(actual, predicted, pos_label=None)\n roc_auc = auc(fpr, tpr)\n plt.title('Test ROC AUC')\n plt.plot(fpr, tpr, 'b', label='AUC = %0.3f' % roc_auc)\n plt.legend(loc='lower right')\n plt.plot([0,1],[0,1],'r--')\n plt.xlim([0.0,1.0])\n plt.ylim([0.0,1.0])\n plt.ylabel('True Positive Rate')\n plt.xlabel('False Positive Rate')\n plt.savefig(save_folder + '/ROC.png')\n plt.show()\n plt.close()", "def plot_rfs(self):\n self.xe = self.data['XE']\n self.ye = self.data['YE']\n# 
self.IE = self.data['IE']\n self.Var = self.data['Var']\n std = np.sqrt(np.mean(self.Var))\n fig = plt.gcf()\n ax = plt.gca()\n ax.set_xlim((np.min(self.xe), np.max(self.xe)))\n ax.set_ylim((np.min(self.ye), np.max(self.ye)))\n for xe, ye in zip(self.xe, self.ye):\n circ = plt.Circle((xe, ye), std, color='b', alpha=0.4)\n fig.gca().add_artist(circ)", "def roc(test_set_y_org,test_set_y_pred_prob,classes_unique,plot_curve=False,filename=\"./fig_roc.pdf\",colors=None,positive_class_for_two_classes=None,figwidth=5,figheight=5):\n from sklearn.metrics import roc_curve\n #from sklearn.metrics import auc\n from sklearn.metrics import roc_auc_score\n from scipy import interp\n import matplotlib as mpl\n mpl.use(\"pdf\")\n import matplotlib.pyplot as plt\n \n fpr = dict()\n tpr = dict()\n roc_auc = dict()\n n_classes=len(classes_unique)\n test_set_Y_org,test_set_y_org_unique=membership_vector_to_indicator_matrix(test_set_y_org)\n \n for c in range(n_classes):\n fpr[c], tpr[c], _ = roc_curve(test_set_Y_org[:, c], test_set_y_pred_prob[:, c])\n roc_auc[c] = roc_auc_score(test_set_Y_org[:, c], test_set_y_pred_prob[:, c])\n \n # Compute macro-average ROC curve and AUROC area\n # First aggregate all false positive rates\n all_fpr = np.unique(np.concatenate([fpr[c] for c in range(n_classes)]))\n # Then interpolate all ROC curves at this points\n mean_tpr = np.zeros_like(all_fpr)\n for c in range(n_classes):\n mean_tpr += interp(all_fpr, fpr[c], tpr[c])\n # Finally average it and compute AUC\n mean_tpr /= n_classes\n fpr[\"macro\"] = all_fpr\n tpr[\"macro\"] = mean_tpr\n #roc_auc[\"macro\"] = auc(fpr[\"macro\"], tpr[\"macro\"])\n \n # Compute micro-average PRC curve and PRC areas\n fpr[\"micro\"], tpr[\"micro\"], _ = roc_curve(test_set_Y_org.ravel(), test_set_y_pred_prob.ravel())\n roc_auc[\"macro\"] = roc_auc_score(test_set_Y_org, test_set_y_pred_prob, average=\"macro\") # micro macro, weighted, or samples\n roc_auc[\"micro\"] = roc_auc_score(test_set_Y_org, test_set_y_pred_prob,average=\"micro\") # micro macro, weighted, or samples\n roc_auc[\"weighted\"] = roc_auc_score(test_set_Y_org, test_set_y_pred_prob, average=\"weighted\") # micro macro, weighted, or samples\n roc_auc[\"samples\"] = roc_auc_score(test_set_Y_org, test_set_y_pred_prob, average=\"samples\") # micro macro, weighted, or samples\n\n if plot_curve:\n fig=plt.figure(num=1,figsize=(figwidth,figheight))\n ax=fig.add_subplot(1,1,1)\n ax.plot([0, 1], [0, 1], 'k--')\n if n_classes>2 or positive_class_for_two_classes is None:\n ax.plot(fpr[\"macro\"], tpr[\"macro\"], linewidth=1,color=colors[n_classes],label='macro-avg ROC (area={0:0.4f})'.format(roc_auc[\"macro\"]))\n \n for c in range(n_classes):\n if positive_class_for_two_classes is None or (n_classes==2 and positive_class_for_two_classes==c):\n ax.plot(fpr[c], tpr[c],linewidth=1,color=colors[c],label='ROC of {0} (area={1:0.4f})'.format(classes_unique[c], roc_auc[c]))\n \n # add some text for labels, title and axes ticks\n ax.set_ylim(0.0,1.0)\n ax.set_xlim(0.0,1.0)\n ax.set_ylabel(\"True Positive Rate\",fontsize=12)\n ax.set_xlabel(\"False Positive Rate\",fontsize=12) \n #ax.set_title(\"\",fontsize=15)\n ax.legend(loc=\"lower right\",fontsize=8)\n #plt.subplots_adjust(bottom=0.12) # may this is not working because of the following setting\n fig.savefig(filename,bbox_inches='tight')\n plt.close(fig)\n\n roc_auc_list=[roc_auc[c] for c in range(n_classes)]\n roc_auc_list.extend([roc_auc[\"macro\"],roc_auc[\"micro\"],roc_auc[\"weighted\"],roc_auc[\"samples\"]])\n 
roc_auc=np.array(roc_auc_list)\n names=[\"AUROC_\" + c for c in classes_unique]\n names.extend([\"macro\",\"micro\",\"weighted\",\"samples\"])\n names=np.array(names)\n return roc_auc,names", "def gen_plot(fpr, tpr):\n plt.figure()\n plt.xlabel(\"FPR\", fontsize = 14)\n plt.ylabel(\"TPR\", fontsize = 14)\n plt.title(\"ROC Curve\", fontsize = 14)\n plot = plt.plot(fpr, tpr, linewidth = 2)\n buf = io.BytesIO()\n plt.savefig(buf, format = 'jpeg')\n buf.seek(0)\n plt.close()\n\n return buf", "def gen_plot(fpr, tpr):\n plt.figure()\n plt.xlabel(\"FPR\", fontsize = 14)\n plt.ylabel(\"TPR\", fontsize = 14)\n plt.title(\"ROC Curve\", fontsize = 14)\n plot = plt.plot(fpr, tpr, linewidth = 2)\n buf = io.BytesIO()\n plt.savefig(buf, format = 'jpeg')\n buf.seek(0)\n plt.close()\n\n return buf", "def roc(y_true, y_prob, ARGS):\n roc_auc = roc_auc_score(y_true, y_prob)\n if ARGS.graphs:\n fpr, tpr, _ = roc_curve(y_true, y_prob)\n plt.plot(fpr, tpr, color='darkorange', lw=2,\n label='ROC curve (Area = %0.3f)'% roc_auc)\n plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate (1 - Specifity)')\n plt.ylabel('True Positive Rate (Sensitivity)')\n plt.title('Receiver Operating Characteristic')\n plt.legend(loc=\"lower right\")\n print(f'ROC Curve saved to {ARGS.out_directory}/roc.png')\n plt.savefig(f'{ARGS.out_directory}/roc.png')\n else:\n print('ROC-AUC %0.3f' % roc_auc)", "def plot_roc_curve(tprs, aucs, tag=''):\n fig, ax = plt.subplots()\n ax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',\n label='Chance', alpha=.8)\n\n mean_tpr = np.mean(tprs, axis=0)\n mean_tpr[-1] = 1.0\n mean_fpr = np.linspace(0, 1, 100)\n\n mean_auc = auc(mean_fpr, mean_tpr)\n std_auc = np.std(aucs)\n ax.plot(mean_fpr, mean_tpr, color='b',\n label=r'Mean ROC (AUC = %0.2f $\\pm$ %0.2f)' % (mean_auc, std_auc),\n lw=2, alpha=.8)\n\n std_tpr = np.std(tprs, axis=0)\n tprs_upper = np.minimum(mean_tpr + std_tpr, 1)\n tprs_lower = np.maximum(mean_tpr - std_tpr, 0)\n ax.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,\n label=r'$\\pm$ 1 std. 
dev.')\n\n ax.set(xlim=[-0.05, 1.05], ylim=[-0.05, 1.05],\n title=\"Receiver operating characteristic example\")\n ax.legend(loc=\"lower right\")\n plt.tight_layout()\n plt.savefig(f'roc_{tag}.png')\n plt.show()", "def draw_roc_curve(fpr, tpr, labels=None, size_inch=(5, 5), dpi=160, show=False, block=False):\n if not isinstance(fpr, np.ndarray) or not isinstance(tpr, np.ndarray):\n raise AssertionError(\"invalid inputs\")\n if fpr.shape != tpr.shape:\n raise AssertionError(\"mismatched input sizes\")\n if fpr.ndim == 1:\n fpr = np.expand_dims(fpr, 0)\n if tpr.ndim == 1:\n tpr = np.expand_dims(tpr, 0)\n if labels is not None:\n if isinstance(labels, str):\n labels = [labels]\n if len(labels) != fpr.shape[0]:\n raise AssertionError(\"should have one label per curve\")\n else:\n labels = [None] * fpr.shape[0]\n fig = plt.figure(num=\"roc\", figsize=size_inch, dpi=dpi, facecolor=\"w\", edgecolor=\"k\")\n fig.clf()\n ax = fig.add_subplot(1, 1, 1)\n import sklearn.metrics\n for idx, label in enumerate(labels):\n auc = sklearn.metrics.auc(fpr[idx, ...], tpr[idx, ...])\n if label is not None:\n ax.plot(fpr[idx, ...], tpr[idx, ...], \"b\", label=(\"%s [auc = %0.3f]\" % (label, auc)))\n else:\n ax.plot(fpr[idx, ...], tpr[idx, ...], \"b\", label=(\"auc = %0.3f\" % auc))\n ax.legend(loc=\"lower right\")\n ax.plot([0, 1], [0, 1], 'r--')\n ax.set_xlim(0, 1)\n ax.set_ylim(0, 1)\n ax.set_ylabel(\"True Positive Rate\")\n ax.set_xlabel(\"False Positive Rate\")\n fig.set_tight_layout(True)\n if show:\n fig.show()\n if block:\n plt.show(block=block)\n return fig\n plt.pause(0.5)\n return fig, ax", "def plotArc(self):\n\n # plot the spectra\n self.spcurve,=self.axes.plot(self.xarr,self.farr,linewidth=0.5,linestyle='-',marker='None',color='b')", "def single_roc_plot(y_true, y_probas, text=None, title='ROC Curves', figsize=None, title_fontsize=\"large\", text_fontsize=\"medium\"):\n fig, ax = plt.subplots(1, 1, figsize=figsize)\n ax.set_title(title, fontsize=title_fontsize)\n fpr, tpr, _ = roc_curve(y_true, y_probas, pos_label=1)\n roc_auc = auc(fpr, tpr)\n ax.plot(fpr, tpr, label='ROC curve '\n '(area = {0:0.2f})'.format(roc_auc),\n color='blue', linewidth=2)\n ax.plot([0, 1], [0, 1], 'k--', lw=2)\n ax.set_xlim([0.0, 1.0])\n ax.set_ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n ax.set_ylabel('True Positive Rate', fontsize=text_fontsize)\n ax.tick_params(labelsize=text_fontsize)\n ax.legend(loc='lower right', fontsize=text_fontsize)\n return fig", "def plot(self, fig=None, ax=None,\n curve=True, control_points=True, frenet_serret=False, axis_off=False, ticks_off=False):\n\n if fig is None:\n\n # One dimension (law of evolution)\n if self.ndim == 1:\n fig = plt.figure(figsize=(6, 5))\n ax = fig.add_subplot(111)\n ax.set_xlabel('$u$ parameter', fontsize=12, color='k', labelpad=12)\n ax.set_ylabel('NURBS curve value', fontsize=12, color='k', labelpad=12)\n # ax_xy.xaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%.1f'))\n # ax_xy.yaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%.1f'))\n for t in ax.xaxis.get_major_ticks(): t.label.set_fontsize(12)\n for t in ax.yaxis.get_major_ticks(): t.label.set_fontsize(12)\n if ticks_off:\n ax.set_xticks([])\n ax.set_yticks([])\n if axis_off:\n ax.axis('off')\n\n # Two dimensions (plane curve)\n elif self.ndim == 2:\n fig = plt.figure(figsize=(6, 5))\n ax = fig.add_subplot(111)\n ax.set_xlabel('$x$ axis', fontsize=12, color='k', labelpad=12)\n ax.set_ylabel('$y$ axis', fontsize=12, color='k', labelpad=12)\n # 
ax_xy.xaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%.1f'))\n # ax_xy.yaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%.1f'))\n for t in ax.xaxis.get_major_ticks(): t.label.set_fontsize(12)\n for t in ax.yaxis.get_major_ticks(): t.label.set_fontsize(12)\n if ticks_off:\n ax.set_xticks([])\n ax.set_yticks([])\n if axis_off:\n ax.axis('off')\n\n # Three dimensions (space curve)\n elif self.ndim == 3:\n fig = mpl.pyplot.figure(figsize=(6, 5))\n ax = fig.add_subplot(111, projection='3d')\n ax.view_init(azim=-120, elev=30)\n ax.grid(False)\n ax.xaxis.pane.fill = False\n ax.yaxis.pane.fill = False\n ax.zaxis.pane.fill = False\n ax.xaxis.pane.set_edgecolor('k')\n ax.yaxis.pane.set_edgecolor('k')\n ax.zaxis.pane.set_edgecolor('k')\n ax.xaxis.pane._alpha = 0.9\n ax.yaxis.pane._alpha = 0.9\n ax.zaxis.pane._alpha = 0.9\n ax.set_xlabel('$x$ axis', fontsize=12, color='k', labelpad=12)\n ax.set_ylabel('$y$ axis', fontsize=12, color='k', labelpad=12)\n ax.set_zlabel('$z$ axis', fontsize=12, color='k', labelpad=12)\n # ax_xy.xaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%.1f'))\n # ax_xy.yaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%.1f'))\n # ax_xy.zaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%.1f'))\n for t in ax.xaxis.get_major_ticks(): t.label.set_fontsize(8)\n for t in ax.yaxis.get_major_ticks(): t.label.set_fontsize(8)\n for t in ax.zaxis.get_major_ticks(): t.label.set_fontsize(8)\n ax.xaxis.set_rotate_label(False)\n ax.yaxis.set_rotate_label(False)\n ax.zaxis.set_rotate_label(False)\n if ticks_off:\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_zticks([])\n if axis_off:\n ax.axis('off')\n\n else: raise Exception('The number of dimensions must be 1, 2 or 3')\n\n\n # Add objects to the plot\n if curve: self.plot_curve(fig, ax)\n if control_points: self.plot_control_points(fig, ax)\n if frenet_serret: self.plot_frenet_serret(fig, ax)\n\n # Set the scaling of the axes\n self.rescale_plot(fig, ax)\n\n return fig, ax", "def plot_roc_curve(y_true, y_pred_proba, threshold=0.5):\n\n y_pred = predict_with_threshold(y_pred_proba, threshold)\n roc_auc = roc_auc_score(y_true, y_pred)\n fpr, tpr, thresholds = roc_curve(y_true, y_pred_proba)\n\n plt.plot( # roc auc line\n fpr, tpr,\n label='AUC={:.3f}'.format(roc_auc),\n linewidth=3, alpha=0.7)\n plt.plot( # base line\n [0, 1], [0, 1], 'r--',\n label='baseline=0.5',\n linewidth=3, alpha=0.3)\n\n plt.xlim(0, 1)\n plt.ylim(0, 1)\n plt.title('ROC curve')\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.legend(loc=\"lower right\")", "def test_make_plot_ur(self):\n print(sys._getframe().f_code.co_name)\n try:\n x = np.arange(0,6)*300000\n y = np.arange(0,6)\n pp.make_plot(x,y,plot_type='ur')\n except Exception as e:\n raise\n plt.close('all')", "def pr_plot(label, recall, precision):\n plt.figure()\n for i in range(len(label)):\n plt.plot(recall[i], precision[i], label=label[i], alpha=0.75)\n plt.xlim([-0.01, 1.01])\n plt.ylim([-0.01, 1.01])\n plt.xlabel('Recall')\n plt.ylabel('Precision')\n plt.title('PR Curve')\n plt.legend(loc='upper right')\n plt.show()", "def roi_curves(self, data):\n if not data or not any(len(d) for d in data.values()):\n self.roi_traces = None\n default_curve = hv.Curve([], 'Spectrum', 'CL').opts(color='red') \n return hv.NdOverlay({0: default_curve}).opts(show_legend=False) # code breaks without using a curve in ndoverlay\n \n curves = {}\n data = zip(data['x0'], data['x1'], data['y0'], data['y1'])\n self.roi_traces = []\n for i, (x0, x1, y0, y1) in 
enumerate(data):\n selection = self.xds.sel(x=slice(x0, x1), y=slice(y1, y0))\n selection_avg = selection.mean(['x','y'])\n self.roi_traces.append(selection_avg)\n if self.roi_toggle == 'Trans': # apparently param knows when this changes without having to make it a 'stream' var\n if i == 0:\n substrate = selection_avg.copy()\n selection_avg /= substrate\n curves[i] = hv.Curve(selection_avg)\n \n color_cycle_opts = opts.Curve(color= hv.Cycle(self.color_cycle))\n return hv.NdOverlay(curves).opts(color_cycle_opts)", "def plotOfCos1(self):\n#\t\tp1=_plot.plot(yLabel='',xLabel='time [ms]',title=self.title,\n#\t\t\t\t\t subtitle='Cos1 Rogowski',shotno=[self.shotno])\n#\t\tp1.addTrace(xData=self.time*1000,yData=self.cos1)\n#\t\t\n#\t\treturn p1\n\t\tfig,p1=_plt.subplots()\n\t\tp1.plot(self.time*1e3,self.cos1)\n\t\t_plot.finalizeSubplot(p1,xlabel='Time (ms)',ylabel='')\n\t\t_plot.finalizeFigure(fig,title=self.title)\n\t\t\n\t\treturn p1", "def roc(proba, ts, classifier_name):\n fpr, tpr, _ = roc_curve(ts, proba[:,1])\n roc_auc = auc(fpr, tpr)\n print(\"Area under the ROC curve for %s : %f\") % (classifier_name, roc_auc)\n\n l = 'ROC curve for %s (area = %0.2f)' % (classifier_name, roc_auc)\n pl.clf()\n pl.plot(fpr, tpr, label=l)\n pl.plot([0, 1], [0, 1], 'k--')\n pl.xlim([0.0, 1.0])\n pl.ylim([0.0, 1.0])\n pl.xlabel('False Positive Rate')\n pl.ylabel('True Positive Rate')\n pl.title('Receiver operating characteristic for %s' % classifier_name)\n pl.legend(loc=\"lower right\")\n pl.show()", "def _plot(self, rewards, losses, epsilons):\n plt.figure(figsize=(20,5))\n plt.subplot(131)\n plt.title('Episodic Reward')\n plt.plot(rewards)\n plt.subplot(132)\n plt.title('TD Loss')\n plt.plot(losses)\n plt.subplot(133)\n plt.title('Epsilon')\n plt.plot(epsilons)\n plt.tight_layout()\n plt.show()", "def plot_ROC_curves(fig, ax, y_all, perf, title=None):\n curves = {'IMPRESS_all': 'royalblue',\n 'IMPRESS_HE_only': 'plum',\n 'IMPRESS_IHC_only': 'pink',\n 'pathologists_eval': 'tomato'}\n \n type_convert = {'IMPRESS_all': 'IMPRESS',\n 'IMPRESS_HE_only': 'IMPRESS (H&E only)',\n 'IMPRESS_IHC_only': 'IMPRESS (IHC only)',\n 'pathologists_eval': 'Pathologists'}\n \n for fgroup in curves.keys():\n tprs = []\n aucs = []\n mean_fpr = np.linspace(0, 1, 100)\n ax.set_aspect('equal')\n for seed in range(int(y_all[fgroup].shape[1]/3)):\n y_true = y_all[fgroup].loc[:,'y_true'].iloc[:,seed]\n y_pred_proba = y_all[fgroup].loc[:,'y_pred_proba'].iloc[:,seed]\n tpr, fpr, treshold = roc_curve(y_true, 1-y_pred_proba)\n tprs.append(np.interp(mean_fpr, fpr, tpr))\n roc_auc = auc(fpr, tpr)\n aucs.append(roc_auc)\n ax.plot(fpr, tpr, color=curves[fgroup], linewidth=2, alpha=0.10, label=None)\n \n mean_tpr = np.mean(tprs, axis=0)\n mean_tpr[-1] = 1.0\n \n ax.plot(mean_fpr, mean_tpr, color=curves[fgroup],\n label=r'%s (AUC = %0.4f $\\pm$ %0.2f)' % \\\n (type_convert[fgroup], perf[fgroup].loc['AUC','mean'], perf[fgroup].loc['AUC','std']),\n linewidth=3.0, alpha=0.80)\n \n std_tpr = np.std(tprs, axis=0)\n tprs_upper = np.minimum(mean_tpr + std_tpr, 1)\n tprs_lower = np.maximum(mean_tpr - std_tpr, 0)\n \n if fgroup == 'IMPRESS_all':\n ax.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=0.1,\n label=r'$\\pm$ 1 standard deviation')\n else:\n ax.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=0.1,\n label=None)\n \n ax.set_xlabel('False positive rate')\n ax.set_ylabel('True positive rate')\n x = [0.0, 1.0]\n plt.plot(x, x, linestyle='dashed', color='red', linewidth=2.0, label='Random')\n 
plt.legend(fontsize=10, loc='best')\n \n if title is not None:\n fig.suptitle(t=title, fontsize=12)\n return fig", "def plot_ROC(y, prediction_prob, cnames, dataset_name, fname):\r\n plt.figure(figsize=(20, 10))\r\n for i in range(len(cnames)):\r\n fpr, tpr, _ = roc_curve(y, prediction_prob[i][:,1])\r\n auc = roc_auc_score(y, prediction_prob[i][:,1])\r\n plt.plot(fpr, tpr, label='%s (AUC = %.2f)'%(cnames[i], auc))\r\n plt.plot([0, 1], [0, 1], 'k--', lw=2)\r\n plt.xlim([0.0, 1.0])\r\n plt.ylim([0.0, 1.05])\r\n plt.xlabel('False Positive Rate')\r\n plt.ylabel('True Positive Rate')\r\n plt.title('ROC Curve - Binary Classification - %s'%(dataset_name))\r\n plt.legend(loc=\"lower right\")\r\n plt.savefig(fname, bbox_inches='tight')\r\n plt.close()", "def plot_roc(self, ax, prob, y, label='ROC'):\n self.df = self.calculate_threshold_values(prob, y)\n ax.plot([1] + list(self.df.fpr), [1] + list(self.df.tpr), label=label)\n x = [1] + list(self.df.fpr)\n y1 = [1] + list(self.df.tpr)\n y2 = x\n ax.fill_between(x, y1, y2, alpha=0.2)\n ax.set_xlabel('fpr')\n ax.set_ylabel('tpr')\n ax.set_title('ROC Curve')\n ax.legend()", "def plot_roc_curve(y: np.ndarray, \n y_pred_positive: np.ndarray, \n label: str) -> None:\n fpr, tpr, thresholds = metrics.roc_curve(y, y_pred_positive)\n plt.figure(figsize=(8, 8))\n plt.plot(fpr, tpr, \"b:\", linewidth=2, label=label)\n plt.fill_between(fpr, tpr, color='blue', alpha=0.3)\n plt.plot([0, 1], [0, 1], 'k--')\n plt.axis([0, 1, 0, 1])\n plt.xlabel('Odsetek fałszywie pozytywnych (FPR)', fontsize=16)\n plt.ylabel('Odsetek prawdziwie pozytywnych (TPR)', fontsize=16)\n plt.legend(loc=\"lower right\", fontsize=16)\n plt.title('Krzywa ROC, AUC={0:.3f}'.format(metrics.roc_auc_score(y, y_pred_positive)), fontsize=18)\n plt.show()", "def plot_vis_test(plotfile,pdf_file):\n\t# First some parameters looked up from configfile---------------------------------\n\t\n\tgrbdir = runconf['l2file'][0:10]\n\tpre_tstart = runconf['bkg1start']\n\tpre_tend = runconf['bkg1end']\n\ttrigtime = runconf['trigtime']\n\tgrb_tstart = runconf['transtart']\n\tgrb_tend = runconf['tranend']\n\tpost_tstart = runconf['bkg2start']\n\tpost_tend = runconf['bkg2end']\n\tt_src = grb_tend - grb_tstart \n\tt_tot = (pre_tend-pre_tstart)+(post_tend-post_tstart)\n\tra_tran = runconf['ra']\n\tdec_tran = runconf['dec']\n\tlc_bin = runconf['lc_bin']\n\talpha = runconf['alpha']\n\tbeta = runconf['beta']\n\tE0 = runconf['E0']\n\tA = runconf['A']\n\tsim_scale = t_src\n\tpixbin = int(runconf['pixsize'])\n\tcomp_bin = int(runconf['comp_bin'])\n\ttyp = runconf['typ']\n\n\t# Calling txy to calculate thetax thetay and the coordinates----------------------\n\t\n\tthetax,thetay,x,y,z,t = txy(runconf['mkffile'], trigtime, ra_tran, dec_tran)\n\t\n\t# Plot the 3d visualisation for the position of the transient---------------------\n\tplt.figure()\n\tfig = visualize_3d(grbdir,x,y,z, t, thetax, thetay, grbdir)\t\n\tpdf_file.savefig(fig)\n\t\n\t# Plotting the lightcurves for the four quadrants---------------------------------\n\tfig = plt.figure()\n\tclean_file = fits.open(runconf['infile'])\n\tplt.title('Light curves for '+grbdir + \"\\n\" + r\"$\\theta_x$={tx:0.1f} and $\\theta_y$={ty:0.1f} \".format(tx=thetax,ty=thetay))\n\t\n\tquad0 = clean_file[1].data\n\tdata0,bin_edge = np.histogram(quad0['time'], bins=np.arange(quad0['time'][0],quad0['time'][-1],lc_bin))\n\tplt.plot((bin_edge[:-1]+bin_edge[1:])/2.0,data0,label='Quad 0',lw=0.7)\n quad1 = clean_file[2].data\n\tdata1,bin_edge = np.histogram(quad1['time'], 
bins=np.arange(quad1['time'][0],quad1['time'][-1],lc_bin))\n\tplt.plot((bin_edge[:-1]+bin_edge[1:])/2.0,data1,label='Quad 1',lw=0.7) \n\tquad2 = clean_file[3].data\n\tdata2,bin_edge = np.histogram(quad2['time'], bins=np.arange(quad2['time'][0],quad2['time'][-1],lc_bin))\n\tplt.plot((bin_edge[:-1]+bin_edge[1:])/2.0,data2,label='Quad 2',lw=0.7)\n quad3 = clean_file[4].data\n data3,bin_edge = np.histogram(quad3['time'], bins=np.arange(quad3['time'][0],quad3['time'][-1],lc_bin))\n plt.plot((bin_edge[:-1]+bin_edge[1:])/2.0,data3,label='Quad 3',lw=0.7)\n\tplt.axvspan(grb_tstart,grb_tend,color='blue',alpha=0.1,label='GRB')\n\tplt.axvspan(pre_tstart,pre_tend,color='orange',alpha=0.2)\n\tplt.axvspan(post_tstart,post_tend,color='orange',alpha=0.2,label='Background')\n\tplt.legend(prop={'size':6})\n\tplt.xlim(pre_tstart-100,post_tend+100)\n\tpdf_file.savefig(fig)\n\t\n\t# Calling the sim_dph--------------------------------------------------------------\n\t\n\tgrb_flat,bkgd_flat,grb_dph,bkgd_dph,t_src,t_total = data_bkgd_image(grbdir,pre_tstart,pre_tend,grb_tstart,grb_tend,post_tstart,post_tend)\n\n\tsim_flat,sim_dph,badpix_mask,sim_err_dph = simulated_dph(grbdir,typ,t_src,alpha,beta,E0,A)\n\n\tsrc_dph = grb_dph-bkgd_dph*t_src/t_tot\n\n print \"Total counts in simulated dph: \",(sim_dph).sum()\n print \"Total counts after badpix mask is applied: \",(sim_dph*badpix_mask).sum()\n\tprint \"Excess counts in badpix masked src dph: \",(src_dph*badpix_mask).sum()\n \n\t# Plotting the DPHs before badpix correction---------------------------------------\n\t\n\tf,((ax1,ax2),(ax3,ax4)) = plt.subplots(2,2)\n\tplt.suptitle('DPHs before badpix correction for '+grbdir + \"\\n\" + r\"$\\theta_x$={tx:0.1f} and $\\theta_y$={ty:0.1f} \".format(tx=thetax,ty=thetay))\n \t# Sim\n\tim = ax3.imshow(sim_dph,interpolation='none')\n\tax3.set_title('Sim DPH',fontsize=8)\n\tax3.set_xlim(-1,128 - 0.5)\n\tax3.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\tax3.spines['left'].set_position(('data',-0.5))\n\tax3.set_yticklabels([])\n\tax3.xaxis.set_ticks(np.arange(0,128,16))\n\tf.colorbar(im,ax=ax3,fraction=0.046, pad=0.04)\n\t\n\t # Source \n\tim = ax4.imshow(src_dph,interpolation='none',vmin=0)\n\tax4.set_title('Src DPH (bkg subtracted)',fontsize=8)\n\tax4.set_xlim(-1,128 -0.5)\n\tax4.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\tax4.spines['left'].set_position(('data',-0.5))\n\tax4.set_yticklabels([])\n\tax4.xaxis.set_ticks(np.arange(0,128,16))\n\tf.colorbar(im,ax=ax4,fraction=0.046, pad=0.04)\n\n \t# Source + Background\n\tim = ax1.imshow(grb_dph,interpolation='none')\n\tax1.set_title('Src + Bkg DPH',fontsize=8)\n\tax1.set_xlim(-1,128 -0.5)\n\tax1.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\tax1.spines['left'].set_position(('data',-0.5))\n\tax1.set_yticklabels([])\n\tax1.xaxis.set_ticks(np.arange(0,128,16))\n\tf.colorbar(im,ax=ax1,fraction=0.046, pad=0.04)\n\n \t# Background\n\tim = ax2.imshow(bkgd_dph*t_src/t_total,interpolation='none')\n\tax2.set_title('Bkg DPH',fontsize=8)\n\tax2.set_xlim(-1,128 -0.5)\n\tax2.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\tax2.spines['left'].set_position(('data',-0.5))\n\tax2.set_yticklabels([])\n\tax2.xaxis.set_ticks(np.arange(0,128,16))\n\tf.colorbar(im,ax=ax2,fraction=0.046, pad=0.04)\n\tf.set_size_inches([6.5,6.5])\n\tpdf_file.savefig(f) # saves the current figure into a pdf_file page\n\t\n\t# Plotting the Badpix mask---------------------------------------------\n\n\tfig = plt.figure()\n\tax = plt.subplot(111)\n\tplt.title('Badpix Mask for '+grbdir+ 
\"\\n\" + r\"$\\theta_x$={tx:0.1f} and $\\theta_y$={ty:0.1f} \".format(tx=thetax,ty=thetay))\n\tim = ax.imshow(badpix_mask,interpolation='none')\n\tax.set_xlim(-9,128 -0.5)\n\tax.axvline(x=-5.,ymin=0,ymax=64,linewidth=5,color='k')\n\tax.spines['left'].set_position(('data',-0.5))\n\tax.xaxis.set_ticks(np.arange(0,128,16))\n\tax.yaxis.set_ticks(np.arange(0,128,16))\n\tfig.colorbar(im,ax=ax,fraction=0.046, pad=0.04)\n\t\n\tpdf_file.savefig(fig) # saves the current figure into a pdf_file page\n\n\t# Plotting badpix masked graphs--------------------------------------------\n\tf,((ax1,ax2),(ax3,ax4)) = plt.subplots(2,2)\n\tplt.suptitle('DPHs after badpix correction for '+grbdir+ \"\\n\" + r\"$\\theta_x$={tx:0.1f} and $\\theta_y$={ty:0.1f} \".format(tx=thetax,ty=thetay))\n \t# Sim\n\tim = ax3.imshow(sim_dph*badpix_mask,interpolation='none')\n\tax3.set_title('Sim DPH',fontsize=8)\n\tax3.set_xlim(-1,128 -0.5)\n\tax3.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\tax3.spines['left'].set_position(('data',-0.5))\n\tax3.set_yticklabels([])\n\tax3.xaxis.set_ticks(np.arange(0,128,16))\n\tf.colorbar(im,ax=ax3,fraction=0.046, pad=0.04)\n\n\t # Source \n\tim = ax4.imshow(src_dph*badpix_mask,interpolation='none',vmin=0)\n\tax4.set_title('Src DPH (bkg subtracted)',fontsize=8)\n\tax4.set_xlim(-1,128 -0.5)\n\tax4.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\tax4.spines['left'].set_position(('data',-0.5))\n\tax4.set_yticklabels([])\n\tax4.xaxis.set_ticks(np.arange(0,128,16))\n\tf.colorbar(im,ax=ax4,fraction=0.046, pad=0.04)\n\n\t # Source + Background\n\tim = ax1.imshow(grb_dph*badpix_mask,interpolation='none')\n\tax1.set_title('Src + Bkg DPH',fontsize=8)\n\tax1.set_xlim(-1,128 -0.5)\n\tax1.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\tax1.spines['left'].set_position(('data',-0.5))\n\tax1.set_yticklabels([])\n\tax1.xaxis.set_ticks(np.arange(0,128,16))\n\tf.colorbar(im,ax=ax1,fraction=0.046, pad=0.04)\n\t\n\t # Background\n\tim = ax2.imshow(bkgd_dph*badpix_mask*t_src/t_total,interpolation='none')\n\tax2.set_title('Bkg DPH',fontsize=8)\n\tax2.set_xlim(-1,128 -0.5)\n\tax2.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\tax2.spines['left'].set_position(('data',-0.5))\n\tax2.set_yticklabels([])\n\tax2.xaxis.set_ticks(np.arange(0,128,16))\n\tf.colorbar(im,ax=ax2,fraction=0.046, pad=0.04)\n\tf.set_size_inches([6.5,6.5])\n\tpdf_file.savefig(f) # saves the current figure into a pdf_file page\n\n\t# Plotting badpix masked graphs (Binned) ----------------------------------------------------\n\tfor p in [4,8,16]:\n\t\tf,((ax1,ax2),(ax3,ax4)) = plt.subplots(2,2)\n\t\tplt.suptitle('DPHs after badpix correction for '+grbdir+ \"\\n\" + r\"$\\theta_x$={tx:0.1f} and $\\theta_y$={ty:0.1f} \".format(tx=thetax,ty=thetay)+ \"pixsize=\"+str(p))\n\t\t # Sim\n\t\tim = ax3.imshow(resample(sim_dph*badpix_mask,p),interpolation='none')\n\t\tax3.set_title('Sim DPH',fontsize=8)\n\t\tax3.set_xlim(-1,128/p -0.5)\n\t\tax3.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\t\tax3.spines['left'].set_position(('data',-0.5))\n\t\tax3.set_yticklabels([])\n\t\tax3.xaxis.set_ticks(np.arange(0,(128/p),16/p))\n\t\tax3.set_xticklabels(np.arange(0,128,16))\n\t\tf.colorbar(im,ax=ax3,fraction=0.046, pad=0.04)\n\t\t\n\t\t # Source \n\t\tim = ax4.imshow(resample(src_dph*badpix_mask,p),interpolation='none',vmin=0)\n\t\tax4.set_title('Src DPH (bkg subtracted)',fontsize=8)\n\t\tax4.set_xlim(-1,128/p 
-0.5)\n\t\tax4.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\t\tax4.spines['left'].set_position(('data',-0.5))\n\t\tax4.set_yticklabels([])\n ax4.xaxis.set_ticks(np.arange(0,(128/p),16/p))\n ax4.set_xticklabels(np.arange(0,128,16))\t\t\n\t\tf.colorbar(im,ax=ax4,fraction=0.046, pad=0.04)\n\t\t\n\t\t # Source + Background\n\t\tim = ax1.imshow(resample(grb_dph*badpix_mask,p),interpolation='none')\n\t\tax1.set_title('Src + Bkg DPH',fontsize=10)\n\t\tax1.set_xlim(-1,128/p -0.5)\n\t\tax1.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\t\tax1.spines['left'].set_position(('data',-0.5))\n\t\tax1.set_yticklabels([])\n ax1.xaxis.set_ticks(np.arange(0,(128/p),16/p))\n ax1.set_xticklabels(np.arange(0,128,16))\t\t\n\t\tf.colorbar(im,ax=ax1,fraction=0.046, pad=0.04)\n\t\t\n\t\t # Background\n\t\tim = ax2.imshow(resample(bkgd_dph*badpix_mask*t_src/t_total,p),interpolation='none')\n\t\tax2.set_title('Bkg DPH',fontsize=8)\n\t\tax2.set_xlim(-1,128/p -0.5)\n\t\tax2.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\t\tax2.spines['left'].set_position(('data',-0.5))\n\t\tax2.set_yticklabels([])\n ax2.xaxis.set_ticks(np.arange(0,(128/p),16/p))\n ax2.set_xticklabels(np.arange(0,128,16))\t\t\n\t\tf.colorbar(im,ax=ax2,fraction=0.046, pad=0.04)\n\t\tf.set_size_inches([6.5,6.5])\n\t\t\n\t\tpdf_file.savefig(f) # saves the current figure into a pdf_file page\n\n\n\t# Plotting the comparison graphs with equal bins ---------------------------------------\n\tprint \"No. of pixels with zero counts in sim_dph: \",sim_dph[sim_dph==0].size\n\tprint \"No. of pixels with zero counts in grb_dph(no bkg subtration): \",grb_dph[grb_dph==0].size\n\t\n\t# Generating the array for module number ------------------------------------------------\n\tA = ['A'+str(i) for i in range(16)]\n\tB = np.flip(['B'+str(i) for i in range(16)],0)\n\tC = np.flip(['C'+str(i) for i in range(16)],0)\n\tD = ['D'+str(i) for i in range(16)]\n\tquad_a = np.reshape(A,(4,4))\n\tquad_b = np.reshape(B,(4,4))\n\tquad_c = np.reshape(C,(4,4))\n\tquad_d = np.reshape(D,(4,4))\n\tMod_arr = np.ndarray((8,8),dtype='|S3')\n\tMod_arr[:4,:4] = quad_a\n\tMod_arr[:4,4:] = quad_b\n\tMod_arr[4:,4:] = quad_c\n\tMod_arr[4:,:4] = quad_d\n\tMod_names = Mod_arr.flatten()\n\t#print \"Module name array : \",Mod_names\n\t#-----------------------------------------------------------------------------------------\n\t\t\n\tsim_dph = sim_dph*badpix_mask\n\tsim_err_dph = sim_err_dph*badpix_mask\n grb_dph = grb_dph*badpix_mask\n bkgd_dph = bkgd_dph*badpix_mask\n\tgrb_err_dph = np.sqrt(grb_dph)*badpix_mask\n\tbkgd_err_dph = np.sqrt(bkgd_dph)*badpix_mask\n\n\tsim_bin = resample(sim_dph,pixbin)\n\tsim_err_bin = np.sqrt(resample(sim_err_dph**2,pixbin))\t\n\tgrb_bin = resample(grb_dph,pixbin)\n\tbkgd_bin = resample(bkgd_dph,pixbin)\n\tgrb_err_bin = np.sqrt(resample(grb_err_dph,pixbin))\t\n\tbkgd_err_bin = np.sqrt(resample(bkgd_err_dph,pixbin))\t\n\n\tsim_flat_bin = sim_bin.flatten()\n\tsim_err_flat_bin = sim_err_bin.flatten()\n\tgrb_flat_bin = grb_bin.flatten()\n\tbkgd_flat_bin = bkgd_bin.flatten()\n\tgrb_err_flat_bin = grb_err_bin.flatten()\n\tbkgd_err_flat_bin = bkgd_err_bin.flatten()\n\t\n\n\t # Defining model background and data\n\tmodel = sim_flat_bin\n\tmodel_copy = np.copy(model)\n\tbkgd = bkgd_flat_bin*t_src/t_tot\n\tsrc = grb_flat_bin\n\t\n\tdata = src - bkgd\n\tdata_copy = np.copy(data)\n\t\n\terr_src = grb_err_flat_bin\n\terr_bkgd = bkgd_err_flat_bin\n\terr_model = sim_err_flat_bin\n\terr_model_copy = np.copy(err_model)\n\terr_data = np.sqrt(((err_src)**2) + 
((err_bkgd)**2)*(t_src/t_total)**2)\n\terr_data_copy = np.copy(err_data)\n\t\n\tratio = data/model\n\terr_ratio = ratio*np.sqrt(((err_data/data)**2) + ((err_model/model)**2))\n\t\n\tchi_sq = (((model-data)**2)/((err_model)**2 + (err_data)**2)).sum()\n\t\n\t # PLotting the comparison plots\n\tf,(ax1,ax2) = plt.subplots(2,gridspec_kw={'height_ratios':[2,1]},sharex='row')\n\t\n\tax1.set_title(\"Comparison between simulated and real data for \"+grbdir+ \"\\n\" + r\"$\\theta_x$={tx:0.1f} and $\\theta_y$={ty:0.1f} $\\chi^2$={c:0.1f}\".format(tx=thetax,ty=thetay,c=chi_sq))\n\tax1.errorbar(np.arange(0,(len(data))),data,yerr=err_data,fmt='.',markersize=2,label=\"Data\",elinewidth=0.5)\n\tax1.errorbar(np.arange(0,(len(model))),model,yerr=err_model,fmt='.',markersize=2,label=\"Simulation\",elinewidth=0.5)\n\tax1.legend()\n ax1.xaxis.set_ticks(np.arange(0,len(data)))\n\tax1.set_ylabel('Counts')\n\tax1.xaxis.grid(linewidth=0.5,alpha=0.3)\n ax1.set_xticklabels(Mod_names,rotation=90,fontsize=5)\n\n\tax2.errorbar(np.arange(0,(len(ratio))),ratio,yerr=err_ratio,fmt='.',markersize=2,label=\"Ratio = Data/Model\",elinewidth=0.5)\n\tax2.xaxis.set_ticks(np.arange(0,len(data)))\n ax2.set_xticklabels(Mod_names,rotation=90,fontsize=5)\n ax2.yaxis.set_ticks(np.arange(int(min(ratio-err_ratio)-1),int(max(ratio+err_ratio)+2),1))\n\tax2.tick_params(labelsize=5)\n\tax2.axhline(y=1,linewidth=0.5,color='k')\n\tax2.legend()\n\tax2.set_xlabel('CZT Modules')\n\tax2.set_ylabel('Ratio of counts')\n\tax2.xaxis.grid(linewidth=0.5,alpha=0.3)\n\tplt.tight_layout(h_pad=0.0)\n\tf.set_size_inches([6.5,10])\n\tpdf_file.savefig(f,orientation='portrait') # saves the current figure into a pdf_file page\n\n\t# Plotting comparison graphs with random binning------------------------------\n\t\n sim_flat = sim_dph.flatten()\n\tsim_err_flat = sim_err_dph.flatten()\n grb_flat = grb_dph.flatten()\n bkgd_flat = bkgd_dph.flatten()\n\tsrc_flat = src_dph.flatten()\n\t\n\torder = np.random.permutation(np.arange(0,len(sim_flat)))\n\t\n sim_flat = sim_flat[order]\n\tsim_err_flat = sim_err_flat[order]\n\tgrb_flat = grb_flat[order]\n\tbkgd_flat = bkgd_flat[order]\n\tsrc_flat = src_flat[order]\n\t\n\tprint \"No. of pixels with zero counts in sim_flat: \",sim_flat[sim_flat==0].size\n\tprint \"No. 
of pixels with zero counts in src_flat: \",src_flat[src_flat==0].size\n\t\n\tbins = np.array(np.sort(np.random.uniform(0,1,comp_bin)*len(sim_flat)),dtype=np.int64)\n\tx = np.zeros(len(bins)+2,dtype=np.int64)\n\tx[0] = 0\n\tx[-1] = len(sim_flat)\n\tx[1:-1] = bins\n\t\n\t#print \"The bin edges: \",x # ---------------------------------------------------------------\n\t\n\tsim_flat_bin = np.array([sim_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)])\n\tsim_err_flat_bin = np.sqrt(np.array([(sim_err_flat[x[i]:x[i+1]]**2).sum() for i in range(comp_bin+1)]))\n\tgrb_flat_bin = np.array([grb_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)])\n\tgrb_err_flat_bin = np.sqrt(np.array([grb_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)]))\n\tbkgd_flat_bin = np.array([bkgd_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)])\n\tbkgd_err_flat_bin = np.sqrt(np.array([bkgd_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)]))\n\tsrc_flat_bin = np.array([src_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)])\n\t\n\tprint \"Total sim_flat_bin : \",sim_flat_bin.sum() #-----------------------------------------\n\t#print \" Max(cumsum) : \",max(np.cumsum(sim_flat)) #-----------------------------------------\n\n # Defining model background and data\n model = sim_flat_bin #avg_flat_bin\n bkgd = bkgd_flat_bin*t_src/t_tot\n src = grb_flat_bin\n\t\n data = src - bkgd\n\n err_src = np.sqrt(src)\n err_bkgd = np.sqrt(bkgd_flat_bin)\n err_model = sim_err_flat_bin\n err_data = np.sqrt(((err_src)**2) + ((err_bkgd)**2)*(t_src/t_total)**2)\n\t\n\tchi_sq_new = (((model-data)**2)/((err_model)**2 + (err_data)**2)).sum()\n # PLotting the comparison plots\n fig = plt.figure()\n plt.title(\"Comparison between simulated and real data for \"+grbdir+ \"\\n\" + r\"$\\theta_x$={tx:0.1f} and $\\theta_y$={ty:0.1f} $\\chi^2$={c:0.1f} \".format(tx=thetax,ty=thetay,c=chi_sq_new))\n plt.errorbar(np.arange(0,(len(data))),data,yerr=err_data,fmt='.',markersize=2,label=\"Data\",elinewidth=0.5)\n plt.errorbar(np.arange(0,(len(model))),model,yerr=err_model,fmt='.',markersize=2,label=\"Simulation\",elinewidth=0.5)\n plt.ylabel('Counts')\n\tplt.xlabel('Random Bins')\n\tplt.xticks(np.arange(0,(len(data)),1))\n\tplt.legend()\n pdf_file.savefig(fig) #saves the current figure into a pdf_file page\n\n\t# Plotting observed vs predicted counts------------------------------------------------------\n\n\tfig = plt.figure()\n\tplt.title(grbdir + r\" : Observed vs Predicted counts with $\\chi^2$={cs:0.1f}\".format(cs=chi_sq))\n\tplt.errorbar(model_copy,data_copy,xerr=err_model_copy,yerr=err_data_copy,fmt='g.',markersize=2,elinewidth=0.5)\n\tfor i in range(len(model_copy)):\n\t\tplt.text(model_copy[i],data_copy[i],Mod_names[i],fontsize=5)\n\tplt.plot(np.arange(-1000,1000),np.arange(-1000,1000),'k',linewidth=0.5)\n\tplt.xlim(min(model_copy)-5,max(model_copy)+5)\n\tplt.ylim(min(data_copy)-5,max(data_copy)+5)\n\tplt.xlabel('Predicted Counts')\n\tplt.ylabel('Observed Counts')\n\tplt.legend()\n\tplt.grid()\n\tpdf_file.savefig(fig)\n\n\t# Scaling the model using curve fit =============================================================== \n\t\n\tparam,pcov = curve_fit(fit_line_int,model_copy,data_copy)\n\tscaling = param[0]\n\tintercept = param[1]\n\t\n\t# Plotting the scaled plots ===================================================================\n\t# Plotting the comparison graphs with equal bins ---------------------------------------\n\n\tsim_dph = sim_dph*badpix_mask\n\tsim_err_dph = sim_err_dph*badpix_mask\n grb_dph = grb_dph*badpix_mask\n bkgd_dph = 
bkgd_dph*badpix_mask\n\tgrb_err_dph = np.sqrt(grb_dph)*badpix_mask\n\tbkgd_err_dph = np.sqrt(bkgd_dph)*badpix_mask\n\n\tsim_bin = resample(sim_dph,pixbin)\n\tsim_err_bin = np.sqrt(resample(sim_err_dph**2,pixbin))\t\n\tgrb_bin = resample(grb_dph,pixbin)\n\tbkgd_bin = resample(bkgd_dph,pixbin)\n\tgrb_err_bin = np.sqrt(resample(grb_err_dph,pixbin))\t\n\tbkgd_err_bin = np.sqrt(resample(bkgd_err_dph,pixbin))\t\n\n\tsim_flat_bin = sim_bin.flatten()\n\tsim_err_flat_bin = sim_err_bin.flatten()\n\tgrb_flat_bin = grb_bin.flatten()\n\tbkgd_flat_bin = bkgd_bin.flatten()\n\tgrb_err_flat_bin = grb_err_bin.flatten()\n\tbkgd_err_flat_bin = bkgd_err_bin.flatten()\n\t\n\n\t # Defining model background and data\n\t#model = sim_flat_bin*scaling\n\tmodel = sim_flat_bin*scaling + intercept\n\tbkgd = bkgd_flat_bin*t_src/t_tot\n\tsrc = grb_flat_bin\n\t\n\tdata = src - bkgd\n\t\n\terr_src = grb_err_flat_bin\n\terr_bkgd = bkgd_err_flat_bin\n\t#err_model = sim_err_flat_bin*scaling\n\terr_model = sim_err_flat_bin*scaling\n\terr_data = np.sqrt(((err_src)**2) + ((err_bkgd)**2)*(t_src/t_total)**2)\n\t\n\tratio = data/model\n\terr_ratio = ratio*np.sqrt(((err_data/data)**2) + ((err_model/model)**2))\n\t\n\tchi_sq = (((model-data)**2)/((err_model)**2 + (err_data)**2)).sum()\n\t\n\t # PLotting the comparison plots\n\tf,(ax1,ax2) = plt.subplots(2,gridspec_kw={'height_ratios':[2,1]},sharex='row')\n\t\n\tax1.set_title(\"Comparison between simulated (scaled) and real data for \"+grbdir+ \"\\n\" + r\"$\\theta_x$={tx:0.1f} and $\\theta_y$={ty:0.1f} $\\chi^2$={c:0.1f} \".format(tx=thetax,ty=thetay,c=chi_sq))\n\tax1.errorbar(np.arange(0,(len(data))),data,yerr=err_data,fmt='.',markersize=2,label=\"Data\",elinewidth=0.5)\n\tax1.errorbar(np.arange(0,(len(model))),model,yerr=err_model,fmt='.',markersize=2,label=\"Simulation (scaling = {s:0.2f},offset = {o:0.2f})\".format(s=scaling,o=intercept),elinewidth=0.5)\n\tax1.legend()\n ax1.xaxis.set_ticks(np.arange(0,len(data)))\n\tax1.set_ylabel('Counts')\n\tax1.xaxis.grid(linewidth=0.5,alpha=0.3)\n ax1.set_xticklabels(Mod_names,rotation=90,fontsize=5)\n\t\n\tax2.errorbar(np.arange(0,(len(ratio))),ratio,yerr=err_ratio,fmt='.',markersize=2,label=\"Ratio = Data/Model(scaling = {s:0.2f}, offset={o:0.2f})\".format(s=scaling,o=intercept),elinewidth=0.5)\n\tax2.xaxis.set_ticks(np.arange(0,len(data)))\n ax2.set_xticklabels(Mod_names,rotation=90,fontsize=5)\n ax2.yaxis.set_ticks(np.arange(int(min(ratio-err_ratio)-1),int(max(ratio+err_ratio)+2),1))\n\tax2.tick_params(labelsize=5)\n\tax2.axhline(y=1,linewidth=0.5,color='k')\n\tax2.legend()\n\tax2.set_xlabel('CZT Modules')\n\tax2.set_ylabel('Ratio of counts')\n\tax2.xaxis.grid(linewidth=0.5,alpha=0.3)\n\tplt.tight_layout(h_pad=0.0)\n\tf.set_size_inches([6.5,10])\n\tpdf_file.savefig(f,orientation='portrait') # saves the current figure into a pdf_file page\n\n\t# Plotting comparison graphs with random binning------------------------------\n\t\n sim_flat = sim_dph.flatten()\n\tsim_err_flat = sim_err_dph.flatten()\n grb_flat = grb_dph.flatten()\n bkgd_flat = bkgd_dph.flatten()\n\tsrc_flat = src_dph.flatten()\n\t\n\torder = np.random.permutation(np.arange(0,len(sim_flat)))\n\t\n sim_flat = sim_flat[order]\n\tsim_err_flat = sim_err_flat[order]\n\tgrb_flat = grb_flat[order]\n\tbkgd_flat = bkgd_flat[order]\n\tsrc_flat = src_flat[order]\n\t\n\tbins = np.array(np.sort(np.random.uniform(0,1,comp_bin)*len(sim_flat)),dtype=np.int64)\n\tx = np.zeros(len(bins)+2,dtype=np.int64)\n\tx[0] = 0\n\tx[-1] = len(sim_flat)\n\tx[1:-1] = bins\n\t\n\tsim_flat_bin = 
np.array([sim_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)])\n\tsim_err_flat_bin = np.sqrt(np.array([(sim_err_flat[x[i]:x[i+1]]**2).sum() for i in range(comp_bin+1)]))\n\tgrb_flat_bin = np.array([grb_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)])\n\tgrb_err_flat_bin = np.sqrt(np.array([grb_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)]))\n\tbkgd_flat_bin = np.array([bkgd_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)])\n\tbkgd_err_flat_bin = np.sqrt(np.array([bkgd_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)]))\n\tsrc_flat_bin = np.array([src_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)])\n\t\n # Defining model background and data\n #model = sim_flat_bin*scaling\n\tmodel = sim_flat_bin*scaling + intercept\n bkgd = bkgd_flat_bin*t_src/t_tot\n src = grb_flat_bin\n\t\n data = src - bkgd\n\n err_src = np.sqrt(src)\n err_bkgd = np.sqrt(bkgd_flat_bin)\n #err_model = sim_err_flat_bin*scaling\n\terr_model = sim_err_flat_bin*scaling\n err_data = np.sqrt(((err_src)**2) + ((err_bkgd)**2)*(t_src/t_total)**2)\n\t\n\tchi_sq_new = (((model-data)**2)/((err_model)**2 + (err_data)**2)).sum()\n # PLotting the comparison plots\n fig = plt.figure()\n plt.title(\"Comparison between simulated(scaled) and real data for \"+grbdir+ \"\\n\" + r\"$\\theta_x$={tx:0.1f} and $\\theta_y$={ty:0.1f} $\\chi^2$={c:0.1f} \".format(tx=thetax,ty=thetay,c=chi_sq_new))\n plt.errorbar(np.arange(0,(len(data))),data,yerr=err_data,fmt='.',markersize=2,label=\"Data\",elinewidth=0.5)\n plt.errorbar(np.arange(0,(len(model))),model,yerr=err_model,fmt='.',markersize=2,label=\"Simulation (scaling = {s:0.2f}, offset = {o:0.2f})\".format(s=scaling,o=intercept),elinewidth=0.5)\n plt.ylabel('Counts')\n\tplt.xlabel('Random Bins')\n\tplt.xticks(np.arange(0,(len(data)),1))\n\tplt.legend()\n pdf_file.savefig(fig) #saves the current figure into a pdf_file page\n\n\n\t# Plotting observed vs predicted counts--------------------------------------------------------\n\n\tfig = plt.figure()\n plt.title(grbdir + r\" : Observed vs Predicted counts with $\\chi^2$ = {cs:0.1f}\".format(cs=chi_sq))\n plt.errorbar(model_copy,data_copy,xerr=err_model_copy,yerr=err_data_copy,fmt='g.',markersize=2,elinewidth=0.5)\n\tfor i in range(len(model_copy)):\t\n\t\tplt.text(model_copy[i],data_copy[i],Mod_names[i],fontsize=5)\n #plt.plot(np.arange(-1000,1000),fit_line(np.arange(-1000,1000),scaling),'k',linewidth=0.5,label='m = {s:0.2f}'.format(s=scaling))\n\tplt.plot(np.arange(-1000,1000),fit_line_int(np.arange(-1000,1000),scaling,intercept),'k',linewidth=0.5,label='scaling = {s:0.2f}, offset = {i:0.2f}'.format(s=scaling,i=intercept))\n\tplt.plot(np.arange(min(model_copy)-5,max(model_copy)+5),np.ones(len(np.arange(min(model_copy)-5,max(model_copy)+5)))*intercept,'r-',label='intercept',linewidth=0.5)\n plt.xlim(min(model_copy)-5,max(model_copy)+5)\n plt.ylim(min(data_copy)-5,max(data_copy)+5)\n plt.xlabel('Predicted Counts')\n plt.ylabel('Observed Counts')\n\tplt.legend()\n\tplt.grid()\n pdf_file.savefig(fig)\n\t\t\n\tprint \"===============================================================================================\"\n\t\n\treturn", "def plot_calibration_curve(est, name, fig_index):\n # Calibrated with isotonic calibration\n isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic')\n\n # Calibrated with sigmoid calibration\n sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid')\n\n # Logistic regression with no calibration as baseline\n lr = LogisticRegression(C=1.)\n\n fig = plt.figure(fig_index, figsize=(10, 10))\n ax1 = 
plt.subplot2grid((3, 1), (0, 0), rowspan=2)\n ax2 = plt.subplot2grid((3, 1), (2, 0))\n\n ax1.plot([0, 1], [0, 1], \"k:\", label=\"Perfectly calibrated\")\n for clf, name in [(lr, 'Logistic'),(est, name),(isotonic, name + ' + Isotonic'),(sigmoid, name + ' + Sigmoid')]:\n #Para cada modelo, entrenamos y predecimos \n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n if hasattr(clf, \"predict_proba\"):\n prob_pos = clf.predict_proba(X_test)[:, 1]\n else: # use decision function\n prob_pos = clf.decision_function(X_test)\n prob_pos = \\\n (prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())\n\n clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max())\n print(\"%s:\" % name)\n print(\"\\tBrier: %1.3f\" % (clf_score))\n print(\"\\tPrecision: %1.3f\" % precision_score(y_test, y_pred))\n print(\"\\tRecall: %1.3f\" % recall_score(y_test, y_pred))\n print(\"\\tF1: %1.3f\\n\" % f1_score(y_test, y_pred))\n\n fraction_of_positives, mean_predicted_value = \\\n calibration_curve(y_test, prob_pos, n_bins=10)\n\n ax1.plot(mean_predicted_value, fraction_of_positives, \"s-\",\n label=\"%s (%1.3f)\" % (name, clf_score))\n\n ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,\n histtype=\"step\", lw=2)\n\n ax1.set_ylabel(\"Fraction of positives\")\n ax1.set_ylim([-0.05, 1.05])\n ax1.legend(loc=\"lower right\")\n ax1.set_title('Calibration plots (reliability curve)')\n\n ax2.set_xlabel(\"Mean predicted value\")\n ax2.set_ylabel(\"Count\")\n ax2.legend(loc=\"upper center\", ncol=2)\n\n plt.tight_layout()", "def roofline_plot():\n\n def attainable_performance(operational_intensity):\n return min(PEAK_PERFORMANCE, MEMORY_BANDWIDTH * operational_intensity)\n\n oi_values = np.logspace(-4, 12, 1000, base=2)\n perf_values = [attainable_performance(oi) for oi in oi_values]\n fig, ax = viz_utils.setup_figure_1ax(x_label='Operational Intensity [Flops/Bytes]',\n y_label='Performance [Flops/Cycle]')\n ax.set_xscale(\"log\", basex=2)\n ax.set_yscale(\"log\", basey=2)\n ax.plot(oi_values, perf_values, linewidth=2.0, alpha=0.7)\n ax.set_aspect('equal', adjustable='datalim')\n\n ridge_point = PEAK_PERFORMANCE / MEMORY_BANDWIDTH\n ax.annotate(f'{{{ridge_point:0.1f}, {PEAK_PERFORMANCE:0.1f}}}',\n xy=(ridge_point, PEAK_PERFORMANCE),\n xytext=(-70, 15), textcoords='offset points',)\n return fig, ax", "def main():\n\tplt.clf()\n\taxes = setup_axes()\n\tplot_output(axes, \"../../simulations/default\", \"black\")\n\tplot_output(axes, \"../../simulations/yccsr_zero\", \"crimson\")\n\tplot_output(axes, \"../../simulations/yccsr_linear\", \"lime\")\n\tplot_output(axes, \"../../simulations/yccsr_1-exp\", \"deepskyblue\")\n\tvisuals.plot_track_points_intervals(axes[0],\n\t\tvice.history(\"../../simulations/default\"), element = \"Sr\",\n\t\treference = \"Fe\")\n\tplot_legend(axes[1])\n\tplt.tight_layout()\n\tvisuals.yticklabel_formatter(axes[1])\n\tplt.savefig(sys.argv[1])\n\tplt.clf()", "def make_plot(x,y):", "def plot_all(pred_tuple, filename='roc.png'):\n plt.clf()\n colors = [\"red\",\"blue\",\"green\",\"orange\",\"yellow\"]\n for (label, y, proba), color in zip(pred_tuple, colors):\n true_pos, false_pos, thresh = metrics.roc_curve(y, proba)\n plt.plot(false_pos, true_pos, label=label, linewidth=2,\n color=color)\n plt.plot([0,1],[0,1], linestyle=\"dashed\", color=\"grey\", label=\"random\")\n plt.xlim([0,1])\n plt.ylim([0,1])\n plt.xlabel(\"False Positive Rate\")\n plt.ylabel(\"True Positive Rate\")\n plt.title(\"Receiver Operating Characteristic\")\n plt.legend(loc=\"lower right\")\n\n plt.show()\n 
plt.savefig(_plots_path + filename)", "def plot_calibration_curve(classifier_name, pred_csv_file, fig_index):\n\n from sklearn.metrics import brier_score_loss, precision_score, recall_score, f1_score\n from sklearn.calibration import CalibratedClassifierCV, calibration_curve\n import matplotlib\n matplotlib.use('Agg')\n import matplotlib.pyplot as plt\n from sklearn.isotonic import isotonic_regression\n from sklearn.metrics import roc_auc_score, roc_curve, auc\n\n # # Calibrated with isotonic calibration\n # isotonic = CalibratedClassifierCV(base_estimator=None, cv=\"prefit\", method='isotonic')\n\n # # Calibrated with sigmoid calibration\n # sigmoid = CalibratedClassifierCV(base_estimator=None, cv=\"prefit\", method='sigmoid')\n\n # # Logistic regression with no calibration as baseline\n # lr = LogisticRegression(C=1., solver='lbfgs')\n\n fig = plt.figure(fig_index, figsize=(10, 10))\n ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)\n ax2 = plt.subplot2grid((3, 1), (2, 0))\n\n ax1.plot([0, 1], [0, 1], \"k:\", label=\"Perfectly calibrated\")\n\n # for name in [classifier_name, classifier_name + ' + Isotonic', classifier_name + ' + Sigmoid']:\n for name in [classifier_name, classifier_name + ' + Sigmoid']:\n # for name in [classifier_name]:\n\n y_test, prob_pos, y_pred, _, _ = read_pred_csv_file_to_arrays(pred_csv_file)\n\n if name == classifier_name + ' + Sigmoid':\n a, b = sigmoid_calibration(prob_pos, y_test, sample_weight=None)\n prob_pos = predict_sigmoid(a, b, prob_pos)\n print a, b\n y_pred = binary_predict(prob_pos, threshold = 0.5)\n\n\n if name == classifier_name + ' + Isotonic' :\n prob_pos = isotonic_regression(prob_pos, sample_weight=None, y_min=None, y_max=None,\n increasing=True)\n y_pred = binary_predict(prob_pos, threshold = 0.5)\n\n\n # print prob_pos[:20]\n # # plot roc curve for test: class 1 only\n # fpr, tpr, _ = roc_curve(y_test, prob_pos)\n # lw = 2\n # plt.plot(fpr, tpr, color='darkorange',\n # lw=lw, label='ROC curve (area = %0.2f)' %(roc_auc_score(y_test, prob_pos, average='macro')))\n # plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\n # plt.xlim([0.0, 1.0])\n # plt.ylim([0.0, 1.05])\n # plt.xlabel('False Positive Rate')\n # plt.ylabel('True Positive Rate')\n # plt.title('Receiver operating characteristic example')\n # plt.legend(loc=\"lower right\")\n # plt.savefig('plots/roc_%s.png'%(name))\n # plt.clf()\n\n clf_score = brier_score_loss(y_test, prob_pos, pos_label=1)\n print(\"%s:\" % name)\n print(\"\\tBrier: %1.3f\" % (clf_score))\n print(\"\\tPrecision: %1.3f\" % precision_score(y_test, y_pred))\n print(\"\\tRecall: %1.3f\" % recall_score(y_test, y_pred))\n print(\"\\tF1: %1.3f\" % f1_score(y_test, y_pred))\n print(\"\\tROC: %1.3f\\n\" % roc_auc_score(y_test, prob_pos, average='macro'))\n\n fraction_of_positives, mean_predicted_value = \\\n calibration_curve(y_test, prob_pos, n_bins=10)\n\n ax1.plot(mean_predicted_value, fraction_of_positives, \"s-\",\n label=\"%s (%1.3f)\" % (name, clf_score))\n\n ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,\n histtype=\"step\", lw=2)\n\n ax1.set_ylabel(\"Fraction of positives\")\n ax1.set_ylim([-0.05, 1.05])\n ax1.legend(loc=\"lower right\")\n ax1.set_title('Calibration plots (reliability curve)')\n\n ax2.set_xlabel(\"Mean predicted value\")\n ax2.set_ylabel(\"Count\")\n ax2.legend(loc=\"upper center\", ncol=2)\n\n plt.tight_layout()\n plt.savefig('plots/calibration.png')\n plt.clf()", "def run():\n \n start_time = time.time()\n \n args = parse_args_plotting()\n config = ConfigParser()\n 
config.read(args.config_file)\n \n # initialize the OP class object\n OPs = initialize_plot_options(config)\n \n # which plot\n plot_settings = {}\n burnin = config.getint('plotting', 'burnin', fallback=0)\n plot_astr = config.getboolean('plotting', 'Astrometry_orbits_plot', fallback=False)\n plot_astr_pred = config.getboolean('plotting', 'Astrometric_prediction_plot', fallback=False)\n plot_rv_full = config.getboolean('plotting', 'RV_orbits_plot', fallback=False)\n plot_rv = config.getboolean('plotting', 'RV_plot', fallback=False)\n plot_rel_sep = config.getboolean('plotting', 'Relative_separation_plot', fallback=False)\n plot_position_angle = config.getboolean('plotting', 'Position_angle_plot', fallback=False)\n plot_proper_motions = config.getboolean('plotting', 'Proper_motion_plot', fallback=False)\n plot_corner = config.getboolean('plotting', 'Corner_plot', fallback=False)\n save_params = config.getboolean('save_results', 'save_params', fallback=True)\n checkconv = config.getboolean('plotting', 'check_convergence', fallback=False)\n \n if checkconv:\n OPs.plot_chains()\n if plot_astr:\n OPs.astrometry()\n if plot_astr_pred:\n OPs.astrometric_prediction_plot()\n if plot_rv_full:\n OPs.RV_fullorbit()\n if plot_rv:\n OPs.RV()\n if plot_rel_sep:\n OPs.relsep()\n if plot_position_angle:\n OPs.PA()\n if plot_proper_motions:\n OPs.proper_motions()\n if plot_corner:\n OPs.plot_corner()\n if save_params:\n OPs.save_data()", "def plot(self):\n\t\tself.plotOfHeatingCurrent().plot()", "def plot_calibration_curve(est, name, fig_index, data):\n\n X_train = data[0]\n X_test = data[1]\n y_train = data[2]\n y_test = data[3]\n\n y = np.concatenate([y_train, y_test], axis=0)\n\n # Calibrated with isotonic calibration\n isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic')\n\n # Calibrated with sigmoid calibration\n sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid')\n\n # Logistic regression with no calibration as baseline\n lr = LogisticRegression(C=1., solver='lbfgs')\n\n fig = plt.figure(1, figsize=(15, 10))\n ax1 = plt.subplot2grid((4, 6), (0, 0), colspan=2, rowspan=2)\n ax2 = plt.subplot2grid((4, 6), (0, 2), colspan=2, rowspan=2)\n ax3 = plt.subplot2grid((4, 6), (0, 4), colspan=2, rowspan=2)\n ax4 = plt.subplot2grid((4, 6), (2, 0), colspan=6, rowspan=2)\n\n ax1.plot([0, 1], [0, 1], \"k:\", label=\"Perfectly calibrated\")\n for clf, name in [(lr, 'Logistic'),\n (est, name),\n (isotonic, name + ' + Isotonic'),\n (sigmoid, name + ' + Sigmoid')]:\n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n if hasattr(clf, \"predict_proba\"):\n prob_pos = clf.predict_proba(X_test)[:, 1]\n y_proba = prob_pos.copy()\n else: # use decision function\n prob_pos = clf.decision_function(X_test)\n y_proba = prob_pos.copy()\n prob_pos = \\\n (prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())\n\n clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max())\n print(\"%s:\" % name)\n print(\"\\tBrier: %1.3f\" % (clf_score))\n print(\"\\tPrecision: %1.3f\" % precision_score(y_test, y_pred))\n print(\"\\tRecall: %1.3f\" % recall_score(y_test, y_pred))\n print(\"\\tF1: %1.3f\" % f1_score(y_test, y_pred))\n print(\"\\tAve. 
Precision Score: %1.3f\\n\" % \\\n average_precision_score(y_test, y_proba))\n\n fraction_of_positives, mean_predicted_value = \\\n calibration_curve(y_test, prob_pos, n_bins=10)\n\n ax1.plot(mean_predicted_value, fraction_of_positives, \"s-\",\n label=\"%s (%1.3f)\" % (name, clf_score))\n\n fpr, tpr, thresholds = roc_curve(y_test, y_proba, drop_intermediate=False)\n roc_auc = roc_auc_score(y_test, y_proba)\n ax2.plot(fpr, tpr, ls='-', label=\"%s (%1.3f)\" % (name, roc_auc))\n\n precision, recall, _ = precision_recall_curve(y_test, y_proba)\n ax3.plot(recall, precision)\n\n ax4.hist(prob_pos, range=(0, 1), bins=10,\n label='%s' % name, histtype=\"step\", lw=2)\n\n ax1.set_xlabel(\"Score\", fontsize=14)\n ax1.set_ylabel(\"Fraction of positives\", fontsize=14)\n ax1.set_ylim([-0.05, 1.05])\n ax1.legend(loc=\"lower right\")\n ax1.set_title('Calibration plots (reliability curve)', fontsize=16)\n\n ax2.set_xlabel(\"False Positive Rate\", fontsize=14)\n ax2.set_ylabel(\"True Positive Rate\", fontsize=14)\n ax2.set_ylim([-0.05, 1.05])\n ax2.legend(loc=\"lower right\")\n ax2.set_title('ROC Curve', fontsize=16)\n\n ax3.set_xlabel(\"Recall\", fontsize=14)\n ax3.set_ylabel(\"Precision\", fontsize=14)\n ax3.set_ylim([-0.05, 1.05])\n ax3.legend(loc=\"lower center\")\n ax3.set_title('Precision-Recall Curve', fontsize=16)\n\n ax4.set_xlabel(\"Mean predicted value\", fontsize=14)\n ax4.set_ylabel(\"Count\", fontsize=14)\n ax4.legend(loc=\"upper center\")\n ax4.set_title('Classification Result', fontsize=16)\n\n plt.tight_layout()\n\n plt.show()\n\n return", "def evaluate(self, plot):", "def plotROC(yscore, true, predtrue, datasets, title, outfile):\n fig = plt.figure()\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.0])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title(title)\n \n for i in range(len(datasets)):\n acc = accuracy_score(true[i], predtrue[i])\n fpr, tpr, _ = roc_curve(true[i], yscore[i][:,1])\n roc_auc = auc(fpr, tpr)\n plt.plot(fpr, tpr, label=datasets[i]+' (area = %0.2f, acc = %0.2f)' % (roc_auc,acc),linewidth=2)\n \n plt.legend(loc=\"lower right\")\n \n pdfplot = PdfPages(outfile);\n pdfplot.savefig(fig)\n pdfplot.close()", "def main():\n\n\timport numpy as np\n\timport matplotlib as mpl\n\n\t# Parse arguments\n\targs = cli.parse_args()\n\n\t# Check all files exist\n\tfor filepath in args.files:\n\t\tif not os.path.isfile(filepath):\n\t\t\t_exit('file {} does not exist'.format(filepath))\n\n\t# Use TK interactive backend as it should work on all installations\n\tmpl.use('tkagg')\n\n\t# Turn on interactive plotting (pyplot import must come after call to use())\n\timport matplotlib.pyplot as plt\n\tplt.ion()\n\n\t# Get torsion angle arrays for all input files\n\tarrays = []\n\tfor filepath in args.files:\n\t\twith open(filepath) as fobj:\n\t\t\tchain = list(parse_pdb_chain(fobj))\n\t\t\tarrays.append(torsion_array(chain))\n\n\tangles = np.concatenate(arrays, axis=0)\n\n\t# Make the plot\n\tif args.plottype == 'hexbin':\n\t\thb = ramachandran_hexbin(angles, log=args.log, degrees=not args.radians)\n\t\tif args.cbar:\n\t\t\tplt.colorbar(hb)\n\n\telif args.plottype == 'scatter':\n\t\tramachandran_scatter(angles, degrees=not args.radians)\n\n\telif args.plottype == 'kde':\n\t\t_assert_seaborn_available('kde')\n\t\tramachandran_kdeplot(angles, shade=True, degrees=not args.radians)\n\n\telif args.plottype == 'joint':\n\t\t_assert_seaborn_available('joint')\n\t\tramachandran_jointplot(angles, kind='kde', degrees=not args.radians)\n\n\telse:\n\t\traise 
ValueError(args.plottype)\n\n\t# Plot title\n\tif len(args.files) == 1:\n\t\tfilename = os.path.basename(args.files[0])\n\telse:\n\t\tfilename = '{} files'.format(len(args.files))\n\n\tplt.gcf().suptitle('{} ({} residues)'.format(filename, angles.shape[0]))\n\n\t# Show the plot, blocking until closed\n\tplt.show(block=True)", "def plot_mean_roc_curve_of_classifiers(classifier_roc_list, data_set_description):\n if const.RECORD_RESULTS is True:\n fig = plt.figure(figsize=(8, 6.66))\n monochrome = (cycler(\"color\", [\"k\"]) * cycler(\"marker\", [\"\"]) *\n cycler(\"linestyle\", [\"-\", \"--\", \"-.\"]))\n color_arr = [\"#64B3DE\", \"#1f78b4\", \"#6ABF20\", \"#FBAC44\", \"#bc1659\", \"#B9B914\", \"#33a02c\", \"#ff7f00\", \"#6a3d9a\", \"black\", \"#b15928\", \"#e31a1c\"]\n plt.rc(\"axes\", prop_cycle=monochrome)\n line_style_index = 0\n color_index = 0\n\n for (test_run_roc_list, classifier_description) in classifier_roc_list:\n if not (None, None) in test_run_roc_list[0]:\n mean_tpr = 0.0\n mean_fpr = np.linspace(0, 1, 100)\n count = 0\n for roc_list in test_run_roc_list:\n for (tpr, fpr) in roc_list:\n mean_tpr += interp(mean_fpr, fpr, tpr)\n mean_tpr[0] = 0.0\n count += 1\n\n mean_tpr /= float(count)\n mean_tpr[-1] = 1.0\n mean_auc = auc(mean_fpr, mean_tpr)\n line_width = 0.5\n if line_style_index == 1:\n line_width = 0.8\n elif line_style_index == 2:\n line_width = 1.5\n\n plt.plot(mean_fpr, mean_tpr, c=color_arr[color_index], lw=line_width, alpha=1, label=\"{0} ({1:.3f})\".format(classifier_description, mean_auc))\n line_style_index = (line_style_index + 1) % 3\n color_index += 1\n\n plt.locator_params(axis='x', nbins=10)\n plt.locator_params(axis='y', nbins=10)\n plt.plot([0, 1], [0, 1], \"k--\", label=\"Random classification\", lw=0.8)\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.0])\n plt.xlabel(\"False Positive Rate\")\n plt.ylabel(\"True Positive Rate\")\n plt.title(\"ROC curve for each classifier\")\n plt.legend(loc=\"lower right\", fancybox=True, frameon=True)\n current_time = datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n plt.savefig(os.path.dirname(os.path.realpath(__file__)) + \"/../results/{0}_roc_classifier_plot_{1}.png\".format(data_set_description, current_time), bbox_inches=\"tight\")\n plt.close(fig)", "def plot(self,displayplt = True,saveplt = False,savepath='',polarplt=True, dbdown = False):\n plt.figure()\n\n #legacy beamprofile data is a 1-D array of the peak negative pressure\n pnp = self.pnp\n\n if dbdown:\n pnp = 20.0*np.log10(pnp/np.max(pnp))\n else:\n pnp = pnp*1e-6\n\n if polarplt:\n figure1 = plt.polar(self.angle * np.pi / 180.0, pnp)\n else:\n figure1 = plt.plot(self.angle, pnp)\n #the latest beamprofile data should be a 2-D array of the hydrophone output\n plt.xlabel('Angle (degrees)')\n if dbdown:\n plt.ylabel('Peak Negative Pressure (dB Max)')\n else:\n plt.ylabel('Peak Negative Pressure (MPa)')\n plt.title(self.txdr)\n if displayplt:\n plt.show()\n if saveplt:\n if savepath=='':\n #prompt for a save path using a default filename\n defaultfn = self.txdr+'_'+self.collectiondate+'_'+self.collectiontime+'_beamprofile.png'\n savepath = tkinter.filedialog.asksaveasfilename(initialfile=defaultfn, defaultextension='.png')\n plt.savefig(savepath)\n return figure1, savepath", "def plot_ROC_curve(model, X_train, X_test, y_train, y_test):\n \n # Model Metrics\n print model\n print \"*************************** Model Metrics *********************************\"\n print 'Accuracy: %s' % cross_val_score(model, X_train, y_train, scoring = 'accuracy', cv = 5).mean()\n print 
'Precision: %s' % cross_val_score(model, X_train, y_train, scoring = 'precision', cv = 5).mean()\n print 'Recall: %s' % cross_val_score(model, X_train, y_train, scoring = 'recall_weighted', cv = 5).mean()\n print 'F1: %s' % cross_val_score(model, X_train, y_train, scoring = 'f1', cv = 5).mean()\n\n fitted = model.fit(X_train, y_train)\n try:\n y_score = fitted.predict_proba(X_test)[:,1]\n except:\n y_score = fitted.decision_function(X_test)\n \n # Confusion matrix\n print \"********************* Normalized Confusion Matrix *************************\"\n cm = confusion_matrix(y_test, fitted.predict(X_test))\n cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n \n print('Normalized confusion matrix')\n print(cm_normalized)\n plt.matshow(cm, cmap=plt.cm.Blues)\n plt.colorbar()\n plt.xlabel('Predicted Values')\n plt.ylabel('Actual Values')\n \n # Classification Report\n print \"********************* Classification Report********************************\" \n print classification_report(y_test, fitted.predict(X_test))\n \n print \"********************* ROC Curve *******************************************\"\n \n # ROC Curve\n fpr, tpr, _ = roc_curve(y_test, y_score)\n roc_auc = auc(fpr, tpr)\n \n plt.figure()\n plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)\n plt.plot([0, 1], [0, 1], 'k--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('Receiver operating characteristic')\n plt.legend(loc=\"lower right\")\n plt.show()", "def plotTrajectory(arg, color = sf.cyan, xyRate=True, radiusRate = 80.0\n , blAxes = True):\n if not(hasattr(arg, '__getitem__')) and hasattr(arg, '__iter__'):\n arg = list(arg)\n\n vs = sf.vs_()\n\n color = tuple(color) # color argment may be list/vector\n if isinstance(arg,list) or isinstance(arg,tuple) or isinstance(\n arg,type(sf.sc.array([0,]))):\n from octnOp import ClOctonion\n if not(hasattr(arg[0],'__len__')) and isinstance(arg[0], complex):\n arg = [ (x.real, x.imag) for x in arg]\n elif not(hasattr(arg[0],'__len__')) and isinstance(arg[0], ClOctonion):\n arg = [ x[1:4] for x in arg]\n\n if len(arg[0])==2:\n import visual.graph as vg\n global __obj2dDisplayGeneratedStt\n\n maxX = max([abs(elm[0]) for elm in arg])\n maxY = max([abs(elm[1]) for elm in arg])\n\n print \"maxX:\",maxX, \" maxY:\",maxY\n\n if (__obj2dDisplayGeneratedStt == None):\n if xyRate == True: # 11.01.16 to \n maxAt = max(maxX, maxY)\n __obj2dDisplayGeneratedStt = vg.gdisplay(\n width=600*maxX/maxAt,height=600*maxY/maxAt)\n else:\n __obj2dDisplayGeneratedStt = vg.gdisplay(\n width=600,height=600)\n #__bl2dDisplayGeneratedStt = True\n grphAt = vg.gcurve(color = color)\n for i in range(len(arg)):\n assert len(arg[i])==2, \"unexpeted length data:\"+str(arg[i])\n grphAt.plot(pos = arg[i])\n\n #return __obj2dDisplayGeneratedStt\n #import pdb; pdb.set_trace()\n #print \"debug:\",grphAt.gcurve.pos\n\n # plot start mark\n grphSqAt = vg.gcurve(color = color)\n pos0At = grphAt.gcurve.pos[0,:][:2]\n rateAt = 50\n for x,y in sf.mitr([-maxX/rateAt, maxX/rateAt]\n , [-maxY/rateAt, maxY/rateAt]):\n grphSqAt.plot(pos = pos0At+[x,y])\n \n grphSqAt.plot(pos = pos0At+[-maxX/rateAt,-maxY/rateAt])\n\n return grphAt # 09.02.04 to animate graph\n elif len(arg[0])==3:\n vs.scene.forward=(-1,+1,-1)\n vs.scene.up=(0,0,1)\n\n c = vs.curve( color = color )\n\n maxX, maxY, maxZ = 0,0,0\n for i in range(len(arg)):\n if maxX < abs(arg[i][0]):\n maxX = abs(arg[i][0])\n if maxY < abs(arg[i][1]):\n maxY = 
abs(arg[i][1])\n if maxZ < abs(arg[i][2]):\n maxZ = abs(arg[i][2])\n c.append( arg[i] )\n #print c.pos\n print \"maxX:\",maxX, \" maxY:\",maxY, \" maxZ:\",maxZ\n maxAt = max(maxX,maxY,maxZ)\n c.radius = maxAt/radiusRate\n\n vs.sphere(pos = arg[0], radius = 3*c.radius, color = color)\n\n if blAxes == True:\n # draw axise\n vs.curve( pos=[(0,0,0), (maxAt,0,0)]\n , color=(1,0,0)\n , radius = maxAt/100 )\n vs.curve( pos=[(0,0,0), (0,maxAt,0)]\n , color=(0,1,0)\n , radius = maxAt/100 )\n vs.curve( pos=[(0,0,0), (0,0,maxAt)]\n , color=(0,1,1)\n , radius = maxAt/100 )\n #return vs.scene\n return c # 09.02.04 to animate graph\n else:\n assert False,\"unexpeted data:\"+str(arg)", "def plot_curves_lenstool(label = ' ', courves_file = 'ce.dat', marker = 'm.', \\\n plt_show = False):\n# x_ca, y_ca = np.loadtxt( courves_file, usecols=(3, 4), unpack=True )\n# x_cc, y_cc = np.loadtxt( courves_file, usecols=(1, 2), unpack=True )\n\n x_cc, y_cc, x_ca, y_ca = \\\n np.loadtxt( courves_file, usecols=(1, 2, 3, 4), unpack=True )\n\n plt.figure(1, figsize=(16, 8))\n\n plt.subplot(1, 2, 1).set_aspect(1)\n plt.plot(x_ca, y_ca, marker, linewidth=3, label = label)\n axis_limit = max( 1.075*max(np.absolute(x_ca)), \\\n 1.075*max(np.absolute(y_ca)) )\n plt.axis([-axis_limit, axis_limit, -axis_limit, axis_limit])\n\n plt.subplot (1, 2, 2).set_aspect(1)\n plt.plot(x_cc, y_cc, marker, linewidth=3, label = label)\n axis_limit = max( 1.075*max(np.absolute(x_cc)), \\\n 1.075*max(np.absolute(y_cc)) )\n plt.axis([-axis_limit, axis_limit, -axis_limit, axis_limit])\n plt.legend()\n\n if plt_show:\n plt.show()\n plt.close()", "def plot(self, *args, **kwargs):\n pass", "def plot_roc_curve(ht, scores, tp_label='tp', fp_label='fp', colors=None, title='ROC Curve', hover_mode='mouse'):\n if colors is None:\n # Get a palette automatically\n from bokeh.palettes import d3\n palette = d3['Category10'][max(3, len(scores))]\n colors = {score: palette[i] for i, score in enumerate(scores)}\n\n if isinstance(scores, str):\n scores = [scores]\n total_tp, total_fp = ht.aggregate((hl.agg.count_where(ht[tp_label]), hl.agg.count_where(ht[fp_label])))\n\n p = figure(title=title, x_axis_label='FPR', y_axis_label='TPR', tools=\"hover,save,pan,box_zoom,reset,wheel_zoom\")\n p.add_layout(Title(text=f'Based on {total_tp} TPs and {total_fp} FPs'), 'above')\n\n aucs = []\n for score in scores:\n ordered_ht = ht.key_by(_score=-ht[score])\n ordered_ht = ordered_ht.select(\n score_name=score, score=ordered_ht[score],\n tpr=hl.scan.count_where(ordered_ht[tp_label]) / total_tp,\n fpr=hl.scan.count_where(ordered_ht[fp_label]) / total_fp,\n ).key_by().drop('_score')\n last_row = hl.utils.range_table(1).key_by().select(score_name=score, score=hl.float64(float('-inf')), tpr=hl.float64(1.0), fpr=hl.float64(1.0))\n ordered_ht = ordered_ht.union(last_row)\n ordered_ht = ordered_ht.annotate(\n auc_contrib=hl.or_else((ordered_ht.fpr - hl.scan.max(ordered_ht.fpr)) * ordered_ht.tpr, 0.0)\n )\n auc = ordered_ht.aggregate(hl.agg.sum(ordered_ht.auc_contrib))\n aucs.append(auc)\n df = ordered_ht.annotate(score_name=ordered_ht.score_name + f' (AUC = {auc:.4f})').to_pandas()\n p.line(x='fpr', y='tpr', legend_field='score_name', source=ColumnDataSource(df), color=colors[score], line_width=3)\n\n p.legend.location = 'bottom_right'\n p.legend.click_policy = 'hide'\n p.select_one(HoverTool).tooltips = [(x, f\"@{x}\") for x in ('score_name', 'score', 'tpr', 'fpr')]\n p.select_one(HoverTool).mode = hover_mode\n return p, aucs", "def plot_roc(preds, labels, title=\"Receiver operating 
characteristic\"):\n\n # Compute values for curve\n fpr, tpr, _ = roc_curve(labels, preds)\n\n # Compute FPR (95% TPR)\n tpr95 = fpr_at_95_tpr(preds, labels)\n\n # Compute AUROC\n roc_auc = auroc(preds, labels)\n\n # Draw the plot\n plt.figure()\n lw = 2\n plt.plot(fpr, tpr, color='darkorange',\n lw=lw, label='AUROC = %0.2f' % roc_auc)\n plt.plot([0, 1], [0.95, 0.95], color='black', lw=lw, linestyle=':', label='FPR (95%% TPR) = %0.2f' % tpr95)\n plt.plot([tpr95, tpr95], [0, 1], color='black', lw=lw, linestyle=':')\n plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--', label='Random detector ROC')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title(title)\n plt.legend(loc=\"lower right\")\n plt.show()", "def _rfigure(self, legend=True, fig=None, ax=None):\n if fig is None and ax is None:\n fig, ax = plt.subplots()\n suptitle = True\n elif fig is None:\n fig = ax.get_figure()\n suptitle = False\n elif ax is None:\n ax = fig.gca()\n suptitle = False\n\n ax.grid(True)\n\n line_rstr = None\n line_rrls = None\n line_lstr = None\n line_lrls = None\n line_minima = None\n line_maxima = None\n t = self.timevector\n for axis, trace in zip('xy', ['positionX', 'positionY']):\n s = self.get_data(traces=trace) * 1e6 # m -> µm\n r_str_rls = self.stress_release_pairs(axis=axis, direction='right')\n l_str_rls = self.stress_release_pairs(axis=axis, direction='left')\n rstr = r_str_rls['stress']['idx']\n lstr = l_str_rls['stress']['idx']\n rrls = r_str_rls['release']['idx']\n lrls = l_str_rls['release']['idx']\n\n ax.plot(t, s, lw=0.1, ms=2, color='k', alpha=1.0)\n\n # line_rstr = None\n # line_rrls = None\n # line_lstr = None\n # line_lrls = None\n for rstr, rrls in zip(rstr, rrls):\n line_rstr, = ax.plot(t[rstr], s[rstr], lw=0.4, ms=2, color='m')\n line_rrls, = ax.plot(t[rrls], s[rrls], lw=0.4, ms=2, color='c')\n for lstr, lrls in zip(lstr, lrls):\n line_lstr, = ax.plot(t[lstr], s[lstr], lw=0.4, ms=2, color='g')\n line_lrls, = ax.plot(t[lrls], s[lrls], lw=0.4, ms=2, color='y')\n\n # line_minima = None\n # line_maxima = None\n for segment in self._sf.sections[axis]:\n minima = self.undecimate_and_limit(segment['minima'])\n maxima = self.undecimate_and_limit(segment['maxima'])\n line_minima, = ax.plot(t[minima], s[minima], '.', ms=5,\n color='b')\n line_maxima, = ax.plot(t[maxima], s[maxima], '.', ms=5,\n color='r')\n\n line_excited_x = None\n for x_c in (self.undecimate_and_limit(self._sf.excited['x'])\n / self.resolution):\n line_excited_x = ax.hlines(0.0, x_c[0], x_c[1], alpha=1,\n colors='b', linestyle='solid', lw=1)\n # ax.plot(x_c[0], 0.5, '.k', alpha=1, ms=3)\n # ax.plot(x_c[1], 0.5, '.k', alpha=1, ms=3)\n ax.vlines(x_c[0], -0.01, 0.01, alpha=1, colors='b',\n linestyle='solid', lw=1)\n ax.vlines(x_c[1], -0.01, 0.01, alpha=1, colors='b',\n linestyle='solid', lw=1)\n\n line_excited_y = None\n for y_c in (self.undecimate_and_limit(self._sf.excited['y'])\n / self.resolution):\n line_excited_y = ax.hlines(0.0, y_c[0], y_c[1], alpha=1,\n colors='r', linestyle='solid', lw=1)\n # ax.plot(y_c[0], -0.5, '.k', alpha=1, ms=3)\n # ax.plot(y_c[1], -0.5, '.k', alpha=1, ms=3)\n ax.vlines(y_c[0], -0.01, 0.01, alpha=1, colors='r',\n linestyle='solid', lw=1)\n ax.vlines(y_c[1], -0.01, 0.01, alpha=1, colors='r',\n linestyle='solid', lw=1)\n\n ax.set_xlim((t[0], t[-1]))\n\n ax.set_xlabel(\"Time (s)\")\n ax.set_ylabel(\"Signal positionX and Y (µm)\")\n if suptitle:\n fig.suptitle(\"Automatically detected excited axis, minima, \"\n 
\"maxima, and sections.\")\n\n if legend:\n if line_minima is not None:\n line_minima.set_label('minima')\n if line_maxima is not None:\n line_maxima.set_label('maxima')\n if line_rstr is not None:\n line_rstr.set_label('rightstress')\n if line_rrls is not None:\n line_rrls.set_label('rightrelease')\n if line_lstr is not None:\n line_lstr.set_label('leftstress')\n if line_lrls is not None:\n line_lrls.set_label('leftrelease')\n if line_excited_x is not None:\n line_excited_x.set_label('excited x')\n if line_excited_y is not None:\n line_excited_y.set_label('excited y')\n\n ax.legend(loc='upper right')\n\n return fig", "def plot_roc(self, X, y):\n plot_roc(self.clf, X, y)", "def plot_obj_func():\n X1 = [i for i in range(-63, 65, 1)]\n Y1 = [8 * math.sin(0.06 * x) + 8 * math.cos(0.14 * x) + 8 * math.exp(math.cos(0.2*x)) for x in X1]\n plt.plot(X1, Y1)\n plt.show()", "def generate_roc_curve(clf, X, y, survived_weight=1, plot=False, n_classes=5):\n fpr = dict()\n tpr = dict()\n roc_auc = dict()\n aucs = []\n for i in range(5):\n # shuffle and split training and test sets\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.25)\n #weights = np.array([survived_weight if s == 1 else 1 for s in y_train])\n #clf.fit(X_train, y_train, sample_weight=weights)\n clf.fit(X_train, y_train)\n\n fpr[i], tpr[i], _ = roc_curve(y_test, clf.predict_proba(X_test)[:,1])\n roc_auc[i] = auc(fpr[i], tpr[i])\n aucs.append(roc_auc[i])\n print('ROC AUC: {:.2%}'.format(roc_auc[i]))\n\n auc_mean = \"{:.3%}\".format(np.mean(aucs))\n auc_std = \"{:.3%}\".format(np.std(aucs))\n auc_lower = \"{:.3%}\".format(np.mean(aucs)-np.std(aucs))\n print(\"ROC - Area under curve: {} and stddev: {}\".format(auc_mean, auc_std))\n\n if plot:\n # Plot of a ROC curve for a specific class\n plt.figure()\n for i in range(5):\n plt.plot(fpr[i], tpr[i], label='ROC curve (area = %0.2f)' % roc_auc[i])\n\n plt.plot([0, 1], [0, 1], 'k--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('ROC Curve')\n plt.legend(loc=\"lower right\")\n plt.show()", "def peek(self, **kwargs):\n\n plt.figure()\n axes = plt.gca()\n data_lab=self.meta['OBS-FREQ'][0:2] + ' ' + self.meta['OBS-FREQ'][2:5]\n axes.plot(self.data.index,self.data,label=data_lab)\n axes.set_yscale(\"log\")\n axes.set_ylim(1e-4,1)\n axes.set_title('Nobeyama Radioheliograph')\n axes.set_xlabel('Start time: ' + self.data.index[0].strftime(TIME_FORMAT))\n axes.set_ylabel('Correlation')\n axes.legend()\n plt.show()", "def main():\n df = prof_conv_bwd_filter()\n df.to_csv(\"prof.cudnnConvBwdFilter.csv\")\n\n \"\"\"visualization, Roofline model\"\"\"\n df = pd.read_csv('prof.cudnnConvBwdFilter.csv', header=0, index_col=0)\n fig = plt.figure(figsize=(10,5))\n ax = fig.add_subplot(1, 1, 1)\n plot_rooline (ax, MACHINE_SPEC, PEAK_PERF, BAND_WIDTH)\n plot_result (ax, df)\n # fig.subplots_adjust(right=0.8)\n plt.subplots_adjust(left=0.1, right=0.6)\n plt.savefig('roofline.png')\n return", "def plot_ROC(self, canvas):\n\t\tfpr = self.fpr_\n\t\ttpr = self.tpr_\n\t\tauc = self.class_auroc_\n\t\tclasses = self.classes_\n\t\tnum_classes = self.num_classes_\n\n\t\trcParams.update({'font.size': 7})\n\t\tcanvas.figure.clear()\n\t\tax = canvas.figure.subplots()\n\n\t\tax.plot(fpr['avg'], tpr['avg'], label='avg (area={0})'.format(self.avg_auroc_), \\\n\t\t\tcolor = 'black', linewidth=2, linestyle='--')\n\n\t\tcolors = cycle(['blue', 'orange', 'green', 'red', 'yellow', 'purple', 'cyan'])\n\t\tfor i, color in 
zip(range(0, num_classes), colors):\n\t\t\tax.plot(fpr[i], tpr[i], label='{0} (area={1})'.format(classes[i], auc[classes[i]]), \\\n\t\t\t\tcolor=color, linewidth=1)\n\n\t\tax.plot([0 ,1], [0, 1], color='lightgray', linewidth=1, linestyle='--')\n\t\tax.set_xlim([0.0, 1.0])\n\t\tax.set_ylim([0.0, 1.05])\n\t\tax.set_xlabel('FPR')\n\t\tax.set_ylabel('TPR')\n\t\tax.legend(loc='lower right')\n\n\t\tcanvas.figure.tight_layout()\n\t\tcanvas.draw()", "def plt_sclr(**kwargs):\n import argparse\n import os.path\n import numpy as np\n import matplotlib\n from read_xdr import readXDRsclr, readXDRstruct\n import gc\n \n dirname=os.getcwd() + '/'\n if not os.path.exists(dirname+\"plots\"):\n os.makedirs(dirname+\"plots\")\n \n class C(object):\n pass\n arg=C()\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument('-i', type=int, help='time index')\n parser.add_argument('-p', type=int, help='probe')\n parser.add_argument('-minx', type=float, help='minimum x')\n parser.add_argument('-maxx', type=float, help='maximum x')\n parser.add_argument('-minz', type=float, help='minimum z')\n parser.add_argument('-maxz', type=float, help='maximum z')\n parser.add_argument('-theta', type=float, action='append', help='azimuthal angle (range)')\n parser.add_argument('-polarz', type=float, action='append', help='polar projection at z (range)')\n \n parser.add_argument('-cmap', type=str, help='colormap')\n parser.add_argument('-cmin', type=float, help='min value')\n parser.add_argument('-cmax', type=float, help='max value')\n parser.add_argument('-slicex', type=float, action='append', help='z-slice along given x')\n parser.add_argument('-slicez', type=float, action='append', help='x-slice along given z')\n \n parser.add_argument('-f', action='store_true', help=\"add horizontal mirror image\")\n parser.add_argument('-a', action='store_true', help=\"keep aspect ratio\")\n parser.add_argument('-log', action='store_true', help=\"log scale\")\n parser.add_argument('-smooth', action='store_true', help=\"smooth contour\")\n parser.add_argument('-save', action='store_true', help=\"save only\")\n parser.add_argument('-noaxis', action='store_true', help=\"show axis\")\n parser.add_argument('-crit', type=float, help=\"use density criterion\")\n \n \n if \"arglist\" in kwargs:\n print(kwargs[\"arglist\"])\n parser.parse_args(kwargs[\"arglist\"],namespace=arg)\n else:\n parser.parse_args(namespace=arg) \n \n num = int(arg.i) if arg.i != None else 1\n ivar = int(arg.p) if arg.p != None else 0\n \n x0 = float(arg.minx) if arg.minx != None else None \n x1 = float(arg.maxx) if arg.maxx != None else None \n z0 = float(arg.minz) if arg.minz != None else None \n z1 = float(arg.maxz) if arg.maxz != None else None\n \n if arg.theta != None:\n theta = arg.theta \n if type(theta)!=list: \n theta=[theta]\n theta = np.sort(theta)\n else:\n theta = None\n \n if arg.polarz != None:\n polarZ = arg.polarz \n if type(polarZ)!=list: \n polarZ=[polarZ]\n polarZ = np.sort(polarZ)\n else:\n polarZ = None\n \n cmap = arg.cmap if arg.cmap != None else None \n cmin = float(arg.cmin) if arg.cmin != None else None \n cmax = float(arg.cmax) if arg.cmax != None else None \n \n sliceX = arg.slicex if arg.slicex != None else None\n sliceZ = arg.slicez if arg.slicez != None else None \n \n flip = arg.f\n aspect= arg.a\n logScale = arg.log\n smooth = arg.smooth\n noShow = arg.save\n noAxis = arg.noaxis\n \n crit = float(arg.crit) if arg.crit != None else None \n \n if noShow: matplotlib.use('Agg') # to run in pdebug (not using $DISPLAY)\n 
import matplotlib.pyplot as plt\n if 'classic' in plt.style.available: plt.style.use('classic')\n import matplotlib.collections as mc\n from matplotlib.colors import LogNorm\n from matplotlib import ticker\n from mpl_toolkits.axes_grid1 import make_axes_locatable\n import pylab as pl\n \n def line2d(x0,x1,z0,z1,X,Z,dat,val):\n from scipy import interpolate\n \n if (z1==z0): return()\n \n indx=np.argwhere((X>=np.min([x0,x1]))&(X<=np.max([x0,x1])))\n indx=indx.reshape(indx.shape[0],1)\n indz=np.argwhere((Z>=z0)&(Z<=z1))\n indz=indz.reshape(1,indz.shape[0])\n \n tanA=(x1-x0)/(z1-z0)\n # cosA = np.cos(np.arctan(tanA))\n npt=100\n z = np.linspace(z0,z1,npt)\n x = x0 + (z-z0)*tanA\n r = np.sqrt((x-x0)**2 + (z-z0)**2).reshape((npt,1))\n f=interpolate.interp2d(X[indx].flatten(), Z[indz].flatten(), dat[indx,indz].transpose(), kind='cubic')\n lnout = np.zeros((npt,1))\n for i in range(npt):\n lnout[i]=f(x[i], z[i])\n \n f, ax = plt.subplots()\n # ax.plot(z[1:],np.diff(lnout[:,0])/np.diff(r[:,0]),linewidth=2)\n ax.plot(z,lnout,linewidth=2)\n i = np.nonzero(lnout[::-1]>val)[0][0] \n print(x[::-1][i])\n print(z[::-1][i])\n plt.show()\n return(lnout)\n \n \n def plotSliceZ():\n data=np.zeros((len(X),1));\n if len(sliceZ)>1:\n zrange = np.linspace(sliceZ[0],sliceZ[1],100)\n else:\n zrange = sliceZ\n \n for i in range(len(X)):\n data[i,0]=np.mean(np.interp(zrange,Z[:],dat[i,:]));\n f, ax = plt.subplots()\n ax.plot(X[:],data[:,0],linewidth=2)\n ax.plot(X[:],np.ones(len(data[:,0]))*np.mean(data[:,0]),linewidth=2)\n print(\"SliceZ mean %.2e\" %(np.mean(data[:,0])))\n ax.set(title=VarNames[ivar]+\" (\" + \\\n VarUnits[ivar] + \")\" +\" X-slice at Z = %.2f\" %(sliceZ[0]) + \" at \"+tstamp+\"ns\")\n ax.set_xlim([xmin,xmax]);\n ax.set_ylim([cmin,cmax]);\n ax.set(xlabel='X (cm)', ylabel=VarUnits[ivar])\n if logScale: ax.set(yscale='log')\n f.savefig(dirname +\"plots/\" + VarNames[ivar] + \"_%06.1f\" %(time) + '_sliceZ.png',dpi=200, bbox_inches=\"tight\")\n # fout = open(dirname +\"plots/slicez.out\", \"a+\")\n # fout.write(\"%.8e %.8e\\n\" %(time, np.mean(data[0,:])));\n # fout.close() \n fout = open(dirname +\"plots/\" + VarNames[ivar] + \"_%06.1f\" %(time) + '_sliceZ.out', \"w\")\n for i in range(len(X)):\n fout.write(\"%.8e %.8e\\n\" %(X[i],data[i,0]))\n fout.close()\n if not noShow: \n plt.show()\n # # sheath front locator\n # zdum=Z[0,:]\n # ydum=np.diff(data[0,:]);\n # zfront=zdum[np.argwhere(ydum==np.min(ydum))][0]\n # print(\"sheath front z-location %.2f\" %(zfront))\n # fout = open(dirname+'sheath_front.out', \"a\")\n # fout.write(\"%.8e %.8e\" %(time, zfront));\n # fout.write(\"\\n\");\n # fout.close() \n return()\n \n def plotSliceX():\n data=np.zeros((1,len(Z)));\n if len(sliceX)>1:\n xrange = np.linspace(sliceX[0],sliceX[1],100)\n else:\n xrange = sliceX\n \n for j in range(len(Z)):\n data[0,j]=np.mean(np.interp(xrange,X[:],dat[:,j]));\n f, ax = plt.subplots()\n ax.plot(Z[:],data[0,:],linewidth=2)\n ax.plot(Z[:],np.ones(len(data[0,:]))*np.mean(data[0,:]),linewidth=2)\n print(\"SliceX integral %.2e\" %(np.trapz(data[0,:],Z)))\n ax.set(title=VarNames[ivar]+\" (\" + \\\n VarUnits[ivar] + \")\" +\" Z-slice at X = %.4f\" %(sliceX[0]) + \" at \"+tstamp+\"ns\")\n ax.set_xlim([zmin,zmax]);\n ax.set_ylim([cmin,cmax]);\n ax.set(xlabel='Z (cm)', ylabel=VarUnits[ivar])\n if logScale: ax.set(yscale='log')\n f.savefig(dirname +\"plots/\" + VarNames[ivar] + \"_%06.1f\" %(time) + '_sliceX.png',dpi=200, bbox_inches=\"tight\")\n fout = open(dirname +\"plots/slicex.out\", \"a+\")\n fout.write(\"%.8e %.8e\\n\" %(time, 
np.mean(data[0,:])));\n fout.close()\n fout = open(dirname +\"plots/\" + VarNames[ivar] + \"_%06.1f\" %(time) + '_sliceX.out', \"w\")\n for i in range(len(Z)):\n fout.write(\"%.8e %.8e\\n\" %(Z[i],data[0,i]))\n fout.close()\n\n ind = np.nonzero(data[0,::-1]>2e18)[0][0]\n fout = open(dirname +\"plots/front.out\", \"a+\")\n fout.write(\"%.8e %.8e\\n\" %(time, Z[::-1][ind]));\n fout.close() \n \n if not noShow: \n plt.show()\n return()\n \n def addStructure(ax):\n (xa,ya,za,xb,yb,zb)=readXDRstruct(dirname+'struct.p4')\n lines=[[(za[i],xa[i]),(zb[i],xb[i])] for i in range(len(xa))]\n lc = mc.LineCollection(lines, color=(0.9,0.9,0.9),linewidths=1)\n ax.add_collection(lc)\n if flip:\n lines=[[(za[i],-xa[i]),(zb[i],-xb[i])] for i in range(len(xa))]\n lc = mc.LineCollection(lines, color=(0.9,0.9,0.9),linewidths=1)\n ax.add_collection(lc)\n return()\n \n def plotPolar():\n r, th = np.meshgrid(X, Y)\n f, ax = plt.subplots(subplot_kw=dict(projection='polar'),figsize=(8,8))\n cm = matplotlib.cm.get_cmap(cmap)\n val = dat.transpose(1,0)\n # contour_levels = arange(cmin, 3, 0.05)\n if logScale:\n # im = ax.pcolor(th, r, dat.transpose(1,0),norm=LogNorm(vmin=cmin,vmax=cmax),cmap=cm)\n im = ax.contourf(th, r, val,norm=LogNorm(vmin=cmin,vmax=cmax),cmap=cm)\n else:\n # im = ax.pcolor(th, r, dat.transpose(1,0),vmin=cmin,vmax=cmax,cmap=cm)\n im = ax.contourf(th, r, val,100,vmin=cmin,vmax=cmax,cmap=cm)\n ax.xaxis.grid(True, zorder=0)\n ax.yaxis.grid(True, zorder=0)\n \n \n if noAxis:\n ax.set(title=VarNames[ivar]) \n ax.get_xaxis().set_visible(True)\n ax.get_yaxis().set_visible(False)\n else:\n title = VarNames[ivar] + \" (\" + VarUnits[ivar] + \")\" +\"/Time=%.1fns/Z=%.1fcm\" %(time,polarZ[0])\n ax.set(title=title, xlabel=\"R (cm)\")\n plt.colorbar(im)\n \n \n f.savefig(dirname + \"plots/\" + VarNames[ivar] + \"_T%06.1f_Z%04.1f.png\" %(time,polarZ[0]),dpi=200, bbox_inches=\"tight\")\n if not noShow: \n plt.show()\n return()\n \n \n \n #######################################################\n \n fname=dirname+'sclr'+str(num)+'.p4'\n print(fname)\n \n if \"sdata\" in kwargs:\n (X,Y,Z,Var,VarNames,VarUnits,time)=kwargs[\"sdata\"]\n else:\n (X,Y,Z,Var,VarNames,VarUnits,time)=readXDRsclr(fname,silent=noShow)\n \n # if not noShow:\n # for i in range(len(Var)):\n # print(str(i) + \" \" + VarNames[i] + \" \" + VarUnits[i])\n tstamp = \"%.2f\" % time\n \n dat=Var[ivar]\n if crit!=None:\n cvar = VarNames.index(\"RhoT\" + VarNames[ivar][4:])\n dat[np.nonzero(Var[cvar]<crit)]=0.0\n \n if flip: x0=min(X)\n xmin=x0 if (x0!=None) and (x0>min(X)) and (x0<=max(X)) else min(X)\n xmax=x1 if (x1!=None) and (x1>xmin) and (x1<=max(X)) else max(X)\n zmin=z0 if (z0!=None) and (z0>min(Z)) and (z0<max(Z)) else min(Z)\n zmax=z1 if (z1!=None) and (z1>zmin) and (z1<=max(Z)) else max(Z)\n \n indx=np.argwhere((X>=xmin)&(X<=xmax))\n indx=indx.reshape(indx.shape[0],1,1)\n indz=np.argwhere((Z>=zmin)&(Z<=zmax))\n indz=indz.reshape(1,1,indz.shape[0])\n indy=np.arange(len(Y)).reshape(1,len(Y),1)\n \n #indz=np.transpose(indz)\n X=X[indx].flatten()\n Y=Y[indy].flatten()\n Z=Z[indz].flatten()\n \n dat=dat[indx,indy,indz]\n \n if type(polarZ)!=type(None):\n # calculate weights for nonuniform grid spacing\n dz = np.diff(Z)\n wZ = np.ones(len(Z))\n wZ[0] = 0.5*dz[0]\n wZ[1:-1]=0.5*(dz + np.roll(dz,-1))[0:-1]\n wZ[-1]=0.5*dz[-1]\n \n zdum = polarZ[0]\n k10 = np.max([1,int(np.argwhere(Z>=zdum)[0][0])])\n k00 = int(k10-1)\n w00 = (Z[k10] - zdum)/(Z[k10] - Z[k00])\n wZ[k00] *= w00\n \n zdum = polarZ[-1]\n k11 = np.max([1,int(np.argwhere(Z>=zdum)[0][0])])\n 
k01 = int(k11-1)\n w01 = (Z[k11] - zdum)/(Z[k11] - Z[k01])\n wZ[k11] *= (1-w01)\n dat = np.average(dat[:,:,k00:k11+1], axis=2, weights=wZ[k00:k11+1])\n \n plotPolar()\n exit() \n \n if type(theta)!=type(None):\n wY = np.ones(len(Y))\n # calculate weights for nonuniform grid spacing\n if len(Y)>1:\n dy = np.diff(Y)\n wY[0] = 0.5*dy[0]\n wY[1:-1]=0.5*(dy + np.roll(dy,-1))[0:-1]\n wY[-1]=0.5*dy[-1] \n \n thdum = np.radians(theta[0])\n thdum = divmod(thdum,np.pi*2)[1] if thdum>np.pi*2 else thdum\n j10 = int(np.argwhere(Y>=thdum)[0][0])\n if j10==0: \n j10+=1\n j00 = int(j10-1)\n w00 = (Y[j10] - thdum)/(Y[j10] - Y[j00])\n wY[j00] = w00*wY[j00] # weight of outermost left grid point\n \n thdum = np.radians(theta[-1])\n thdum = divmod(thdum,np.pi*2)[1] if thdum>np.pi*2 else thdum\n j11 = int(np.argwhere(Y>=thdum)[0][0])\n if j11==0: \n j11+=1 \n j01 = int(j11-1)\n w01 = (Y[j11] - thdum)/(Y[j11] - Y[j01])\n wY[j11] = (1 - w01)*wY[j11] # # weight of outermost right grid point\n \n dat = np.average(dat[:,j00:j11+1,:], axis=1, weights=wY[j00:j11+1])\n else:\n dat=np.squeeze(dat[:,0,:])\n \n if cmin is None:\n cmin = dat.min()\n if cmax is None:\n cmax = dat.max()\n \n fig, ax = plt.subplots(figsize=(8,8))\n if flip:\n dat=np.concatenate((np.flipud(dat),dat),axis=0);\n X = np.concatenate([-X[::-1],X])\n \n cm = matplotlib.cm.get_cmap(cmap)\n \n if logScale:\n locator = ticker.LogLocator(base=10)\n if smooth:\n dat[np.nonzero(dat>cmax)] = cmax\n dat[np.nonzero(dat<cmin)] = cmin\n levels = np.power(10,np.linspace(np.log10(cmin),np.log10(cmax),num=100))\n im = ax.contourf(Z,X,dat,norm=LogNorm(vmin=cmin,vmax=cmax),cmap=cm,locator=locator,levels=levels)\n else:\n im = ax.pcolor(Z,X,dat,norm=LogNorm(vmin=cmin,vmax=cmax),cmap=cm)\n else:\n if smooth:\n dat[np.nonzero(dat>cmax)] = cmax\n dat[np.nonzero(dat<cmin)] = cmin \n levels = ticker.MaxNLocator(nbins=100).tick_values(cmin, cmax)\n im = ax.contourf(Z,X,dat,vmin=cmin,vmax=cmax,cmap=cm,levels=levels)\n else:\n im = ax.pcolor(Z,X,dat,vmin=cmin,vmax=cmax,cmap=cm)\n \n ax.set_xlim([min(Z[:]),max(Z[:])])\n ax.set_ylim([min(X[:]),max(X[:])])\n \n title = VarNames[ivar] + \" (\" + VarUnits[ivar] + \")\" + \"/Time %.1fns\" %(time)\n if type(theta)!=type(None):\n title = title + \"/Angle %.1f-%.1fdeg\" %(theta[0],theta[-1])\n ax.set(title=title,xlabel=\"Z (cm)\",ylabel=\"R (cm)\")\n \n # add structure\n if os.path.isfile(dirname+'struct.p4'):\n addStructure(ax)\n \n if (aspect or flip):\n ax.set_aspect('equal');\n else:\n ax.set_aspect('auto') \n \n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n \n if not logScale: #set exponent base for colorbar formatter\n CB = plt.colorbar(im, cax=cax)\n CB.formatter.set_powerlimits((0, 2))\n CB.update_ticks()\n else:\n CB = plt.colorbar(im, cax=cax, ticks=locator)\n \n if noAxis:\n ax.set(title=VarNames[ivar],\n xlabel=\"Z (cm)\", ylabel=\"R (cm)\") \n plt.axis('off')\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n \n ftitle = VarNames[ivar] + \"_T%07.1f\" %(time)\n if type(theta)!=type(None):\n ftitle = ftitle + \"_A%5.1f-%5.1f\" %(theta[0],theta[-1])\n fig.savefig(dirname + \"plots/\" + ftitle + '.png',dpi=200, bbox_inches=\"tight\")\n #cax.yaxis.major.locator.set_params(nbins=8) \n \n if sliceX!= None and sliceX[-1]<=xmax and sliceX[0]>=xmin: # plot slice along Z\n plotSliceX()\n \n if sliceZ!= None and sliceZ[-1]<=zmax and sliceZ[0]>=zmin: # plot slice along X\n plotSliceZ()\n \n # line2d(0,2,0,8,X,Z,dat,1.5e18)\n \n if not noShow: \n plt.show()\n 
\n return()", "def plot_roc_curves(labels, probas, name='', ax=None):\n # Setup axis\n if ax is None:\n fig, ax = plt.subplots(figsize=(20, 10))\n\n plot_roc(labels, probas, name=name, ax=ax)\n\n # Plot chance\n ax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='black', alpha=.8)\n\n # Fill bottom right\n ax.fill_between([0, 1], [0, 1], alpha=0.3, color='black')\n\n # Settings\n ax.set_xlabel('False Positive Rate or (1 - Specifity)', fontsize=15)\n ax.set_ylabel('True Positive Rate or (Sensitivity)', fontsize=15)\n ax.set_title('Receiver Operating Characteristic', weight='bold', fontsize=18)\n ax.set_xlim([0, 1])\n ax.set_ylim([0, 1])\n ax.legend(loc='lower right')\n\n return ax", "def plot(t): \n assert isinstance(t, int), \"'t' argument should be an integer.\"\n assert t > 0, \"'t' argument should be a positive integer.\" \n # Initialize arrays with zeros to store mean cumulative rewards upto t \n # rounds for each of the three implemented bandit algorithms\n EpsGreedy_rewards = np.zeros(t)\n UCB_rewards = np.zeros(t)\n LinUCB_rewards = np.zeros(t)\n # For each round, store the mean cumulative rewards upto that round\n for i in range(1,t):\n EpsGreedy_rewards[i] = np.sum(results_EpsGreedy[0:i]) / t\n UCB_rewards[i] = np.sum(results_UCB[0:i]) / t\n LinUCB_rewards[i] = np.sum(results_LinUCB[0:i]) / t\n # Plot running per round cumulative reward\n plt.plot(range(0,t), EpsGreedy_rewards, color='b', label='e-Greedy')\n plt.plot(range(0,t), UCB_rewards, color='g', label='UCB')\n plt.plot(range(0,t), LinUCB_rewards, color='orange', label='LinUCB')\n plt.xlabel('Round')\n plt.ylabel('Mean Cumulative Reward')\n plt.title('Running Per Round Cumulative Reward')\n plt.legend()\n plt.show()", "def _bokeh_roc_curve(\n y_true_binary: np.ndarray,\n y_pred_score: np.ndarray,\n title_rows: Sequence[str],\n sample_weights: Optional[np.ndarray],\n) -> Callable[[], Figure]:\n assert y_true_binary.shape == y_pred_score.shape\n assert set(y_true_binary).issubset({0, 1}) or set(y_true_binary).issubset(\n {False, True}\n )\n assert np.ndim(y_true_binary) == 1\n\n fpr, tpr, thresholds = sklmetrics.roc_curve(\n y_true=y_true_binary, y_score=y_pred_score, sample_weight=sample_weights\n )\n\n def figure() -> Figure:\n source = ColumnDataSource(\n data={\n \"FPR\": fpr,\n \"TPR\": tpr,\n \"threshold\": thresholds,\n \"specificity\": 1.0 - fpr,\n }\n )\n\n p = plotting.figure(\n plot_height=400,\n plot_width=350,\n tools=TOOLS,\n toolbar_location=TOOLBAR_LOCATION,\n # toolbar_location=None, # hides entire toolbar\n match_aspect=True,\n )\n\n p.xaxis.axis_label = \"FPR\"\n p.yaxis.axis_label = \"TPR\"\n\n add_title_rows(p, title_rows)\n apply_default_style(p)\n\n curve = p.line(x=\"FPR\", y=\"TPR\", line_width=2, color=DARK_BLUE, source=source)\n p.line(\n x=[0.0, 1.0],\n y=[0.0, 1.0],\n line_alpha=0.75,\n color=\"grey\",\n line_dash=\"dotted\",\n )\n\n p.add_tools(\n HoverTool(\n # make sure there is no tool tip for the diagonal baseline\n renderers=[curve],\n tooltips=[\n (\"TPR\", \"@TPR\"),\n (\"FPR\", \"@FPR\"),\n (\"Sensitivity\", \"@TPR\"),\n (\"Specificity\", \"@specificity\"),\n (\"Threshold\", \"@threshold\"),\n ],\n # display a tooltip whenever the cursor is vertically in line with a glyph\n mode=\"vline\",\n )\n )\n\n return p\n\n return figure", "def plot(self):\n\t\tself.plotOfXray().plot()", "def plot():\n xvals = np.arange(-50, 250, step=0.1)\n\n fig = plt.figure()\n plt.suptitle(\"Gaussian with smooth transition to power law\")\n\n A0vals = [10, 11]\n avals = [5*10**-3, 10**-3, 5*10**-4]\n ttvals = 
[10., 50., 100.]\n cvals = [-0.1, -0.9, -5./3., -4.]\n offset = [-30, 0.0, 30]\n\n paramvals = [A0vals, avals, ttvals,cvals, offset]\n titles, labels = return_parameter_names()\n\n nplots = len(paramvals)\n\n for i in range(nplots):\n plt.subplot(nplots, 1, i+1)\n vals = paramvals[i]\n for j in range(len(vals)):\n pset = list(default())\n pset[i] = vals[j]\n yvals=[]\n ypower=[]\n ypeak=[]\n for x in xvals:\n yvals.append(fitfunc(x, pset))\n ypeak.append(logpeak(x,pset))\n if x > 0:\n ypower.append(logpowerlaw(x,pset))\n label = labels[i] + \"=\"+str(vals[j])\n plt.plot(xvals, yvals, label = label)\n\n plt.title(titles[i])\n plt.legend()\n\n fig.set_size_inches(15, 30)\n plt.savefig(\"graphs/misc/lightcurve_models.pdf\")\n plt.close()", "def run_plot(args):\n # print(\"running chronqc_plot\")\n chronqc_plot.main(args)", "def main():\n # Create our array of x values\n x = np.linspace(-np.pi, np.pi)\n\n # plot x and sin(x)\n plt.plot(x, np.sin(x))\n\n # plot x and cos(x)\n plt.plot(x, np.cos(x))\n\n #Add title and labels\n plt.title(\"Graph of Sine and Cosine\")\n plt.xlabel(\"Radians\")\n\n # Show graph in matplotlib gui\n plt.show()", "def plot(self,displayplt = True,saveplt = False,savepath='',polarplt=True, dbdown = False):\n plt.figure()\n\n #legacy beamprofile data is a 1-D array of the peak negative pressure\n if len(self.hydoutput.shape)<2:\n pnp = self.hydoutput\n else:\n sensitivity = hyd_calibration(self.cfreq)\n pnp = -1*np.min(self.hydoutput,1)/sensitivity\n\n if dbdown:\n pnp = 20.0*np.log10(pnp/np.max(pnp))\n else:\n pnp = pnp*1e-6\n\n figure1 = plt.plot(self.depth, pnp)\n #the latest beamprofile data should be a 2-D array of the hydrophone output\n plt.xlabel('Depth (mm)')\n if dbdown:\n plt.ylabel('Peak Negative Pressure (dB Max)')\n else:\n plt.ylabel('Peak Negative Pressure (MPa)')\n plt.title(self.txdr)\n if displayplt:\n plt.show()\n if saveplt:\n if savepath=='':\n #prompt for a save path using a default filename\n defaultfn = self.txdr+'_'+self.collectiondate+'_'+self.collectiontime+'_depthprofile.png'\n savepath = tkinter.filedialog.asksaveasfilename(initialfile=defaultfn, defaultextension='.png')\n plt.savefig(savepath)\n return figure1, savepath", "def plot_curves():\n lm = np.arange(0, 1.8, .01)\n vm = np.arange(-1.2, 1.2, .01)\n lt = np.arange(0, 1.07, .01)\n plt.subplot(2,1,1)\n plt.plot(lm, force_length_muscle(lm), 'r')\n plt.plot(lm, force_length_parallel(lm), 'g')\n plt.plot(lt, force_length_tendon(lt), 'b')\n plt.legend(('CE', 'PE', 'SE'))\n plt.xlabel('Normalized length')\n plt.ylabel('Force scale factor')\n plt.subplot(2, 1, 2)\n plt.plot(vm, force_velocity_muscle(vm), 'k')\n plt.xlabel('Normalized muscle velocity')\n plt.ylabel('Force scale factor')\n plt.tight_layout()\n plt.show()", "def plot_roc(self, out_tag):\n roc_fig = self.plotter.plot_roc(self.y_train, self.y_pred_train, self.train_weights, \n self.y_test, self.y_pred_test, self.test_weights, out_tag=out_tag\n )\n\n Utils.check_dir('{}/plotting/plots/{}'.format(os.getcwd(), out_tag))\n roc_fig.savefig('{0}/plotting/plots/{1}/{1}_ROC_curve.pdf'.format(os.getcwd(),out_tag))\n print('saving: {0}/plotting/plots/{1}/{1}_ROC_curve.pdf'.format(os.getcwd(),out_tag))\n plt.close()\n\n #for MVA ROC comparisons later on\n np.savez(\"{}/models/{}_ROC_comp_arrays\".format(os.getcwd(), out_tag), self.y_pred_test, self.y_pred_test, self.test_weights)", "def plot_roc_curve(y, y_pred, threshold_step=0.001):\r\n # define the thresholds that will be used to compute the ROC curve\r\n thresholds = 
np.arange(threshold_step, 1.0, threshold_step)\r\n\r\n # define the list with the values of (sensitivity and 1 - specificity)\r\n recalls = []\r\n fall_outs = []\r\n\r\n # compute the metrics for every threshold\r\n for threshold in thresholds:\r\n\r\n # get the roc metrics\r\n recall, fall_out = roc_metrics(y_pred, y, threshold=threshold)\r\n\r\n # append to the corresponding lists\r\n recalls.append(recall)\r\n fall_outs.append(fall_out)\r\n\r\n # configure the size of the ROC curve plots\r\n plt.rcParams[\"figure.figsize\"] = [15, 10]\r\n plt.rcParams[\"xtick.labelsize\"] = 14\r\n plt.rcParams[\"ytick.labelsize\"] = 14\r\n\r\n # plot the ROC curve\r\n plt.plot(fall_outs, recalls, color=\"darkcyan\", label=\"RNN Classifier\")\r\n\r\n # plot y = x for comparison\r\n x = np.arange(0, 1.01, 0.1)\r\n plt.plot(x, x, color=\"brown\", linestyle=\"--\", label=r\"$y\\;=\\;x$\")\r\n\r\n # add legend, labels and title\r\n plt.xlabel(\"\\n1 - Specificity\", fontsize=20)\r\n plt.ylabel(\"Sensitivity\\n\", fontsize=20)\r\n plt.title(\"ROC curve\\n\", fontsize=25)\r\n plt.legend()\r\n plt.grid()\r\n plt.show()", "def plot_roc(model, X_test, Y_test, verbose=False):\n\n y_true, y_pred = Y_test, model.predict(X_test)\n if verbose:\n print(\"CLASSIFICATION REPORT\")\n print(classification_report(y_true, y_pred))\n\n y_pred_prob = model.predict_proba(X_test)[:,1]\n\n fpr, tpr, _ = roc_curve(Y_test, y_pred_prob)\n\n if verbose:\n print(\"TESTING PROBABILITIES:\")\n for a,b in zip(Y_test,y_pred_prob):\n print(a,b)\n \n if verbose:\n print(\"ROC RAW DATA:\")\n for a,b in zip(fpr, tpr):\n print(a,b)", "def _roc_plot(metrics, dummy_metrics, save_name):\n plt.figure()\n plt.plot([0, 1], [0, 1], \"k--\")\n if dummy_metrics is None or len(dummy_metrics) == 0:\n do_dummy = False\n else:\n do_dummy = True\n\n # Compute average ROC\n base_fpr = np.linspace(0, 1, 1000)\n # base_fpr = np.logspace(-4, 0, 1000)\n all_tpr = np.zeros((len(metrics), len(base_fpr)), float)\n for i, metric_set in enumerate(metrics):\n all_tpr[i, :] = np.interp(base_fpr, metric_set[\"fpr\"], metric_set[\"tpr\"])\n\n if do_dummy:\n all_tpr_dummy = np.zeros((len(dummy_metrics), len(base_fpr)), float)\n for i, metric_set in enumerate(dummy_metrics):\n all_tpr_dummy[i, :] = np.interp(\n base_fpr, metric_set[\"fpr\"], metric_set[\"tpr\"]\n )\n\n for _, metric_set in enumerate(dummy_metrics):\n plt.plot(\n metric_set[\"fpr\"],\n metric_set[\"tpr\"],\n color=\"lightsteelblue\",\n linewidth=0.5,\n alpha=0.5,\n )\n\n for _, metric_set in enumerate(metrics):\n plt.plot(\n metric_set[\"fpr\"], metric_set[\"tpr\"], color=\"lightsalmon\", linewidth=0.5\n )\n\n tpr_mu = all_tpr.mean(axis=0)\n plt.plot(base_fpr, tpr_mu, \"r\")\n\n if do_dummy:\n dummy_mu = all_tpr_dummy.mean(axis=0)\n plt.plot(base_fpr, dummy_mu, \"b\")\n\n plt.xscale(\"log\")\n plt.yscale(\"log\")\n plt.xlabel(\"False Positive Rate\")\n plt.ylabel(\"True Positive Rate\")\n plt.tight_layout()\n plt.grid()\n plt.savefig(save_name)", "def plot_roccurves_per_bkg(signals, bkgs, cut_function, cut_values, ax=None):\n # Get a default ax if none is given\n if ax is None:\n import matplotlib.pyplot as plt\n fig = plt.figure(figsize=(8,8))\n ax = fig.gca()\n # Get signal efficieny once\n eff_sig, n_pass_sig, n_total_sig = get_eff(svjflatanalysis.iterate(signals), cut_function, cut_values)\n # Perform some basic plotting setup\n ax.plot([0.0,1.0], [0.0,1.0], linestyle='--', color='xkcd:gray')\n ax.set_xlim(0.0, 1.05)\n ax.set_ylim(0.0, 1.05)\n ax.set_ylabel('Signal eff.', fontsize=DEFAULT_FONTSIZE)\n 
ax.set_xlabel('Bkg eff.', fontsize=DEFAULT_FONTSIZE)\n ax.legend(fontsize=DEFAULT_FONTSIZE)\n # Then efficiencies per bkg category (ttjets, qcd, ...)\n bkg_categories = list(set([ b.get_category() for b in bkgs ]))\n bkg_categories.sort()\n lines = {}\n for bkg_cat in bkg_categories:\n # Get Datasets that have this category\n bkgs_this_cat = [ b for b in bkgs if b.get_category() == bkg_cat ]\n # Compute efficiency in this category\n eff_bkg, n_pass_bkg, n_total_bkg = get_eff(svjflatanalysis.iterate(bkgs_this_cat), cut_function, cut_values)\n # Draw roccurve for this category\n line = _draw_roccurve(eff_sig, eff_bkg, cut_values, ax)\n line.set_label(bkg_cat)\n # Save this line in a dict for potential outputting/modifying\n lines[bkg_cat] = line\n return ax", "def plot(\n self,\n curve: Optional[Tuple[Tensor, Tensor, Tensor]] = None,\n ax: Optional[_AX_TYPE] = None,\n ) -> _PLOT_OUT_TYPE:\n curve = curve or self.compute()\n return plot_curve(\n curve,\n ax=ax,\n label_names=(\"False positive rate\", \"True positive rate\"),\n name=self.__class__.__name__,\n )", "def plot_data(self):", "def plot_it(self, ui=None, res_dir=None):\r\n print_it(\"plotting results\")\r\n plt_profile(self.sun, PltOpts.DD.value, res_dir, SubDir.profiles.value)\r\n plt_profile(self.sun, PltOpts.DDD.value, res_dir, SubDir.profiles.value)\r\n plt_profile(self.planet, PltOpts.DD.value, res_dir,\r\n SubDir.profiles.value)\r\n plt_profile(self.planet, PltOpts.DDD.value, res_dir,\r\n SubDir.profiles.value)\r\n plt_profile_approx(res_dir, SubDir.profiles.value)\r\n plt_contact(self.sun, self.planet, PltOpts.DD.value, res_dir,\r\n SubDir.contacts.value)\r\n plt_contact(self.sun, self.planet, PltOpts.DDD.value, res_dir,\r\n SubDir.contacts.value)\r\n\r\n plt_3d(self.sun.x_axis, self.sun.y_axis, self.sun.press,\r\n self.sun.x_label, self.sun.y_label, 'pressure in MPa',\r\n 'contact_pressure_sun', res_dir, SubDir.pressures.value,\r\n 'contact_pressure_sun')\r\n plt_2d_scatt_line(self.sun.x_axis, self.pv, self.sun.x_axis, self.pv,\r\n self.sun.x_label,\r\n 'pv_rel in {}'.format(Unit.pvrel.value), 'pv_rel',\r\n res_dir, SubDir.energy.value, 'pv_rel')\r\n plt_2d_scatt_line(self.sun.x_axis, self.sun.e_akin, self.sun.x_axis,\r\n self.sun.e_akin, self.sun.x_label,\r\n 'e_akin in {}'.format(Unit.eakin.value), 'e_akin',\r\n res_dir, SubDir.energy.value, 'sun.e_akin')\r\n plt_2d_scatt_line(self.planet.x_axis, self.planet.e_akin,\r\n self.planet.x_axis, self.planet.e_akin,\r\n self.planet.x_label,\r\n 'e_akin in {}'.format(Unit.eakin.value), 'e_akin',\r\n res_dir,\r\n SubDir.energy.value, 'planet.e_akin')\r\n plt_energy_ring_on_ring(self, res_dir, SubDir.energy.value,\r\n 'e-akin-vs-pv-rel')", "def prc(test_set_y_org,test_set_y_pred_prob,classes_unique,plot_curve=False,filename=\"./fig_prc.pdf\",colors=None,positive_class_for_two_classes=None,figwidth=5,figheight=5):\n from sklearn.metrics import precision_recall_curve\n from sklearn.metrics import average_precision_score\n #from scipy import interp\n import matplotlib as mpl\n mpl.use(\"pdf\")\n import matplotlib.pyplot as plt\n \n precision = dict()\n recall = dict()\n average_precision = dict()\n n_classes=len(classes_unique)\n test_set_Y_org,test_set_y_org_unique=membership_vector_to_indicator_matrix(test_set_y_org)\n \n for c in range(n_classes):\n precision[c], recall[c], _ = precision_recall_curve(test_set_Y_org[:, c], test_set_y_pred_prob[:, c])\n average_precision[c] = average_precision_score(test_set_Y_org[:, c], test_set_y_pred_prob[:, c])\n \n # Compute macro-average ROC 
curve and AUROC area\n # First aggregate all recalls\n all_recall = np.unique(np.concatenate([recall[c] for c in range(n_classes)]))\n #all_recall = np.sort(np.concatenate([recall[c] for c in range(n_classes)]))\n # Then interpolate all PRC curves at this points\n mean_precision = np.zeros_like(all_recall)\n for c in range(n_classes):\n mean_precision = mean_precision + np.interp(all_recall, recall[c][::-1], precision[c][::-1]) # xp in interp() must be in increasing order\n # Finally average it and compute AUPRC\n mean_precision = mean_precision/n_classes\n recall[\"macro\"] = all_recall\n precision[\"macro\"] = mean_precision\n #roc_auc[\"macro\"] = auc(fpr[\"macro\"], tpr[\"macro\"])\n # Compute micro-average ROC curve and ROC area\n precision[\"micro\"], recall[\"micro\"], _ = precision_recall_curve(test_set_Y_org.ravel(), test_set_y_pred_prob.ravel())\n average_precision[\"macro\"] = average_precision_score(test_set_Y_org, test_set_y_pred_prob, average=\"macro\") # micro macro, weighted, or samples\n average_precision[\"micro\"] = average_precision_score(test_set_Y_org, test_set_y_pred_prob, average=\"micro\") # micro macro, weighted, or samples\n average_precision[\"weighted\"] = average_precision_score(test_set_Y_org, test_set_y_pred_prob, average=\"weighted\") # micro macro, weighted, or samples\n average_precision[\"samples\"] = average_precision_score(test_set_Y_org, test_set_y_pred_prob, average=\"samples\") # micro macro, weighted, or samples\n\n if plot_curve:\n fig=plt.figure(num=1,figsize=(figwidth,figheight))\n ax=fig.add_subplot(1,1,1)\n if n_classes>2 or positive_class_for_two_classes is None:\n ax.plot(recall[\"macro\"], precision[\"macro\"], linewidth=1,color=colors[n_classes],label='macro-avg PRC (area={0:0.4f})'.format(average_precision[\"macro\"]))\n \n for c in range(n_classes):\n if positive_class_for_two_classes==None or (n_classes==2 and positive_class_for_two_classes==c):\n ax.plot(recall[c], precision[c],linewidth=1,color=colors[c],label='PRC of {0} (area={1:0.4f})'.format(classes_unique[c], average_precision[c]))\n \n # add some text for labels, title and axes ticks\n ax.set_ylim(0.0,1.0)\n ax.set_xlim(0.0,1.0)\n ax.set_ylabel(\"Precision\",fontsize=12)\n ax.set_xlabel(\"Recall\",fontsize=12) \n #ax.set_title(\"\",fontsize=15)\n ax.legend(loc=\"lower left\",fontsize=8)\n #plt.subplots_adjust(bottom=0.12) # may this is not working because of the following setting\n fig.savefig(filename,bbox_inches='tight')\n plt.close(fig)\n\n average_precision_list=[average_precision[c] for c in range(n_classes)]\n average_precision_list.extend([average_precision[\"macro\"],average_precision[\"micro\"],average_precision[\"weighted\"],average_precision[\"samples\"]])\n average_precision=np.array(average_precision_list)\n names=[\"AUPRC_\" + c for c in classes_unique]\n names.extend([\"macro\",\"micro\",\"weighted\",\"samples\"])\n names=np.array(names)\n \n return average_precision,names", "def generate_plot(self):\r\n\t\tx, y = zip(*[p.p for p in self.universe])\r\n\t\tself.ax.cla()\r\n\t\tself.ax.plot(x, y, '.')\r\n\t\tself.ax.set_title('Universe at time: %d' % self.universe.time)\r\n\t\tself.ax.set_xlim([P_MU-4*P_STD, P_MU+4*P_STD])\r\n\t\tself.ax.set_ylim([P_MU-4*P_STD, P_MU+4*P_STD])", "def plot_plateau(x,y,p,n,Vdc):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_xlabel('Vrf [V]')\n ax.set_ylabel('Current [nA]')\n fig.suptitle('Vdc = '+str(Vdc)+' n = '+str(n), fontsize=24)\n \n plt.plot(x,y,'x',label='Experimental data') \n t = np.linspace(min(x),max(x),1000)\n 
plt.plot(t,f(t,p[0],p[1],p[2]),label='Fit')\n plt.axhline(y=n*e*frequency*1e9, color='black', linestyle='-')\n\n ax.legend()\n plt.show(block=True)\n plt.pause(0.3)\n plt.close()\n \n return None" ]
[ "0.7373159", "0.7010362", "0.6923584", "0.6742388", "0.66598225", "0.65181124", "0.6510751", "0.6500974", "0.64510316", "0.6430082", "0.6429037", "0.6416149", "0.63878846", "0.63805693", "0.6365573", "0.63452685", "0.63356173", "0.6325078", "0.6313239", "0.6286145", "0.6283321", "0.62715095", "0.6270201", "0.6249389", "0.6241064", "0.6240671", "0.6230367", "0.62162125", "0.62142867", "0.62104845", "0.6204575", "0.6197078", "0.6183933", "0.6183933", "0.6163628", "0.6151888", "0.61392885", "0.6135277", "0.61102885", "0.6076018", "0.6064203", "0.6061002", "0.6059636", "0.60573685", "0.6054017", "0.60452235", "0.60411924", "0.6031679", "0.6031338", "0.6026128", "0.60124516", "0.60116565", "0.60056424", "0.5999115", "0.59990364", "0.59967065", "0.5995544", "0.5994399", "0.5982529", "0.5979819", "0.59747475", "0.5970937", "0.59532815", "0.59531575", "0.5952111", "0.5949315", "0.5947619", "0.5944751", "0.5941138", "0.59283084", "0.59238636", "0.5922442", "0.59198153", "0.5917639", "0.5915845", "0.5909855", "0.5903664", "0.5897363", "0.5894748", "0.58943677", "0.5892176", "0.58903164", "0.5889229", "0.5888723", "0.5882863", "0.58581537", "0.58521056", "0.58502567", "0.58467066", "0.5843448", "0.5839311", "0.5825716", "0.5822391", "0.58213085", "0.5804416", "0.58009946", "0.57965744", "0.57943356", "0.5791571", "0.5788195" ]
0.7444382
0
Plots the roccurve per background category. Assumes signals are all datasets of the same signal.
def plot_roccurves_per_bkg(signals, bkgs, cut_function, cut_values, ax=None):
    # Get a default ax if none is given
    if ax is None:
        import matplotlib.pyplot as plt
        fig = plt.figure(figsize=(8,8))
        ax = fig.gca()
    # Get signal efficieny once
    eff_sig, n_pass_sig, n_total_sig = get_eff(svjflatanalysis.iterate(signals), cut_function, cut_values)
    # Perform some basic plotting setup
    ax.plot([0.0,1.0], [0.0,1.0], linestyle='--', color='xkcd:gray')
    ax.set_xlim(0.0, 1.05)
    ax.set_ylim(0.0, 1.05)
    ax.set_ylabel('Signal eff.', fontsize=DEFAULT_FONTSIZE)
    ax.set_xlabel('Bkg eff.', fontsize=DEFAULT_FONTSIZE)
    ax.legend(fontsize=DEFAULT_FONTSIZE)
    # Then efficiencies per bkg category (ttjets, qcd, ...)
    bkg_categories = list(set([ b.get_category() for b in bkgs ]))
    bkg_categories.sort()
    lines = {}
    for bkg_cat in bkg_categories:
        # Get Datasets that have this category
        bkgs_this_cat = [ b for b in bkgs if b.get_category() == bkg_cat ]
        # Compute efficiency in this category
        eff_bkg, n_pass_bkg, n_total_bkg = get_eff(svjflatanalysis.iterate(bkgs_this_cat), cut_function, cut_values)
        # Draw roccurve for this category
        line = _draw_roccurve(eff_sig, eff_bkg, cut_values, ax)
        line.set_label(bkg_cat)
        # Save this line in a dict for potential outputting/modifying
        lines[bkg_cat] = line
    return ax
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Make_Binned_ANN_ROC_Curves(title,Signal_title,Background_title,bins,log=False):\n #hsv = plt.get_cmap('hsv')\n #color = hsv(np.linspace(0,1.0,len(bins)-1))\n #color = ['b', 'g', 'r', 'c', 'm', 'y']\n if len(bins)<=6:\n color = ['red','green','blue','orange','brown']\n else:\n color = ['deepskyblue','rosybrown','olivedrab','royalblue','firebrick','chartreuse','navy','red','darkorchid','lightseagreen','mediumvioletred','blue']\n nbins = 60\n\tdis_string = \"ANN_\"\n\n Signal_file = rt.TFile(\"Thesis_Plots/root_files/{}_ANN_histograms.root\".format(Signal_title),\"READ\")\n Background_file = rt.TFile(\"Thesis_Plots/root_files/{}_ANN_histograms.root\".format(Background_title),\"READ\")\n\n plt.figure(\"ROC\")\n plt.clf()\n\n for bin_ in range(len(bins)-1):\n Dis_Signal_Eff = FCM.Get_ROC_Efficiencies(Signal_file.Get(dis_string+str(bins[bin_])+\"_\"+str(bins[bin_+1])),(0,1),nbins,0)\n Dis_BG_Eff = FCM.Get_ROC_Efficiencies(Background_file.Get(dis_string+str(bins[bin_])+\"_\"+str(bins[bin_+1])),(0,1),nbins,0)\n CSV_Signal_Eff = FCM.Get_ROC_Efficiencies(Signal_file.Get(\"CSV_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1])),(0,1),ratio_bins,0)\n CSV_BG_Eff = FCM.Get_ROC_Efficiencies(Background_file.Get(\"CSV_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1])),(0,1),ratio_bins,0)\n if log:\n plt.semilogy(Dis_Signal_Eff,Dis_BG_Eff, color = color[bin_], linestyle = '-',label=str(bins[bin_])+\"_\"+str(bins[bin_+1]))\n plt.semilogy(CSV_Signal_Eff,CSV_BG_Eff, color = color[bin_],linestyle = '--',)\n\n else:\n plt.plot(Dis_Signal_Eff,1-Dis_BG_Eff, color = color[bin_], linestyle = '-',label=str(bins[bin_])+\"_\"+str(bins[bin_+1]))\n plt.plot(CSV_Signal_Eff,1-CSV_BG_Eff, color = color[bin_],linestyle = '--',)\n\n if log:\n\t\tif diff:\n\t\t\tplt.semilogy([0,0],[0,0],'k-',label = 'L4-L1')\n\t\telse:\n \tplt.semilogy([0,0],[0,0],'k-',label = 'L4/L1')\n plt.semilogy([0,0],[0,0],'k-.',label = 'CSV')\n plt.semilogy([0,1],[0.1,0.1],'k:')\n plt.xlabel(r\"$\\epsilon$_signal\")\n plt.ylabel(r\"$\\epsilon$_background\")\n plt.legend(loc=4)\n else:\n\t\tif diff:\n\t\t\tplt.plot([0,0],[0,0],'k-',label = 'L4-L1')\n\t\telse:\n \tplt.plot([0,0],[0,0],'k-',label = 'L4/L1')\n plt.plot([0,0],[0,0],'k-.',label = 'CSV')\n #plt.plot([0,1],[0.9,0.9],'k:',label=\"10% mistag\")\n plt.plot([0,1],[0.9,0.9],'k:')\n plt.xlabel(r\"$\\epsilon$_signal\")\n plt.ylabel(r\"1-$\\epsilon$_background\")\n plt.legend(loc=3)\n #plt.title(title+\"_ROC-Curves\")\n\n plt.savefig(\"Thesis_Plots/{}_ROC_Curves.png\".format(title))\n print \"saved as Thesis_Plots/{}_ROC_Curves.png\".format(title)", "def figure_rois(self):\n channels=[[221,\"CH1 (red) average\",self.Ravg,'magenta'],\n [222,\"CH2 (green) average\",self.Gavg,'green'],\n [223,\"G/R average\",self.GoRavg,'gray'],\n [224,\"Gstd/Ravg\",self.Gstd/self.Ravg,'jet'],\n ]\n\n plt.figure(figsize=(16,12))\n for roiNumber in range(len(self.rois)):\n for i,channel in enumerate(channels):\n subplot,label,image2d,color=channel\n label+=\" [ROI %d]\"%(roiNumber)\n plt.subplot(subplot)\n plt.title(label)\n plot_image(image2d,cm=color,colorbar=(roiNumber==0),\n percentile=(1,99))\n plot_roi_bounds(self.rois[roiNumber]['bounds'],\n label=roiNumber+1)\n plt.tight_layout()\n plot_saveOrShow(self.folderSave+\"/roiAll.png\",show=False)", "def draw_roc(signal, background, output_dir=\".\", output_name=\"roc.pdf\"):\n\n x, y = get_roc(signal, background)\n auc = metrics.auc(x, y, reorder=True)\n\n fig = plt.figure(1, figsize=(6, 6), dpi=300)\n fig.clear()\n\n # Create an axes instance\n ax = fig.add_subplot(111)\n\n 
ax.plot(x, y, '-', color='#B64926', lw=2, label=\"AUC: %.4f\" % auc)\n ax.plot([0, 1], [0, 1], ':', color='black', lw=2, label=\"Random cut\")\n ax.margins(0.05)\n\n ax.set_xlabel(\"Background efficiency\")\n ax.set_ylabel(\"Signal efficiency\")\n \n fig.set_tight_layout(True)\n\n ax.legend(loc='lower right', numpoints=1, frameon=False)\n\n print(\"AUC: %.4f\" % auc)\n\n fig.savefig(os.path.join(output_dir, output_name))\n\n plt.close()\n\n def get_index(y, value):\n \"\"\"\n Find the last index of the element in y\n satistying y[index] <= value\n \"\"\"\n\n for i in range(len(y)):\n if y[i] <= value:\n continue\n\n return i\n\n print(\"Background efficiency for signal efficiency of 0.70: %f\" % x[get_index(y, 0.70)])\n print(\"Background efficiency for signal efficiency of 0.80: %f\" % x[get_index(y, 0.80)])\n print(\"Background efficiency for signal efficiency of 0.90: %f\" % x[get_index(y, 0.90)])", "def plot_roccurve(signals, bkgs, cut_function, cut_values, ax):\n eff_sig, eff_bkg, n_pass_sig, n_pass_bkg, n_total_sig, n_total_bkg = roccurve(signals, bkgs, cut_function, cut_values)\n return _draw_roccurve(eff_sig, eff_bkg, cut_values, ax)", "def plot(self, data, background, scale=(5, 99)):\n # find the minimum and maximum value of plotting\n vmin = np.percentile(data, scale[0])\n vmax = np.percentile(data, scale[1])\n\n cax1 = self.ax1.imshow(data, cmap='gray', vmin=vmin, vmax=vmax,\n origin='lower')\n cax2 = self.ax2.imshow(background, cmap='viridis',\n origin='lower')\n cs = self.ax2.contour(background, colors='r', linewidths=0.5)\n self.ax2.clabel(cs, inline=1, fontsize=7, use_clabeltext=True)\n self.colorbar(cax1, cax=self.ax1c)\n self.colorbar(cax2, cax=self.ax2c)\n for ax in [self.ax1, self.ax2]:\n ax.set_xlabel('X (pixel)')\n ax.set_ylabel('Y (pixel)')\n ax.xaxis.set_major_locator(tck.MultipleLocator(500))\n ax.xaxis.set_minor_locator(tck.MultipleLocator(100))\n ax.yaxis.set_major_locator(tck.MultipleLocator(500))\n ax.yaxis.set_minor_locator(tck.MultipleLocator(100))", "def plot_single_roccurve(signals, bkgs, cut_function, cut_values, ax=None):\n # Get a default ax if none is given\n if ax is None:\n import matplotlib.pyplot as plt\n fig = plt.figure(figsize=(8,8))\n ax = fig.gca()\n # Plot the base line\n ax.plot([0.0,1.0], [0.0,1.0], linestyle='--', color='xkcd:gray')\n # Plot the single roccurve\n line = plot_roccurve(signals, bkgs, cut_function, cut_values, ax=ax)\n line.set_label(bkgs[0].get_category())\n # Plot settings\n ax.set_xlim(0.0, 1.05)\n ax.set_ylim(0.0, 1.05)\n ax.set_ylabel('Signal eff.', fontsize=DEFAULT_FONTSIZE)\n ax.set_xlabel('Bkg eff.', fontsize=DEFAULT_FONTSIZE)\n ax.legend(fontsize=DEFAULT_FONTSIZE)\n return ax", "def draw_roc(signal, background, output_dir=\".\", output_name=\"roc\", form=\".pdf\"):\n\n x, y = get_roc(signal, background)\n file_path = output_dir + \"/\"+ output_name + \"_X.cvs\"\n numpy.savetxt(file_path, x, delimiter=\",\")\n file_path = output_dir + \"/\"+ output_name + \"_Y.cvs\"\n numpy.savetxt(file_path, y, delimiter=\",\")\n output_name = output_name + form\n\n auc = metrics.auc(x, y, reorder=True)\n\n fig = plt.figure(1, figsize=(7, 7), dpi=300)\n fig.clear()\n\n # Create an axes instance\n ax = fig.add_subplot(111)\n\n ax.plot(x, y, '-', color='#B64926', lw=2, label=\"AUC: %.4f\" % auc)\n ax.margins(0.05)\n\n ax.set_xlabel(\"Background efficiency\")\n ax.set_ylabel(\"Signal efficiency\")\n \n fig.set_tight_layout(True)\n\n ax.legend(loc='lower right', numpoints=1, frameon=False)\n\n print(\"AUC: %.4f\" % auc)\n\n 
fig.savefig(os.path.join(output_dir, output_name))\n\n plt.close()\n\n def get_index(y, value):\n \"\"\"\n Find the last index of the element in y\n satistying y[index] <= value\n \"\"\"\n\n for i in range(len(y)):\n if y[i] <= value:\n continue\n\n return i\n\n print(\"Background efficiency for signal efficiency of 0.70: %f\" % x[get_index(y, 0.70)])\n print(\"Background efficiency for signal efficiency of 0.80: %f\" % x[get_index(y, 0.80)])\n print(\"Background efficiency for signal efficiency of 0.90: %f\" % x[get_index(y, 0.90)])", "def relative_src_bg(self):\n fig, ax = plt.subplots()\n \n for oneF in ['extracted_flux','extracted_bg_only']:\n wave, f = self.result['1d'][oneF]\n ax.plot(wave,f,label=oneF)\n ax.set_xlabel('Wavelength ($\\mu$m)')\n ax.set_ylabel('Extracted Flux')\n ax.legend()\n \n fig.show()", "def spiderplot(categories, values, ax=None,\n axfc = None,\n lcolor=\"k\", lsize=\"small\", \n rcolor=\"0.7\", rsize=\"small\", rarray=None,\n title=None, titlecolor=\"k\", titlesize=\"medium\",\n fillcolor = \"C0\", fillalpha=0.1, \n highlight_unique=True,\n highlight_color=\"C0\", \n **kwargs):\n import matplotlib.pyplot as mpl\n \n if highlight_unique:\n flagnonzero = np.asarray(values)>0 \n highlight = np.argwhere(flagnonzero)[0] if np.sum(flagnonzero) == 1 else None\n lcolor = \"0.5\"\n else:\n highlight = None\n \n # But we need to repeat the first value to close the circular graph:\n values = list(values)\n values += values[:1]\n ncategories = len(categories)\n \n \n # == Plot\n if ax is None:\n fig = mpl.figure(figsize=[3,3.5])\n ax = fig.add_axes([0.1,0.12,0.8,0.7], polar=True, \n facecolor=axfc,\n zorder=1)\n else:\n ax = ax\n fig = ax.figure\n\n # What will be the angle of each axis in the plot? (we divide the plot / number of variable)\n angles = [n / float(ncategories) * 2 * np.pi for n in range(ncategories)]\n angles += angles[:1]\n \n # Draw one axe per variable + add labels labels yet\n ax.set_xticks(angles[:-1])\n ax.set_xticklabels(categories, color=lcolor, size=lsize)\n \n if highlight is not None and highlight_unique:\n xtick = ax.get_xticklabels()[highlight[0]]\n xtick.set_color(highlight_color)\n xtick.set_weight(\"bold\")\n xtick.set_size(xtick.get_size()*1.2)\n \n\n \n # Draw ylabels\n ax.set_rlabel_position(0)\n \n # Scaling\n if rarray is not None: \n ax.set_yticks(rarray[:-1])\n ax.set_ylim(0,rarray[-1])\n \n ax.set_yticklabels(np.asarray(ax.get_yticks(), dtype=\"str\"), \n color=rcolor, size=rsize)\n \n # --------------- #\n # Actual Plot #\n # --------------- #\n # Plot data\n prop = dict(linewidth=1.5, linestyle='solid', color=fillcolor)\n for k,v in kwargs.items():\n prop[k] = v\n # python 3 -> prop = {**dict(linewidth=1.5, linestyle='solid'), **kwarg}\n ax.plot(angles, values, **prop)\n \n # Fill area\n ax.fill(angles, values, fillcolor, alpha=fillalpha)\n \n # Additional Info\n # First entry\n if title is not None:\n ax.set_title(title, size=titlesize, color=titlecolor)\n \n return {\"ax\":ax, \"fig\":fig, \"highlight\":highlight}", "def plot_r(self):\n for k, v, o in self.data:\n self.plot_r1(k, v, o)", "def plotCurves(self, dataByModel):\n prFigure = pyplot.figure()\n self.configChart()\n prAx = prFigure.add_subplot(111)\n prAx.set_xlabel('Recall')\n prAx.set_ylabel('Precision')\n prAx.set_title('PR Curve')\n prAx.grid(True)\n\n rocFigure = pyplot.figure()\n self.configChart()\n rocAx = rocFigure.add_subplot(111)\n rocAx.set_xlabel('Fallout / FPR')\n rocAx.set_ylabel('Recall')\n rocAx.set_title('ROC Curve')\n rocAx.grid(True)\n\n corrFigure = 
pyplot.figure()\n self.configChart()\n corrAx = corrFigure.add_subplot(111)\n corrAx.set_xlabel('predict score')\n corrAx.set_ylabel('real score')\n corrAx.set_title('Correlation Curve')\n corrAx.grid(True)\n\n precisionFigure = pyplot.figure()\n self.configChart()\n precisionAx = precisionFigure.add_subplot(111)\n precisionAx.set_xlabel('score')\n precisionAx.set_ylabel('Precision')\n precisionAx.set_title('Threshold score vs precision')\n precisionAx.grid(True)\n\n recallFigure = pyplot.figure()\n self.configChart()\n recallAx = recallFigure.add_subplot(111)\n recallAx.set_xlabel('score')\n recallAx.set_ylabel('Recall')\n recallAx.set_title('Threshold score vs recall')\n recallAx.grid(True)\n\n falloutFigure = pyplot.figure()\n self.configChart()\n falloutAx = falloutFigure.add_subplot(111)\n falloutAx.set_xlabel('score')\n falloutAx.set_ylabel('Fallout (False Positive Rate)')\n falloutAx.set_title('Threshold score vs fallout')\n falloutAx.grid(True)\n\n for (model, data) in list(dataByModel.items()):\n (recalls, precisions) = list(zip(*(data['PR'])))\n prAx.plot(recalls, precisions, marker='o', linestyle='--', label=model)\n\n (fallouts, recalls) = list(zip(*(data['ROC'])))\n rocAx.plot(fallouts, recalls, marker='o', linestyle='--', label=model)\n\n (pCtrs, eCtrs) = list(zip(*(data['CORR'])))\n corrAx.plot(pCtrs, eCtrs, label=model)\n\n (score, recall, precision, fallout) = list(zip(*(data['cutoff'])))\n\n recallAx.plot(score, recall, label=model + '_recall')\n precisionAx.plot(score, precision, label=model + '_precision')\n falloutAx.plot(score, fallout, label=model + '_fallout')\n\n # saving figures\n ensure_dir(self.output_dir)\n prAx.legend(loc='upper right', shadow=True)\n prFigure.savefig('%s/pr_curve.png' % self.output_dir)\n\n rocAx.legend(loc='lower right', shadow=True)\n rocFigure.savefig('%s/roc_curve.png' % self.output_dir)\n\n corrAx.legend(loc='upper left', shadow=True)\n corrFigure.savefig('%s/corr_curve.png' % self.output_dir)\n\n precisionAx.legend(loc='upper left', shadow=True)\n precisionFigure.savefig('%s/precision.png' % self.output_dir)\n\n recallAx.legend(loc='lower left', shadow=True)\n recallFigure.savefig('%s/recall.png' % self.output_dir)\n\n falloutAx.legend(loc='upper right', shadow=True)\n falloutFigure.savefig('%s/fallout.png' % self.output_dir)\n\n pyplot.close()\n pngs = '{result}/pr_curve.png {result}/roc_curve.png {result}/corr_curve.png {result}/precision.png {result}/recall.png {result}/fallout.png'.format(result=self.output_dir)\n print('png: ', pngs)", "def plot_roc(X,y,test_preds,fname=\"res/roc.png\"):\n\t#Retrieve multiple fpr and tpr values for different thresholds\n\tfpr, tpr, thresholds = roc_curve(y,test_preds)\n\tplt.plot(fpr, tpr)\n\tplt.title(auc(fpr, tpr))\n\tplt.savefig(fname, bbox_inches='tight')\n\tplt.close()", "def plot_ROC():\r\n fpr = dict()\r\n tpr = dict()\r\n roc_auc = dict()\r\n threshold = dict()\r\n \r\n for i in range(n_classes):\r\n \r\n fpr[i], tpr[i], threshold[i] = roc_curve(y_test[:, i], y_pred[:, i])\r\n \r\n roc_auc[i] = auc(fpr[i], tpr[i])\r\n\r\n fig = plt.figure()\r\n ax = fig.add_subplot(1, 1, 1)\r\n ax.set_aspect('equal')\r\n colors = cycle(['aqua', 'red', 'purple', 'royalblue', 'black'])\r\n \r\n for i, color in zip(range(n_classes), colors):\r\n \r\n plt.plot(\r\n fpr[i],\r\n tpr[i],\r\n color=color,\r\n linewidth=3,\r\n #label='Class {0} (AUC {1:0.3f})'\r\n label='AUC {1:0.2f}' \r\n ''.format(i+1, roc_auc[i])\r\n )\r\n\r\n #plt.plot([0, 1], [0, 1], 'k--', linewidth=3)\r\n plt.xlim([-0.03, 1])\r\n plt.ylim([0, 
1.03])\r\n ax.axhline(y=0, color='k', linewidth=4)\r\n ax.axhline(y=1.03, color='k', linewidth=4)\r\n ax.axvline(x=-0.03, color='k', linewidth=4)\r\n ax.axvline(x=1, color='k', linewidth=4) \r\n plt.xticks([0, 0.2, 0.4, 0.6, 0.8, 1.0], fontsize=16, fontweight='bold')\r\n plt.yticks([0, 0.2, 0.4, 0.6, 0.8, 1.0], fontsize=16, fontweight='bold')\r\n #plt.xlabel('False Positive Rate', fontweight='bold', fontsize=16)\r\n #plt.ylabel('True Positive Rate', fontweight='bold', fontsize=16)\r\n plt.legend(loc='lower right', prop={'size': 14, 'weight': 'bold'}) \r\n plt.grid(True)\r\n\r\n ROC_filename = 'ROC' + '_' + \\\r\n str(DNN_Model) + '_' + \\\r\n strftime(\"%d-%b-%Y-%H-%M-%S\", gmtime()) + '.png'\r\n \r\n plt.savefig(\r\n os.path.join(result_dir, ROC_filename),\r\n format='png',\r\n dpi=600\r\n )\r\n\r\n plt.show()\r\n plt.close()", "def plot_mean_roc_curve_of_classifiers(classifier_roc_list, data_set_description):\n if const.RECORD_RESULTS is True:\n fig = plt.figure(figsize=(8, 6.66))\n monochrome = (cycler(\"color\", [\"k\"]) * cycler(\"marker\", [\"\"]) *\n cycler(\"linestyle\", [\"-\", \"--\", \"-.\"]))\n color_arr = [\"#64B3DE\", \"#1f78b4\", \"#6ABF20\", \"#FBAC44\", \"#bc1659\", \"#B9B914\", \"#33a02c\", \"#ff7f00\", \"#6a3d9a\", \"black\", \"#b15928\", \"#e31a1c\"]\n plt.rc(\"axes\", prop_cycle=monochrome)\n line_style_index = 0\n color_index = 0\n\n for (test_run_roc_list, classifier_description) in classifier_roc_list:\n if not (None, None) in test_run_roc_list[0]:\n mean_tpr = 0.0\n mean_fpr = np.linspace(0, 1, 100)\n count = 0\n for roc_list in test_run_roc_list:\n for (tpr, fpr) in roc_list:\n mean_tpr += interp(mean_fpr, fpr, tpr)\n mean_tpr[0] = 0.0\n count += 1\n\n mean_tpr /= float(count)\n mean_tpr[-1] = 1.0\n mean_auc = auc(mean_fpr, mean_tpr)\n line_width = 0.5\n if line_style_index == 1:\n line_width = 0.8\n elif line_style_index == 2:\n line_width = 1.5\n\n plt.plot(mean_fpr, mean_tpr, c=color_arr[color_index], lw=line_width, alpha=1, label=\"{0} ({1:.3f})\".format(classifier_description, mean_auc))\n line_style_index = (line_style_index + 1) % 3\n color_index += 1\n\n plt.locator_params(axis='x', nbins=10)\n plt.locator_params(axis='y', nbins=10)\n plt.plot([0, 1], [0, 1], \"k--\", label=\"Random classification\", lw=0.8)\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.0])\n plt.xlabel(\"False Positive Rate\")\n plt.ylabel(\"True Positive Rate\")\n plt.title(\"ROC curve for each classifier\")\n plt.legend(loc=\"lower right\", fancybox=True, frameon=True)\n current_time = datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n plt.savefig(os.path.dirname(os.path.realpath(__file__)) + \"/../results/{0}_roc_classifier_plot_{1}.png\".format(data_set_description, current_time), bbox_inches=\"tight\")\n plt.close(fig)", "def plot_ROC_curves(fig, ax, y_all, perf, title=None):\n curves = {'IMPRESS_all': 'royalblue',\n 'IMPRESS_HE_only': 'plum',\n 'IMPRESS_IHC_only': 'pink',\n 'pathologists_eval': 'tomato'}\n \n type_convert = {'IMPRESS_all': 'IMPRESS',\n 'IMPRESS_HE_only': 'IMPRESS (H&E only)',\n 'IMPRESS_IHC_only': 'IMPRESS (IHC only)',\n 'pathologists_eval': 'Pathologists'}\n \n for fgroup in curves.keys():\n tprs = []\n aucs = []\n mean_fpr = np.linspace(0, 1, 100)\n ax.set_aspect('equal')\n for seed in range(int(y_all[fgroup].shape[1]/3)):\n y_true = y_all[fgroup].loc[:,'y_true'].iloc[:,seed]\n y_pred_proba = y_all[fgroup].loc[:,'y_pred_proba'].iloc[:,seed]\n tpr, fpr, treshold = roc_curve(y_true, 1-y_pred_proba)\n tprs.append(np.interp(mean_fpr, fpr, tpr))\n roc_auc = auc(fpr, tpr)\n 
aucs.append(roc_auc)\n ax.plot(fpr, tpr, color=curves[fgroup], linewidth=2, alpha=0.10, label=None)\n \n mean_tpr = np.mean(tprs, axis=0)\n mean_tpr[-1] = 1.0\n \n ax.plot(mean_fpr, mean_tpr, color=curves[fgroup],\n label=r'%s (AUC = %0.4f $\\pm$ %0.2f)' % \\\n (type_convert[fgroup], perf[fgroup].loc['AUC','mean'], perf[fgroup].loc['AUC','std']),\n linewidth=3.0, alpha=0.80)\n \n std_tpr = np.std(tprs, axis=0)\n tprs_upper = np.minimum(mean_tpr + std_tpr, 1)\n tprs_lower = np.maximum(mean_tpr - std_tpr, 0)\n \n if fgroup == 'IMPRESS_all':\n ax.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=0.1,\n label=r'$\\pm$ 1 standard deviation')\n else:\n ax.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=0.1,\n label=None)\n \n ax.set_xlabel('False positive rate')\n ax.set_ylabel('True positive rate')\n x = [0.0, 1.0]\n plt.plot(x, x, linestyle='dashed', color='red', linewidth=2.0, label='Random')\n plt.legend(fontsize=10, loc='best')\n \n if title is not None:\n fig.suptitle(t=title, fontsize=12)\n return fig", "def _roc_plot(self, roc_curves):\n # figure\n p = default_figure(\n {\n \"x_range\": (-0.01, 1.1),\n \"y_range\": (-0.01, 1.1),\n \"tools\": \"pan,wheel_zoom,box_zoom,reset\",\n \"toolbar_location\": \"right\"\n }\n )\n\n # main lines added to the plot\n self._default_models_lines(p, roc_curves)\n\n # baseline comparison\n p.line(\n [0, 1], # line x=y\n [0, 1],\n line_dash=\"dashed\",\n line_width=1,\n color=self.plot_design.models_dummy_color,\n legend_label=\"Random Baseline\",\n muted_alpha=0.5 # clicked line in the Legend will be muted\n )\n\n # plot specific styling\n p.legend.location = \"bottom_right\"\n p.xaxis.axis_label = \"False Positive Rate\"\n p.yaxis.axis_label = \"True Positive Rate\"\n\n return p", "def plotYields(data,signal=None,backgrounds=[],bins=[]):\n print \n if not bins:\n center = [i+0.5 for i,d in enumerate(data)] # pseudo-data points for making histogram\n bins = [i for i in range( len(data)+1 )] # pseudo-binning\n else:\n center = [ 0.5*(b+bins[i+1]) for i,b in enumerate(bins) if i<len(bins)-1]\n data = np.array(data)\n\n # stack the backgrounds on top of each other in the plot\n nbckgs = len(backgrounds)\n labels = ['background {0}'.format(i) for i in range(nbckgs)]\n weights = list(backgrounds)\n bincenters = [ list(center) for _ in range(nbckgs)]\n\n # stack the signal on top of the backgrounds\n if signal is not None:\n # 'signal' is what we want to unfold, e.g., ttbar\n labels += ['signal']\n weights += [list(signal)]\n bincenters += [list(center)]\n\n # plot backgrounds & signal\n d,bb,pp = plt.hist(bincenters,weights=weights,stacked=True,\n histtype='stepfilled',label=labels,\n edgecolor='k',bins=bins)\n\n # plot the data as error bars\n plt.errorbar(center,data,color='k',\n fmt='o',yerr=np.sqrt(data),\n label='Data')\n\n plt.ylim(ymin=0,ymax=plt.ylim()[1]*1.6) # scale the y-axis to accommodate the legend\n plt.legend()\n plt.xlabel(\"Distribution\")\n plt.ylabel(\"Events\")\n\n return", "def plot_sample(x):\n plt.imshow(x[:,:,0])\n plt.title(\"gasf\")\n plt.colorbar()\n plt.show()\n\n plt.imshow(x[:,:,1])\n plt.title(\"gadf\")\n plt.colorbar()\n plt.show()\n\n plt.imshow(x[:,:,2])\n plt.title(\"mtf\")\n plt.colorbar()\n plt.show()", "def plot_1(ecg, sample_rate=500, title = 'ECG'):\n plt.figure(figsize=(15,2))\n plt.suptitle(title)\n plt.subplots_adjust(\n hspace = 0, \n wspace = 0.04,\n left = 0.04, # the left side of the subplots of the figure\n right = 0.98, # the right side of the subplots of the figure\n bottom 
= 0.2, # the bottom of the subplots of the figure\n top = 0.88\n )\n seconds = len(ecg)/sample_rate\n\n ax = plt.subplot(1, 1, 1)\n step = 1.0/sample_rate\n _ax_plot(ax,np.arange(0,len(ecg)*step,step),ecg, seconds)", "def plot(\n ecg, \n sample_rate = 500, \n title = 'ECG 12', \n lead_index = lead_index, \n lead_order = None,\n style = None,\n columns = 2,\n row_height = 6,\n show_lead_name = True,\n show_grid = True,\n show_separate_line = True,\n ):\n\n if not lead_order:\n lead_order = list(range(0,len(ecg)))\n secs = len(ecg[0])/sample_rate\n leads = len(lead_order)\n rows = ceil(leads/columns)\n # display_factor = 2.5\n display_factor = 1\n line_width = 0.5\n fig, ax = plt.subplots(figsize=(secs*columns * display_factor, rows * row_height / 5 * display_factor))\n display_factor = display_factor ** 0.5\n fig.subplots_adjust(\n hspace = 0, \n wspace = 0,\n left = 0, # the left side of the subplots of the figure\n right = 1, # the right side of the subplots of the figure\n bottom = 0, # the bottom of the subplots of the figure\n top = 1\n )\n\n fig.suptitle(title)\n\n x_min = 0\n x_max = columns*secs\n y_min = row_height/4 - (rows/2)*row_height\n y_max = row_height/4\n\n if (style == 'bw'):\n color_major = (0.4,0.4,0.4)\n color_minor = (0.75, 0.75, 0.75)\n color_line = (0,0,0)\n else:\n color_major = (1,0,0)\n color_minor = (1, 0.7, 0.7)\n color_line = (0,0,0.7)\n\n if(show_grid):\n ax.set_xticks(np.arange(x_min,x_max,0.2)) \n ax.set_yticks(np.arange(y_min,y_max,0.5))\n\n ax.minorticks_on()\n \n ax.xaxis.set_minor_locator(AutoMinorLocator(5))\n\n ax.grid(which='major', linestyle='-', linewidth=0.5 * display_factor, color=color_major)\n ax.grid(which='minor', linestyle='-', linewidth=0.5 * display_factor, color=color_minor)\n\n ax.set_ylim(y_min,y_max)\n ax.set_xlim(x_min,x_max)\n\n\n for c in range(0, columns):\n for i in range(0, rows):\n if (c * rows + i < leads):\n y_offset = -(row_height/2) * ceil(i%rows)\n # if (y_offset < -5):\n # y_offset = y_offset + 0.25\n\n x_offset = 0\n if(c > 0):\n x_offset = secs * c\n if(show_separate_line):\n ax.plot([x_offset, x_offset], [ecg[t_lead][0] + y_offset - 0.3, ecg[t_lead][0] + y_offset + 0.3], linewidth=line_width * display_factor, color=color_line)\n\n \n t_lead = lead_order[c * rows + i]\n \n step = 1.0/sample_rate\n if(show_lead_name):\n ax.text(x_offset + 0.07, y_offset - 0.5, lead_index[t_lead], fontsize=9 * display_factor)\n ax.plot(\n np.arange(0, len(ecg[t_lead])*step, step) + x_offset, \n ecg[t_lead] + y_offset,\n linewidth=line_width * display_factor, \n color=color_line\n )", "def diffuse_flux(self, rois=[0,888]):\n fig, ax = plt.subplots(1,1, figsize=(6,6), dpi=150, sharey=True)\n egev = np.array(self.energy)/1e3\n if rois is None: rois = self.rois\n\n for r in rois:\n gal, iso = self.get_background(r)\n ax.plot(egev, gal, '-D', label='gal %d'%r)\n ax.plot(egev, iso, '--o', label='iso %d'%r)\n plt.setp(ax, xscale='log', xlim=(0.1,300), xlabel='Energy (GeV)',\n yscale='log', ylim=(1e-1,1e6), ylabel='Diffuse counts/ROI')\n ax.legend(prop=dict(size=10)); ax.grid()\n return fig", "def plotRadarPlot(data_grouped, save=False, *args):\n #We get the name of features\n variables = data_grouped.columns\n #We get the ranges of each features\n ranges = findRanges(data_grouped)\n #We plot each cluster on a different radar (better for vizualisation\n for i in range(0, len(data_grouped)):\n #Init the figure\n fig1 = plt.figure(figsize=(6, 6))\n #Init the radar\n radar = ComplexRadar(fig1, variables, ranges)\n #Init values on the radar\n 
radar.plot(data_grouped.loc[i, :], ranges)\n #Fill the radar (plot looks better with that fill)\n radar.fill(data_grouped.loc[i, :], alpha=0.2)\n if save == True:\n try:\n plt.savefig(args + \"radar\" + data_grouped.loc[i, :] + \".png\")\n except NameError:\n print('Missing the path for saving')\n plt.show()", "def plot_corner(self, caxes):\n xx = np.array([self.parchain[p] for p in self.show])\n labels = [pretty.get(p, p) for p in self.show]\n spans = get_spans(None, xx, weights=self.weights)\n\n truths = self.convert(dict_to_struct(self.obs[\"mock_params\"]))\n tvec = np.array([truths[p] for p in self.show])\n caxes = allcorner(xx, labels, caxes, weights=self.weights, span=spans,\n color=self.pkwargs[\"color\"], hist_kwargs=self.hkwargs,\n psamples=tvec, samples_kwargs={\"color\": self.tkwargs[\"mfc\"], \"edgecolor\": \"k\"},\n label_kwargs=self.label_kwargs,\n tick_kwargs=self.tick_kwargs, max_n_ticks=4)\n # Plot truth\n for ax, p in zip(np.diagonal(caxes), self.show):\n ax.axvline(truths[p], marker=\"\", **self.tkwargs)\n\n # plot priors\n if self.prior_samples > 0:\n self.show_priors(np.diag(caxes), spans, smooth=0.1, **self.rkwargs)", "def plot_final_roc(prediction_matrix, model_names, y_test, PATH = None):\n plt.figure(figsize=(10, 8))\n for i, model in enumerate(model_names): \n predictions = prediction_matrix[:,i]\n fpr, tpr, threshholds = roc_curve(y_test, predictions)\n sns.set_style('darkgrid', {'axes.facecolor': '0.9'})\n lw = 2\n plt.plot(fpr, tpr,\n lw=lw, label=f'{model_names[i]} AUC: {round(auc(fpr, tpr), 3)}')\n plt.plot([0, 1], [0, 1], lw=lw, linestyle='--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.yticks([i/20.0 for i in range(21)], size = 14)\n plt.xticks([i/20.0 for i in range(21)], rotation = 45, size = 14)\n plt.xlabel('False Positive Rate', size =16)\n plt.ylabel('True Positive Rate', size =16)\n plt.title('ROC Curve', size = 20)\n plt.legend(loc='lower right', prop = {\"size\" : 20})\n if PATH:\n plt.savefig(PATH, bbox_inches='tight', transparent = True)\n plt.show()", "def plot_feature_correlations(self):\n\n fig = plt.figure(figsize=(18,18), tight_layout=True)\n fig.suptitle('Feature correlations', fontsize=24)\n\n sns.heatmap(self.train_data.astype(float).corr(method='kendall'), linewidths=0.1, vmin=-1.0,\n vmax=1.0, square=True, linecolor='white', annot=True, \n cmap=\"PiYG\")\n plt.savefig(r'data_analysis\\correlations_kendall_' + self.file_name + '.png', \n facecolor=fig.get_facecolor())", "def plot_all(pred_tuple, filename='roc.png'):\n plt.clf()\n colors = [\"red\",\"blue\",\"green\",\"orange\",\"yellow\"]\n for (label, y, proba), color in zip(pred_tuple, colors):\n true_pos, false_pos, thresh = metrics.roc_curve(y, proba)\n plt.plot(false_pos, true_pos, label=label, linewidth=2,\n color=color)\n plt.plot([0,1],[0,1], linestyle=\"dashed\", color=\"grey\", label=\"random\")\n plt.xlim([0,1])\n plt.ylim([0,1])\n plt.xlabel(\"False Positive Rate\")\n plt.ylabel(\"True Positive Rate\")\n plt.title(\"Receiver Operating Characteristic\")\n plt.legend(loc=\"lower right\")\n\n plt.show()\n plt.savefig(_plots_path + filename)", "def plot_channels(self, data_array):\n\n plt.figure()\n for p in range(1, 7):\n plt.subplot(6, 1, p)\n plt.plot(data_array[p-1, :])\n\n plt.draw()\n plt.show()\n return", "def plot_all(self):\n self.plot_ramps()\n self.plot_groupdq()", "def visualize(self, background, num_labeled=10, magnification=1.0, viz=True, cutoff=100):\n \n assert magnification >= 1.0\n lst_x, lst_y, keys = self.create_xy_table(background, cutoff=cutoff)\n 
fig, ax = plt.subplots()\n low, high = 0.0, round(float(1)/magnification, 1)\n ax.set_xlim(low, high)\n ax.set_ylim(low, high)\n ax.set_aspect('equal')\n ax.scatter(lst_x, lst_y)\n\n for idx, key in enumerate(keys):\n if idx > num_labeled: txt = ''\n ax.annotate(key, (lst_x[idx],lst_y[idx]))\n\n if viz:\n plt.show()\n else:\n name = self.name if self.name else 'anon'\n plt.savefig(name)", "def ShowLongitBackground(spectra,spectraUp,spectraDown,spectraAv,all_titles,all_filt,object_name,NBIMGPERROW=2,right_edge=1800):\n NBSPEC=len(spectra)\n MAXIMGROW=(NBSPEC-1) / NBIMGPERROW +1\n\n f, axarr = plt.subplots(MAXIMGROW,NBIMGPERROW,figsize=(25,5*MAXIMGROW))\n f.tight_layout()\n for index in np.arange(0,NBSPEC):\n ix=index%NBIMGPERROW\n iy=index/NBIMGPERROW\n axarr[iy,ix].plot(spectra[index],'r-')\n axarr[iy,ix].plot(spectraUp[index],'b-')\n axarr[iy,ix].plot(spectraDown[index],'g-')\n axarr[iy,ix].plot(spectraAv[index],'m-')\n thetitle=\"{}) : {} \".format(index,all_titles[index])\n axarr[iy,ix].set_title(thetitle)\n axarr[iy,ix].grid(True)\n axarr[iy,ix].set_ylim(0.,spectra[index][:right_edge].max()*1.2)\n axarr[iy,ix].annotate(all_filt[index],xy=(0.05,0.9),xytext=(0.05,0.9),verticalalignment='top', horizontalalignment='left',color='blue',fontweight='bold', fontsize=20, xycoords='axes fraction')\n title='Longitudinal background Up/Down'.format(object_name)\n plt.suptitle(title,size=16)", "def plot_roc_curve(x_data, labels, net, plotfile,\n title=''):\n\n # Have the net predict, then split the scores by ground truth\n scores = net.predict(x_data)\n\n distfile = PLOTDIR / plotfile.replace('roc', 'dist')\n\n fig, ax = plt.subplots(1, 1, figsize=(12, 12))\n\n df = pd.DataFrame({'Condition': ['Positive' if int(i) == 1 else 'Negative'\n for i in labels[0, :]],\n 'Score': scores[0, :]})\n sns.violinplot(x='Condition', y='Score', data=df, ax=ax)\n ax.set_title('{} Dist for Rap1 Identification'.format(title))\n\n fig.savefig(str(distfile))\n\n plt.close()\n\n fp_rate, tp_rate = calc_roc(scores[labels], scores[~labels])\n\n # Make the plot\n plotfile = PLOTDIR / plotfile\n\n fig, ax = plt.subplots(1, 1, figsize=(12, 12))\n\n ax.plot(fp_rate, tp_rate, '-o', linewidth=3)\n\n # Plot the line for perfect confusion\n ax.plot([0, 1], [0, 1], '--', linewidth=3)\n\n ax.set_title('{} ROC for Rap1 Identification'.format(title))\n ax.set_xlabel('False Positive Rate')\n ax.set_ylabel('True Positive Rate')\n ax.set_xlim([-0.01, 1.01])\n ax.set_ylim([-0.01, 1.01])\n\n fig.savefig(str(plotfile))\n plt.close()", "def plot(self):\n\t\t\t\n\t\tfig,p1=_plt.subplots(4,sharex=True)\n\t\tp1[0].plot(self.time*1e3,self.eRogA,label='Rogowski A')\n\t\tp1[1].plot(self.time*1e3,self.eRogB,label='Rogowski B')\n\t\tp1[2].plot(self.time*1e3,self.eRogC,label='Rogowski C')\n\t\tp1[3].plot(self.time*1e3,self.eRogD,label='Rogowski D')\n\t\t_plot.finalizeSubplot(p1,xlabel='Time (ms)',ylabel='Current (A)')\n\t\t_plot.finalizeFigure(fig,title=self.title)\n\t\t\n\t\treturn p1", "def build_corr_plot():\r\n fig = plt.figure(figsize=(12, 12))\r\n ax = fig.add_subplot", "def plotROC(yscore, true, predtrue, datasets, title, outfile):\n fig = plt.figure()\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.0])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title(title)\n \n for i in range(len(datasets)):\n acc = accuracy_score(true[i], predtrue[i])\n fpr, tpr, _ = roc_curve(true[i], yscore[i][:,1])\n roc_auc = auc(fpr, tpr)\n plt.plot(fpr, tpr, label=datasets[i]+' (area = %0.2f, acc = %0.2f)' % (roc_auc,acc),linewidth=2)\n \n 
plt.legend(loc=\"lower right\")\n \n pdfplot = PdfPages(outfile);\n pdfplot.savefig(fig)\n pdfplot.close()", "def plotPRC(yscore, true, datasets, title, outfile):\n \n fig = plt.figure()\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.0])\n plt.xlabel('Recall')\n plt.ylabel('Precision')\n plt.title(title)\n \n for i in range(len(datasets)):\n precision, recall, _ = precision_recall_curve(true[i], yscore[i][:,1])\n prc_auc = average_precision_score(true[i], yscore[i][:,1])\n plt.plot(recall, precision, label=datasets[i]+' (area = %0.2f)' % (prc_auc),linewidth=1)\n \n plt.legend(loc=\"lower right\")\n \n pdfplot = PdfPages(outfile);\n pdfplot.savefig(fig)\n pdfplot.close()", "def plot(self):\n h = .02\n i=1\n bags_X = self.bags_X\n bags_y = self.bags_y\n fig1 = plt.figure(figsize=(45, 9))\n\n \n cm = plt.cm.RdBu\n cm_bright = ListedColormap(['#FF0000', '#0000FF'])\n \n for model in self.models:\n ax = plt.subplot(1, len(self.models) , i)\n X = pd.DataFrame(bags_X[i-1])\n y = pd.Series(bags_y[i-1])\n x_min, x_max = X[X.columns[0]].min() - .5, X[X.columns[0]].max() + .5\n y_min, y_max = X[X.columns[1]].min() - .5, X[X.columns[1]].max() + .5\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n Z = np.array(model.predict(pd.DataFrame(np.c_[xx.ravel(), yy.ravel()], columns=X.columns)))\n # print(Z[12])\n Z = Z.reshape(xx.shape)\n ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)\n ax.scatter(X[X.columns[0]], X[X.columns[1]], c=y, cmap=cm_bright, edgecolors='k')\n # size=[1000*w for w in self.weights[i-1]]\n ax.set_xlim(xx.min(), xx.max())\n ax.set_ylim(yy.min(), yy.max())\n ax.set_xlabel(str(X.columns[0]))\n ax.set_ylabel(str(X.columns[1]))\n plt.title(\"Estimator \"+str(i))\n i+=1\n \n fig2 = plt.figure(figsize=(9,9))\n X = self.X\n y = self.y\n ax2 = plt.subplot(1,1,1)\n x_min, x_max = X[X.columns[0]].min() - .5, X[X.columns[0]].max() + .5\n y_min, y_max = X[X.columns[1]].min() - .5, X[X.columns[1]].max() + .5\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n Z = np.array(self.predict(pd.DataFrame(np.c_[xx.ravel(), yy.ravel()], columns=X.columns)))\n Z = Z.reshape(xx.shape)\n ax2.contourf(xx, yy, Z, cmap=cm, alpha=.8)\n # size=[1000*w for w in self.weights[i-2]]\n ax2.scatter(X[X.columns[0]], X[X.columns[1]], c=y, cmap=cm_bright, edgecolors='k')\n ax2.set_xlim(xx.min(), xx.max())\n ax2.set_ylim(yy.min(), yy.max())\n plt.title(\"Combined Decision Surface\")\n \n plt.tight_layout()\n plt.show()\n\n return [fig1,fig2]", "def cross_section(R, L, F_C, show_every = 20, nr = 10, lagre = \"N\", fs = 10):\n\n R_sun = 6.96E8 # [m]\n L_sun = 3.846E26 # [W]\n\n plt.figure(figsize = (10.5, 10))\n fig = plt.gcf()\n ax = plt.gca()\n\n r_range = 1.2 * R[0] / R_sun\n rmax = np.max(R)\n\n ax.set_xlim(-r_range, r_range)\n ax.set_ylim(-r_range, r_range)\n ax.set_aspect('equal')\n\n core_limit = 0.995 * L_sun\n\n j = 0\n for k in range(0, len(R) - 1):\n j += 1\n # plot every <show_every> steps\n if j%show_every == 0:\n if L[k] >= core_limit: # outside core\n if F_C[k] > 0.0: # plot convection outside core\n circle_red = plt.Circle((0, 0), R[k] / rmax, color = 'red', fill = False)\n ax.add_artist(circle_red)\n else: # plot radiation outside core\n circle_yellow = plt.Circle((0, 0), R[k] / rmax, color = 'yellow', fill = False)\n ax.add_artist(circle_yellow)\n else: # inside core\n if F_C[k] > 0.0: # plot convection inside core\n circle_blue = plt.Circle((0, 0), R[k] / rmax, color = 'blue', fill = False)\n ax.add_artist(circle_blue)\n else: # plot radiation inside core\n 
circle_cyan = plt.Circle((0, 0), R[k] / rmax, color = 'cyan', fill = False)\n ax.add_artist(circle_cyan)\n\n # create legends\n circle_red = plt.Circle((2 * r_range, 2 * r_range), 0.1 * r_range, color = 'red', fill = True)\n circle_yellow = plt.Circle((2 * r_range, 2 * r_range), 0.1 * r_range, color = 'yellow', fill = True)\n circle_blue = plt.Circle((2 * r_range, 2 * r_range), 0.1 * r_range, color = 'blue', fill = True)\n circle_cyan = plt.Circle((2 * r_range, 2 * r_range), 0.1 * r_range, color = 'cyan', fill = True)\n\n ax.legend([circle_red, circle_yellow, circle_cyan, circle_blue], \\\n ['Convection outside core', 'Radiation outside core', 'Radiation inside core', 'Convection inside core'], \\\n fontsize = fs)\n plt.xlabel(r'$R/R_{\\odot}$', fontsize = fs)\n plt.ylabel(r'$R/R_{\\odot}$', fontsize = fs)\n plt.title('Cross section of star', fontsize = fs + 2)\n if lagre == \"J\":\n plt.savefig(\"Figur%02i.png\"%nr)\n\n plt.show()", "def plot(self, xar, **kwargs):\n for dim in ['longitude', 'latitude']:\n if dim not in xar.coords:\n raise KeyError(dim+' not found in coordinates!')\n\n plt.close()\n fig = plt.figure(**self.fig_kws)\n\n if not self.proj:\n self.proj = choose_proj_from_xar(xar)\n ax = plt.axes(projection=self.proj)\n\n countries = cfeature.NaturalEarthFeature(\n category='cultural',\n name='admin_0_boundary_lines_land',\n scale='50m',\n facecolor='none')\n rivers = cfeature.NaturalEarthFeature(scale='50m', category='physical',\n name='rivers_lake_centerlines',\n edgecolor='blue', facecolor='none')\n\n ax.add_feature(countries, edgecolor='grey')\n ax.coastlines('50m')\n ax.add_feature(rivers, edgecolor='blue')\n gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True)\n gl.xlabels_top = False\n\n if self.drainage_baisins:\n sf = Reader(\"../data/drainage_basins/Major_Basins_of_the_World.shp\")\n shape_feature = ShapelyFeature(sf.geometries(),\n self.transform, edgecolor='black')\n ax.add_feature(shape_feature, facecolor='none', edgecolor='green')\n\n # cbar_kwargs = kwargs.pop('cbar_kwargs', dict())\n subplot_kws = kwargs.pop('subplot_kws', dict())\n subplot_kws['projection'] = self.proj\n\n # choose which colormap to use: pos and neg values => RdYlGn, else inferno\n if ((xar.max()-xar.min()) > xar.max()):\n cmap = 'RdYlGn'\n else:\n cmap = 'spring_r'\n\n # colorbar preset to match height of plot\n # if 'fraction' not in cbar_kwargs: cbar_kwargs['fraction'] = 0.015\n xar.plot.pcolormesh(ax=ax, transform=self.transform,\n subplot_kws=subplot_kws,\n cmap=cmap,\n **kwargs)\n return fig, ax", "def plot_all(self) -> None:\n self.__plot_si_cf_plane()\n self.__plot_convex_hull()\n self.__plot_fixed_radius()\n self.__plot_delaunay()", "def _plot_curves(self, curves_dict):\n for name, curve in curves_dict.items():\n fig = plt.figure()\n ax = plt.gca()\n\n plot_type = name.split('_')[-1]\n ax.set_title(plot_type)\n if plot_type == 'PRC':\n precision, recall, _ = curve\n ax.step(recall, precision, color='b', alpha=0.2, where='post')\n ax.fill_between(recall, precision, step='post', alpha=0.2, color='b')\n ax.set_xlabel('Recall')\n ax.set_ylabel('Precision')\n elif plot_type == 'ROC':\n false_positive_rate, true_positive_rate, _ = curve\n ax.plot(false_positive_rate, true_positive_rate, color='b')\n ax.plot([0, 1], [0, 1], 'r--')\n ax.set_xlabel('False Positive Rate')\n ax.set_ylabel('True Positive Rate')\n else:\n ax.plot(curve[0], curve[1], color='b')\n\n ax.set_ylim([0.0, 1.05])\n ax.set_xlim([0.0, 1.0])\n\n fig.canvas.draw()\n\n curve_img = np.fromstring(fig.canvas.tostring_rgb(), 
dtype=np.uint8, sep='')\n curve_img = curve_img.reshape(fig.canvas.get_width_height()[::-1] + (3,))\n self.summary_writer.add_image(name.replace('_', '/'), curve_img, global_step=self.global_step)", "def plottrace_paper(moviedict, figw, figh, figdpi, fontsz, border, xlabel, ylabel, yaxisticks, \n xaxisticks, labels, lw, fs):\n \n for movie, val in moviedict.iteritems():\n os.chdir(movie)\n condition, xlim, color, inum = val\n \n fontv = matplotlib.font_manager.FontProperties(fname='/usr/share/matplotlib/mpl-data/fonts/ttf/arial.ttf')\n fontv.set_size(fontsz)\n \n print(movie)\n td = dil.load_params()\n x, roi_cols = dil.load_results(RESULTS_FILE)\n start = int(td['startshort'])\n end = int(td['endshort'])\n \n \n fig1 = plt.figure(figsize=(figw*xlim/0.6, figh), dpi=figdpi, facecolor='w', edgecolor='k')\n \n xlen = len(x[roi_cols['Mean1']][start:end])\n #print(xlen)\n xvals = np.arange(0, float(xlen)/fs, 1/float(fs))\n #print(xvals)\n \n \n ycib = x[roi_cols['Mean1']][start:end]\n ycib = [v - np.mean(ycib) for v in ycib]\n #print(ycib)\n \n ylab = x[roi_cols['Mean2']][start:end]\n ylab = [v - np.mean(ylab) for v in ylab]\n ylab = [v + 70 for v in ylab]\n \n # Plots the traces\n \n plt.plot(xvals, ylab, label='proboscis tip', linewidth=lw, color='k')\n plt.plot(xvals, ycib, label='cibarium', linewidth=lw, color='b')\n \n \n \n \n \n \n \n if labels == 'yes':\n plt.title(td['condition'], fontproperties=fontv, horizontalalignment='left')\n \n #Plots legend and removes the border around it.\n legend=plt.legend()\n #legend = plt.legend(bbox_to_anchor = (1.5, 1.6))\n legend.draw_frame(False)\n ltext = legend.get_texts() \n plt.setp(ltext, fontproperties=fontv) \n \n ax = plt.gca()\n \n #Uncomment lines below to display without top and right borders.\n \n if border == 'no':\n for loc, spine in ax.spines.iteritems():\n if loc in ['left','bottom']:\n pass\n elif loc in ['right','top']:\n spine.set_color('none') # don't draw spine\n else:\n raise ValueError('unknown spine location: %s'%loc)\n \n \n #Uncomment lines below to display ticks only where there are borders.\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n \n # Specifies the number of tickmarks/labels on the yaxis.\n #ax.yaxis.set_major_locator(matplotlib.ticker.MaxNLocator(yaxisticks)) \n ## Removes tick labels and ticks from xaxis.\n ax.axes.yaxis.set_major_locator(matplotlib.ticker.NullLocator())\n \n if labels == 'yes':\n plt.ylabel(ylabel, fontsize=fontsz, labelpad=12)\n fig1.figsize = (6, 3)\n \n # Adjusts the space between the plot and the edges of the figure; (0,0) is the lower \n #lefthand corner of the figure.\n fig1.subplots_adjust(bottom=0.3)\n fig1.subplots_adjust(left=0.05)\n fig1.subplots_adjust(right=0.95)\n fig1.subplots_adjust(top=0.95)\n \n #ax.xaxis.set_major_locator(matplotlib.ticker.MaxNLocator(XAXISTICKS)) \n \n #Specifies axis labels and axis tick label sizes.\n plt.xlabel(xlabel, fontproperties=fontv)\n plt.ylabel(ylabel, fontproperties=fontv)\n plt.xticks([0, 0.2, 0.4, 0.6], fontproperties=fontv)\n plt.xlim( (0, xlim+0.05) )\n #plt.yticks(fontproperties=fontv)\n \n \n \n # Saves the figures in plots/plots.\n if labels == 'no':\n plotfolder = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath('.'))),\n 'plots')\n makenewdir(plotfolder)\n figname = os.path.join(plotfolder, movie + '_trace_nolab')\n plt.savefig(figname+'.svg', dpi=FIGDPI, format='svg')\n plt.savefig(figname+'.png', dpi=FIGDPI, format='png')\n os.chdir('../')\n\n if labels == 'yes':\n plotfolder = 
os.path.join(os.path.dirname(os.path.dirname(os.path.abspath('.'))),\n 'plots')\n makenewdir(plotfolder)\n figname = os.path.join(plotfolder, movie + '_trace')\n plt.savefig(figname+'.svg', dpi=FIGDPI, format='svg')\n plt.savefig(figname+'.png', dpi=FIGDPI, format='png')\n os.chdir('../')", "def roc_plot(label, fpr, tpr, roc_auc):\n plt.figure()\n for i in range(len(label)):\n plt.plot(fpr[i], tpr[i], label=label[i] + ' AUC = %0.2f' % roc_auc[i], alpha=0.75)\n plt.plot([0, 1], [0, 1], 'r--')\n plt.xlim([-0.01, 1.01])\n plt.ylim([-0.01, 1.01])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('ROC Curve')\n plt.legend(loc='lower right')\n plt.show()", "def plot_regime_diagram_background_L19(\n ax=None,\n ):\n if ax is None:\n ax = plt.gca()\n # range of power\n xpr = [-1, 1]\n ypr = [-3, 3]\n # range\n xlims = [10**i for i in xpr]\n ylims = [10**i for i in ypr]\n # background following Fig. 3 of Belcher et al., 2012\n nx = 500\n ny = 500\n xx = np.logspace(xpr[0], xpr[1], nx)\n yy = np.logspace(ypr[0], ypr[1], ny)\n zz1 = np.zeros([nx, ny])\n zz2 = np.zeros([nx, ny])\n zz3 = np.zeros([nx, ny])\n for i in np.arange(nx):\n for j in np.arange(ny):\n zz1[i,j] = 2*(1-np.exp(-0.5*xx[i]))\n zz2[i,j] = 0.22*xx[i]**(-2)\n zz3[i,j] = 0.3*xx[i]**(-2)*yy[j]\n zz = zz1 + zz2 + zz3\n\n rz_ST = zz1/zz\n rz_LT = zz2/zz\n rz_CT = zz3/zz\n fr = np.ones(zz.shape) * 7\n cfrac = 0.25\n fr[(rz_LT<cfrac) & (rz_CT<cfrac)] = 1\n fr[(rz_ST<cfrac) & (rz_CT<cfrac)] = 2\n fr[(rz_ST<cfrac) & (rz_LT<cfrac)] = 3\n fr[(rz_ST>=cfrac) & (rz_LT>=cfrac) & (rz_CT<cfrac)] = 4\n fr[(rz_ST>=cfrac) & (rz_CT>=cfrac) & (rz_LT<cfrac)] = 5\n fr[(rz_LT>=cfrac) & (rz_CT>=cfrac) & (rz_ST<cfrac)] = 6\n color_list = ['firebrick','forestgreen','royalblue','gold','orchid','turquoise','w']\n cb_ticks = [0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5]\n cmap, norm = from_levels_and_colors(cb_ticks, color_list)\n ax.contourf(xx, yy, np.transpose(fr), cmap=cmap, norm=norm)\n ax.contour(xx, yy, np.transpose(fr), colors='darkgray')\n ax.set_xlim(xlims)\n ax.set_ylim(ylims)\n ax.set_xscale('log')\n ax.set_yscale('log')\n ax.set_xlabel('La$_t$')\n ax.set_ylabel('$h/L_L$')\n ax.set_aspect(aspect=1/3)\n ax.text(0.11, 4e-3, 'Langmuir', bbox=dict(boxstyle=\"square\",ec='k',fc='w'))\n ax.text(3, 4e-3, 'Shear', bbox=dict(boxstyle=\"square\",ec='k',fc='w'))\n ax.text(0.13, 1e2, 'Convection', bbox=dict(boxstyle=\"square\",ec='k',fc='w'))", "def plot_roc_curves(labels, probas, name='', ax=None):\n # Setup axis\n if ax is None:\n fig, ax = plt.subplots(figsize=(20, 10))\n\n plot_roc(labels, probas, name=name, ax=ax)\n\n # Plot chance\n ax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='black', alpha=.8)\n\n # Fill bottom right\n ax.fill_between([0, 1], [0, 1], alpha=0.3, color='black')\n\n # Settings\n ax.set_xlabel('False Positive Rate or (1 - Specifity)', fontsize=15)\n ax.set_ylabel('True Positive Rate or (Sensitivity)', fontsize=15)\n ax.set_title('Receiver Operating Characteristic', weight='bold', fontsize=18)\n ax.set_xlim([0, 1])\n ax.set_ylim([0, 1])\n ax.legend(loc='lower right')\n\n return ax", "def debugplots(fig,data):\n grid = AxesGrid(fig, 111, # similar to subplot(142)\n nrows_ncols=(1, 6),\n axes_pad=0.0,\n share_all=True,\n label_mode=\"L\",\n cbar_location=\"right\",\n cbar_mode=\"none\",\n )\n\n Z0=data[0].real\n Z1=data[1].real\n Z2=data[2].real\n Z3=data[3].real\n Z4=data[4].real\n Z5=data[5].real\n \n Z=[Z0,Z1,Z2,Z3,Z4,Z5]\n \n for i in range(6):\n grid[i].set_title(r\"$t=%u\\Delta 
t$\"%(Timestep(i)),color='black',horizontalalignment='center',verticalalignment='bottom')\n im = grid[i].imshow(Z[i], extent=(-2, 2, -2, 2), interpolation=\"Nearest\",origin=\"lower\",cmap='seismic',vmin=-1,vmax=1)\n grid[i].set_aspect(ratio)\n grid[i].set_xlabel(\"$x/10$\",size=16)\n grid[0].set_ylabel(\"$y/10$\",size=16)\n pos2 = [0.905,0.25,0.01,0.5]\n position = fig.add_axes(pos2)\n fig.colorbar(im, ax=grid[2],cax=position,extend=\"both\")\n \n for cax in grid.cbar_axes:\n cax.toggle_label(True)\n \n # This affects all axes as share_all = True.\n grid.axes_llc.set_xticks([-2,-1, 0,1])\n #grid[0].set_xticks([-20,-10, 0,10, 20])\n grid.axes_llc.set_yticks([-2, -1, 0, 1,2])", "def plotFilled(self):\n minc = 70\n maxc = 120\n num = 25\n levels = np.linspace(minc,maxc,num+1)\n title = \"Orography difference between LGM and Modern ICE-5G data\"\n plt.figure()\n plt.contourf(self.difference_in_ice_5g_orography,levels=levels)\n plt.title(title)\n pts.set_ticks_to_zero()\n cbar = plt.colorbar()\n cbar.ax.set_ylabel('Orography difference in meters')\n #if self.save:\n #plt.savefig('something')\n print(\"Filled contour plot created\")", "def plot_data(self):", "def plot(\n self,\n color_map={\n \"ex\": (1, 0.2, 0.2),\n \"ey\": (1, 0.5, 0),\n \"hx\": (0, 0.5, 1),\n \"hy\": (0.5, 0.2, 1),\n \"hz\": (0.2, 1, 1),\n },\n channel_order=None,\n ):\n\n if channel_order is not None:\n ch_list = channel_order()\n else:\n ch_list = self.channels\n\n n_channels = len(self.channels)\n\n fig = plt.figure()\n fig.subplots_adjust(hspace=0)\n ax_list = []\n for ii, comp in enumerate(ch_list, 1):\n try:\n color = color_map[comp]\n except KeyError:\n color = (0, 0.4, 0.8)\n if ii == 1:\n ax = plt.subplot(n_channels, 1, ii)\n else:\n ax = plt.subplot(n_channels, 1, ii, sharex=ax_list[0])\n self.dataset[comp].plot.line(ax=ax, color=color)\n ax.grid(which=\"major\", color=(0.65, 0.65, 0.65), ls=\"--\", lw=0.75)\n ax.grid(which=\"minor\", color=(0.85, 0.85, 0.85), ls=\"--\", lw=0.5)\n ax.set_axisbelow(True)\n if ii != len(ch_list):\n plt.setp(ax.get_xticklabels(), visible=False)\n\n ax_list.append(ax)", "def plot(self):\n\n fig, ax = plt.subplots()\n\n for run in self.runs:\n # Load datasets\n data_measure = run.get_dataset(\"stats-collect_link_congestion-raw-*.csv\")\n data_sp = run.get_dataset(\"stats-collect_link_congestion-sp-*.csv\")\n\n # Extract link congestion information\n data_measure = data_measure['msgs']\n data_sp = data_sp['msgs']\n\n # Compute ECDF and plot it\n ecdf_measure = sm.distributions.ECDF(data_measure)\n ecdf_sp = sm.distributions.ECDF(data_sp)\n\n variable_label = \"\"\n size = run.orig.settings.get('size', None)\n if size is not None:\n variable_label = \" (n=%d)\" % size\n\n ax.plot(ecdf_measure.x, ecdf_measure.y, drawstyle='steps', linewidth=2,\n label=\"U-Sphere%s\" % variable_label)\n ax.plot(ecdf_sp.x, ecdf_sp.y, drawstyle='steps', linewidth=2,\n label=u\"Klasični usmerjevalni protokol%s\" % variable_label)\n\n ax.set_xlabel('Obremenjenost povezave')\n ax.set_ylabel('Kumulativna verjetnost')\n ax.grid()\n ax.axis((28, None, 0.99, 1.0005))\n self.convert_axes_to_bw(ax)\n\n legend = ax.legend(loc='lower right')\n if self.settings.GRAPH_TRANSPARENCY:\n legend.get_frame().set_alpha(0.8)\n fig.savefig(self.get_figure_filename())", "def roi_curves(self, data):\n if not data or not any(len(d) for d in data.values()):\n self.roi_traces = None\n default_curve = hv.Curve([], 'Spectrum', 'CL').opts(color='red') \n return hv.NdOverlay({0: default_curve}).opts(show_legend=False) # code breaks without 
using a curve in ndoverlay\n \n curves = {}\n data = zip(data['x0'], data['x1'], data['y0'], data['y1'])\n self.roi_traces = []\n for i, (x0, x1, y0, y1) in enumerate(data):\n selection = self.xds.sel(x=slice(x0, x1), y=slice(y1, y0))\n selection_avg = selection.mean(['x','y'])\n self.roi_traces.append(selection_avg)\n if self.roi_toggle == 'Trans': # apparently param knows when this changes without having to make it a 'stream' var\n if i == 0:\n substrate = selection_avg.copy()\n selection_avg /= substrate\n curves[i] = hv.Curve(selection_avg)\n \n color_cycle_opts = opts.Curve(color= hv.Cycle(self.color_cycle))\n return hv.NdOverlay(curves).opts(color_cycle_opts)", "def ppg_plot(ppg_signals, sampling_rate=None, static=True):\n\n # Sanity-check input.\n if not isinstance(ppg_signals, pd.DataFrame):\n raise ValueError(\n \"NeuroKit error: The `ppg_signals` argument must\"\n \" be the DataFrame returned by `ppg_process()`.\"\n )\n # X-axis\n if sampling_rate is not None:\n x_axis = np.linspace(0, ppg_signals.shape[0] / sampling_rate, ppg_signals.shape[0])\n else:\n x_axis = np.arange(0, ppg_signals.shape[0])\n\n # Get peak indices\n peaks = np.where(ppg_signals[\"PPG_Peaks\"] == 1)[0]\n\n if static:\n # Prepare figure\n fig, (ax0, ax1) = plt.subplots(nrows=2, ncols=1, sharex=True)\n if sampling_rate is not None:\n ax0.set_xlabel(\"Time (seconds)\")\n ax1.set_xlabel(\"Time (seconds)\")\n elif sampling_rate is None:\n ax0.set_xlabel(\"Samples\")\n ax1.set_xlabel(\"Samples\")\n fig.suptitle(\"Photoplethysmogram (PPG)\", fontweight=\"bold\")\n plt.tight_layout(h_pad=0.4)\n\n # Plot cleaned and raw PPG\n ax0.set_title(\"Raw and Cleaned Signal\")\n ax0.plot(x_axis, ppg_signals[\"PPG_Raw\"], color=\"#B0BEC5\", label=\"Raw\", zorder=1)\n ax0.plot(\n x_axis,\n ppg_signals[\"PPG_Clean\"],\n color=\"#FB1CF0\",\n label=\"Cleaned\",\n zorder=1,\n linewidth=1.5,\n )\n\n # Plot peaks\n ax0.scatter(\n x_axis[peaks],\n ppg_signals[\"PPG_Clean\"][peaks],\n color=\"#D60574\",\n label=\"Peaks\",\n zorder=2,\n )\n ax0.legend(loc=\"upper right\")\n\n # Rate\n ax1.set_title(\"Heart Rate\")\n ppg_rate_mean = ppg_signals[\"PPG_Rate\"].mean()\n ax1.plot(\n x_axis,\n ppg_signals[\"PPG_Rate\"],\n color=\"#FB661C\",\n label=\"Rate\",\n linewidth=1.5,\n )\n ax1.axhline(y=ppg_rate_mean, label=\"Mean\", linestyle=\"--\", color=\"#FBB41C\")\n ax1.legend(loc=\"upper right\")\n return fig\n else:\n try:\n import plotly.graph_objects as go\n from plotly.subplots import make_subplots\n\n except ImportError as e:\n raise ImportError(\n \"NeuroKit error: ppg_plot(): the 'plotly'\",\n \" module is required when 'static' is False.\",\n \" Please install it first (`pip install plotly`).\",\n ) from e\n\n fig = make_subplots(\n rows=2,\n cols=1,\n shared_xaxes=True,\n subplot_titles=(\"Raw and Cleaned Signal\", \"Rate\"),\n )\n\n # Plot cleaned and raw PPG\n fig.add_trace(go.Scatter(x=x_axis, y=ppg_signals[\"PPG_Raw\"], name=\"Raw\"), row=1, col=1)\n fig.add_trace(\n go.Scatter(\n x=x_axis,\n y=ppg_signals[\"PPG_Clean\"],\n name=\"Cleaned\",\n marker_color=\"#FB1CF0\",\n ),\n row=1,\n col=1,\n )\n\n # Plot peaks\n fig.add_trace(\n go.Scatter(\n x=x_axis[peaks],\n y=ppg_signals[\"PPG_Clean\"][peaks],\n name=\"Peaks\",\n mode=\"markers\",\n marker_color=\"#D60574\",\n ),\n row=1,\n col=1,\n )\n\n # Rate\n ppg_rate_mean = ppg_signals[\"PPG_Rate\"].mean()\n fig.add_trace(\n go.Scatter(\n x=x_axis,\n y=ppg_signals[\"PPG_Rate\"],\n name=\"Rate\",\n mode=\"lines\",\n marker_color=\"#FB661C\",\n ),\n row=2,\n col=1,\n )\n fig.add_hline(\n 
y=ppg_rate_mean,\n line_dash=\"dash\",\n line_color=\"#FBB41C\",\n name=\"Mean\",\n row=2,\n col=1,\n )\n fig.update_layout(title_text=\"Photoplethysmogram (PPG)\", height=500, width=750)\n if sampling_rate is not None:\n fig.update_xaxes(title_text=\"Time (seconds)\", row=1, col=1)\n fig.update_xaxes(title_text=\"Time (seconds)\", row=2, col=1)\n elif sampling_rate is None:\n fig.update_xaxes(title_text=\"Samples\", row=1, col=1)\n fig.update_xaxes(title_text=\"Samples\", row=2, col=1)\n return fig", "def plot_ECG_peaks(ecg_df, r_peaks):\n\t# peak_times = ecg_df.iloc[r_peaks]['timestamp']\n\tpeak_times = r_peaks\n\tecg_df['is_peak'] = ecg_df['timestamp'].isin(peak_times) \n\n\tplt.figure()\n\tplt.plot(ecg_df['timestamp'], ecg_df['ecg'])\n\tplt.plot(ecg_df[ecg_df['is_peak']]['timestamp'], ecg_df[ecg_df['is_peak']]['ecg'], 'ro')\n\tplt.title('Detected R-peaks on ECG Data')\n\tplt.xlabel('Time (ms)')\n\tplt.ylabel('Voltage (mV)')\n\tplt.show()", "def plot_figure10():\n height_ceiling = 500.\n height_ceiling_id = list(height_range_ceilings).index(height_ceiling)\n\n plot_item00 = {\n 'data': 100.-nc.variables[\"p_ceiling_rank40\"][height_ceiling_id, :, :],\n 'contour_fill_levels': np.linspace(50, 100, 21),\n 'contour_line_levels': [70., 80., 90., 95.],\n 'contour_line_label_fmt': '%.0f',\n 'colorbar_ticks': np.linspace(50, 100, 21)[::4],\n 'colorbar_tick_fmt': '{:.0f}',\n 'colorbar_label': 'Availability [%]',\n 'extend': 'min',\n }\n plot_item01 = {\n 'data': 100.-nc.variables[\"p_ceiling_rank300\"][height_ceiling_id, :, :],\n 'contour_fill_levels': np.linspace(0, 80, 21),\n 'contour_line_levels': np.linspace(0, 80, 21)[::4][2:],\n 'contour_line_label_fmt': '%.0f',\n 'colorbar_ticks': np.linspace(0, 80, 21)[::4],\n 'colorbar_tick_fmt': '{:.0f}',\n 'colorbar_label': 'Availability [%]',\n }\n plot_item02 = {\n 'data': 100.-nc.variables[\"p_ceiling_rank1600\"][height_ceiling_id, :, :],\n 'contour_fill_levels': np.linspace(0, 45, 21),\n 'contour_line_levels': np.linspace(0, 45, 21)[::4][2:],\n 'contour_line_label_fmt': '%.0f',\n 'colorbar_ticks': np.linspace(0, 45, 21)[::4],\n 'colorbar_tick_fmt': '{:.0f}',\n 'colorbar_label': 'Availability [%]',\n }\n\n column_titles = [\"40 $W/m^2$\", \"300 $W/m^2$\", \"1600 $W/m^2$\"]\n plot_items = [plot_item00, plot_item01, plot_item02]\n\n eval_contour_fill_levels(plot_items)\n plot_panel_1x3_seperate_colorbar(plot_items, column_titles)\n\n plot_item10 = {\n 'data': (100.-nc.variables[\"p_ceiling_rank40\"][height_ceiling_id, :, :])-\n (100.-nc.variables[\"p_fixed_rank40\"][0, :, :]),\n 'contour_fill_levels': np.linspace(0., 22., 21),\n 'contour_line_levels': sorted([1.1, 2.2]+list(np.linspace(0., 22., 21)[::4][:-2])),\n 'contour_line_label_fmt': '%.1f',\n 'colorbar_ticks': np.linspace(0., 22., 21)[::4],\n 'colorbar_tick_fmt': '{:.0f}',\n 'colorbar_label': 'Availability increase [%]',\n }\n plot_item11 = {\n 'data': (100.-nc.variables[\"p_ceiling_rank300\"][height_ceiling_id, :, :])-\n (100.-nc.variables[\"p_fixed_rank300\"][0, :, :]),\n 'contour_fill_levels': np.linspace(0., 31., 21),\n 'contour_line_levels': np.linspace(0., 31., 21)[::4][:-2],\n 'contour_line_label_fmt': '%.1f',\n 'colorbar_ticks': np.linspace(0., 31., 21)[::4],\n 'colorbar_tick_fmt': '{:.0f}',\n 'colorbar_label': 'Availability increase [%]',\n }\n plot_item12 = {\n 'data': (100.-nc.variables[\"p_ceiling_rank1600\"][height_ceiling_id, :, :])-\n (100.-nc.variables[\"p_fixed_rank1600\"][0, :, :]),\n 'contour_fill_levels': np.linspace(0., 26., 21),\n 'contour_line_levels': np.linspace(0., 
26., 21)[::4][:-2],\n 'contour_line_label_fmt': '%.1f',\n 'colorbar_ticks': np.linspace(0., 26., 21)[::4],\n 'colorbar_tick_fmt': '{:.0f}',\n 'colorbar_label': 'Availability increase [%]',\n }\n\n column_titles = None\n plot_items = [plot_item10, plot_item11, plot_item12]\n\n eval_contour_fill_levels(plot_items)\n plot_panel_1x3_seperate_colorbar(plot_items, column_titles)", "def plot_clusters(self):\n pass", "def show(self):\n \n \n \n \n \n \n r = 4\n f, axarr = plt.subplots(r, r, figsize=(8,8))\n counter = 0\n for i in range(r):\n for j in range(r):\n temp = self.x[counter,:]\n counter += 1\n img = self.x[counter,:]\n axarr[i][j].imshow(img)\n #######################################################################\n # #\n # #\n # TODO: YOUR CODE HERE #\n # #\n # #\n #######################################################################", "def plot_roc_curve(ht, scores, tp_label='tp', fp_label='fp', colors=None, title='ROC Curve', hover_mode='mouse'):\n if colors is None:\n # Get a palette automatically\n from bokeh.palettes import d3\n palette = d3['Category10'][max(3, len(scores))]\n colors = {score: palette[i] for i, score in enumerate(scores)}\n\n if isinstance(scores, str):\n scores = [scores]\n total_tp, total_fp = ht.aggregate((hl.agg.count_where(ht[tp_label]), hl.agg.count_where(ht[fp_label])))\n\n p = figure(title=title, x_axis_label='FPR', y_axis_label='TPR', tools=\"hover,save,pan,box_zoom,reset,wheel_zoom\")\n p.add_layout(Title(text=f'Based on {total_tp} TPs and {total_fp} FPs'), 'above')\n\n aucs = []\n for score in scores:\n ordered_ht = ht.key_by(_score=-ht[score])\n ordered_ht = ordered_ht.select(\n score_name=score, score=ordered_ht[score],\n tpr=hl.scan.count_where(ordered_ht[tp_label]) / total_tp,\n fpr=hl.scan.count_where(ordered_ht[fp_label]) / total_fp,\n ).key_by().drop('_score')\n last_row = hl.utils.range_table(1).key_by().select(score_name=score, score=hl.float64(float('-inf')), tpr=hl.float64(1.0), fpr=hl.float64(1.0))\n ordered_ht = ordered_ht.union(last_row)\n ordered_ht = ordered_ht.annotate(\n auc_contrib=hl.or_else((ordered_ht.fpr - hl.scan.max(ordered_ht.fpr)) * ordered_ht.tpr, 0.0)\n )\n auc = ordered_ht.aggregate(hl.agg.sum(ordered_ht.auc_contrib))\n aucs.append(auc)\n df = ordered_ht.annotate(score_name=ordered_ht.score_name + f' (AUC = {auc:.4f})').to_pandas()\n p.line(x='fpr', y='tpr', legend_field='score_name', source=ColumnDataSource(df), color=colors[score], line_width=3)\n\n p.legend.location = 'bottom_right'\n p.legend.click_policy = 'hide'\n p.select_one(HoverTool).tooltips = [(x, f\"@{x}\") for x in ('score_name', 'score', 'tpr', 'fpr')]\n p.select_one(HoverTool).mode = hover_mode\n return p, aucs", "def plotallbands(_zband, _yband, _jband, _hband, _kband, _period):\n # Set pyplot style to be consisten within the program\n plt.style.use('seaborn-whitegrid')\n # Frequency = 1 / Period\n _freq = 1 / _period\n\n # Create single dataset from all bands\n _bands = [_zband, _yband, _jband, _hband, _kband]\n # Iterate through each band and plot to screen\n i = 0\n while i < 5:\n # Array to set colours for each band\n _colours = ['-b', '-g', '-r', '-c', '-m']\n # Array to set strings for graph legend\n _legend = ['Z-band', 'Y-band', 'J-band', 'H-band', 'K-band']\n # Determine the line of best fit for each band\n _xfit, _lobf = calclobf(_bands[i], _period)\n # Plot the data in the array to screen, lightly coloured and z rank behind the line of best fit\n plt.plot(_xfit, _lobf, _colours[i], lw=1, zorder=2, label=_legend[i])\n i += 1\n\n # Set x-axis 
limit to a single period\n plt.xlim(0, 1)\n # Set graph and axis titles\n plt.xlabel(\"Phase\")\n plt.ylabel(\"Magnitude\")\n plt.title(\"Folded light curve\")\n # Show the legend\n plt.legend()\n # Invert y-axis as convention\n plt.gca().invert_yaxis()\n # Save to current folder\n plt.savefig('curve.png')\n # Display to screen\n plt.show()", "def plot_ROC(self, canvas):\n\t\tfpr = self.fpr_\n\t\ttpr = self.tpr_\n\t\tauc = self.class_auroc_\n\t\tclasses = self.classes_\n\t\tnum_classes = self.num_classes_\n\n\t\trcParams.update({'font.size': 7})\n\t\tcanvas.figure.clear()\n\t\tax = canvas.figure.subplots()\n\n\t\tax.plot(fpr['avg'], tpr['avg'], label='avg (area={0})'.format(self.avg_auroc_), \\\n\t\t\tcolor = 'black', linewidth=2, linestyle='--')\n\n\t\tcolors = cycle(['blue', 'orange', 'green', 'red', 'yellow', 'purple', 'cyan'])\n\t\tfor i, color in zip(range(0, num_classes), colors):\n\t\t\tax.plot(fpr[i], tpr[i], label='{0} (area={1})'.format(classes[i], auc[classes[i]]), \\\n\t\t\t\tcolor=color, linewidth=1)\n\n\t\tax.plot([0 ,1], [0, 1], color='lightgray', linewidth=1, linestyle='--')\n\t\tax.set_xlim([0.0, 1.0])\n\t\tax.set_ylim([0.0, 1.05])\n\t\tax.set_xlabel('FPR')\n\t\tax.set_ylabel('TPR')\n\t\tax.legend(loc='lower right')\n\n\t\tcanvas.figure.tight_layout()\n\t\tcanvas.draw()", "def combine_plot(qa_out_path,brain_path):\n \n #Get the scan volume of the brain.\n brain_ref = nib.load(brain_path)\n brain_ref_shape = brain_ref.shape[0:3]\n \n plots_list = ['Rotate_Z_axis_000000.png','Rotate_Z_axis_000001.png','Rotate_Z_axis_000002.png',\n 'Rotate_Y_axis_000000.png','Rotate_Y_axis_000001.png','Rotate_Y_axis_000002.png',\n 'Rotate_X_axis_000000.png','Rotate_X_axis_000001.png','Rotate_X_axis_000002.png']\n y_labels = [\"Rotate with Z axis\",\"Rotate with Y axis\",\"Rotate with X axis\"]\n x_labels = [\"angle=0\",\"angle=120\",\"angle=240\"]\n \n #Temporary list to store the image nparray:\n im_arr=[] \n \n fig= plt.figure()\n plt.title(f'QA_tractography. Scan volume = {brain_ref_shape} \\n\\n', fontsize=60,fontweight='bold')\n plt.xticks([])\n plt.yticks([])\n plt.axis(\"off\")\n\n j = 0\n for i in range(9):\n #Load in the nine images into a nparray one by one.\n im_arr = np.array(Image.open(qa_out_path + \"/\" + plots_list[i]))\n #Change the background of the image into black:\n im_arr = np.where(im_arr<=0.01, 255, im_arr) \n ax = fig.add_subplot(3,3,i+1)\n ax.imshow(im_arr,interpolation=\"none\",alpha=0.9)\n \n #Set the X labels and Y labels\n if i<3:\n ax.set_title(x_labels[i],fontsize=60,fontweight='bold')\n if i % 3 == 0:\n ax.set_ylabel(y_labels[j],fontsize=60,fontweight='bold')\n j = j + 1\n plt.xticks([])\n plt.yticks([])\n \n fig.set_size_inches(40, 40, forward = True)\n fig.savefig(qa_out_path + \"/\" + 'qa_tractography.png', format='png')\n\n #Delete the Nine images which used to generate the qa_tractography.png \n for plot in plots_list:\n if os.path.exists(qa_out_path + \"/\" + plot):\n os.remove(qa_out_path + \"/\" + plot)\n else:\n print('No such file generated from streamlines window. 
Please check if the streamline.trk files is generated from the pipeline correctly or not')", "def plot(self):\n # Get data\n #print(self.file_name)\n fig, ax = plb.subplots(1,1,figsize=(18,20))\n for key,value in self.testTrend.items():\n x = np.arange(len(self.data_array))\n y = np.asarray(value)\n plb.plot(x,y, label=key)\n ax.scatter(x, y)\n for i in range(0, len(value)):\n ax.annotate(str(i), (x[i], y[i]))\n # Title\n plb.title(self.file_name)\n # Legend\n plb.legend(bbox_to_anchor=(.05, 1), loc='best', borderaxespad=0.)\n # x ticks\n plb.xticks(np.arange(min(x), max(x) + 1, 2.0))\n #plb.ylim(-250, 1)\n # Show image\n plb.show()", "def plot_autocorrs(self, axis=0, n_rows=4, n_cols=8):\n self.current_plot = 'multi'\n self.ax_zoomed = False\n \n bls = self.uv.d_uv_data['BASELINE']\n\n # Extract the relevant baselines using a truth array\n # bls = bls.tolist()\n bl_ids = set([256*i + i for i in range(1, n_rows * n_cols + 1)])\n bl_truths = np.array([(b in bl_ids) for b in bls])\n \n #print self.uv.d_uv_data['DATA'].shape\n #x_data = self.d_uv_data['DATA'][bl_truths,0,0,:,0,axis] # Baselines, freq and stokes\n #x_cplx = x_data[:,:,0] + 1j * x_data[:,:,1]\n\n x_cplx = self.stokes[axis][bl_truths]\n\n\n \n # Plot the figure\n #print self.uv.n_ant\n fig = self.sp_fig\n figtitle = '%s %s: %s -- %s'%(self.uv.telescope, self.uv.instrument, self.uv.source, self.uv.date_obs)\n for i in range(n_rows):\n for j in range(n_cols):\n ax = fig.add_subplot(n_rows, n_cols, i*n_cols + j +1)\n ax.set_title(self.uv.d_array_geometry['ANNAME'][i*n_cols + j], fontsize=10)\n #ax.set_title(\"%s %s\"%(i, j))\n \n x = x_cplx[i*n_cols+j::self.uv.n_ant]\n \n if self.scale_select.currentIndex() == 0 or self.scale_select.currentIndex() == 1:\n if x.shape[0] == self.uv.n_ant:\n self.plot_spectrum(ax, x, label_axes=False)\n else:\n self.plot_spectrum(ax, x, stat='max', label_axes=False)\n self.plot_spectrum(ax, x, stat='med', label_axes=False)\n self.plot_spectrum(ax, x, stat='min', label_axes=False)\n else:\n self.plot_spectrum(ax, x, label_axes=False)\n self.updateFreqAxis(ax)\n \n if i == n_rows-1:\n ax.set_xlabel('Freq')\n if j == 0:\n ax.set_ylabel('Amplitude')\n \n plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n plt.tick_params(axis='both', which='major', labelsize=10)\n plt.tick_params(axis='both', which='minor', labelsize=8)\n plt.xticks(rotation=30)\n \n plt.subplots_adjust(left=0.05, right=0.98, top=0.95, bottom=0.1, wspace=0.3, hspace=0.45)\n return fig, ax", "def plot_examples(cms):\r\n data = amp_axis\r\n\r\n fig, axs = plt.subplots(1, 2, figsize=(30, 8)) #create two plots\r\n for [ax, cmap] in zip(axs, cms):\r\n psm = ax.pcolormesh(time_axis, tof_axis, data, cmap=cmap, rasterized=True, vmin = 250) #specify axis and minimum amplitude value to show on the graph\r\n fig.colorbar(psm, ax=ax, label = 'Amplitude') #define the legend of the amplitude data\r\n \r\n ax.set_ylabel('Time of Flight [\\u03bcs]') #set label for y axis\r\n ax.set_xlabel('Time [min]') #set label for x axis\r\n \r\n ax.hlines(8.744, 0, stop_time, colors = 'white') #create two white lines for the safe operating range for ToF\r\n ax.hlines(9.555, 0, stop_time, colors = 'white') \r\n \r\n plt.show()", "def rsp_plot(rsp_signals, sampling_rate=None, figsize=(10, 10)):\n # Mark peaks, troughs and phases.\n peaks = np.where(rsp_signals[\"RSP_Peaks\"] == 1)[0]\n troughs = np.where(rsp_signals[\"RSP_Troughs\"] == 1)[0]\n inhale = np.where(rsp_signals[\"RSP_Phase\"] == 1)[0]\n exhale = np.where(rsp_signals[\"RSP_Phase\"] == 0)[0]\n\n 
nrow = 2\n if \"RSP_Amplitude\" in list(rsp_signals.columns):\n nrow += 1\n if \"RSP_RVT\" in list(rsp_signals.columns):\n nrow += 1\n if \"RSP_Symmetry_PeakTrough\" in list(rsp_signals.columns):\n nrow += 1\n\n fig, ax = plt.subplots(nrows=nrow, ncols=1, sharex=True, figsize=figsize)\n\n # Determine unit of x-axis.\n last_ax = fig.get_axes()[-1]\n if sampling_rate is not None:\n last_ax.set_xlabel(\"Time (seconds)\")\n x_axis = np.linspace(0, len(rsp_signals) / sampling_rate, len(rsp_signals))\n else:\n last_ax.set_xlabel(\"Samples\")\n x_axis = np.arange(0, len(rsp_signals))\n\n # Plot cleaned and raw respiration as well as peaks and troughs.\n ax[0].set_title(\"Raw and Cleaned Signal\")\n fig.suptitle(\"Respiration (RSP)\", fontweight=\"bold\")\n\n ax[0].plot(x_axis, rsp_signals[\"RSP_Raw\"], color=\"#B0BEC5\", label=\"Raw\", zorder=1)\n ax[0].plot(\n x_axis, rsp_signals[\"RSP_Clean\"], color=\"#2196F3\", label=\"Cleaned\", zorder=2, linewidth=1.5\n )\n\n ax[0].scatter(\n x_axis[peaks],\n rsp_signals[\"RSP_Clean\"][peaks],\n color=\"red\",\n label=\"Exhalation Onsets\",\n zorder=3,\n )\n ax[0].scatter(\n x_axis[troughs],\n rsp_signals[\"RSP_Clean\"][troughs],\n color=\"orange\",\n label=\"Inhalation Onsets\",\n zorder=4,\n )\n\n # Shade region to mark inspiration and expiration.\n exhale_signal, inhale_signal = _rsp_plot_phase(rsp_signals, troughs, peaks)\n\n ax[0].fill_between(\n x_axis[exhale],\n exhale_signal[exhale],\n rsp_signals[\"RSP_Clean\"][exhale],\n where=rsp_signals[\"RSP_Clean\"][exhale] > exhale_signal[exhale],\n color=\"#CFD8DC\",\n linestyle=\"None\",\n label=\"exhalation\",\n )\n ax[0].fill_between(\n x_axis[inhale],\n inhale_signal[inhale],\n rsp_signals[\"RSP_Clean\"][inhale],\n where=rsp_signals[\"RSP_Clean\"][inhale] > inhale_signal[inhale],\n color=\"#ECEFF1\",\n linestyle=\"None\",\n label=\"inhalation\",\n )\n\n ax[0].legend(loc=\"upper right\")\n\n # Plot rate and optionally amplitude.\n ax[1].set_title(\"Breathing Rate\")\n ax[1].plot(x_axis, rsp_signals[\"RSP_Rate\"], color=\"#4CAF50\", label=\"Rate\", linewidth=1.5)\n rate_mean = np.mean(rsp_signals[\"RSP_Rate\"])\n ax[1].axhline(y=rate_mean, label=\"Mean\", linestyle=\"--\", color=\"#4CAF50\")\n ax[1].legend(loc=\"upper right\")\n\n if \"RSP_Amplitude\" in list(rsp_signals.columns):\n ax[2].set_title(\"Breathing Amplitude\")\n\n ax[2].plot(\n x_axis, rsp_signals[\"RSP_Amplitude\"], color=\"#009688\", label=\"Amplitude\", linewidth=1.5\n )\n amplitude_mean = np.mean(rsp_signals[\"RSP_Amplitude\"])\n ax[2].axhline(y=amplitude_mean, label=\"Mean\", linestyle=\"--\", color=\"#009688\")\n ax[2].legend(loc=\"upper right\")\n\n if \"RSP_RVT\" in list(rsp_signals.columns):\n ax[3].set_title(\"Respiratory Volume per Time\")\n\n ax[3].plot(x_axis, rsp_signals[\"RSP_RVT\"], color=\"#00BCD4\", label=\"RVT\", linewidth=1.5)\n rvt_mean = np.mean(rsp_signals[\"RSP_RVT\"])\n ax[3].axhline(y=rvt_mean, label=\"Mean\", linestyle=\"--\", color=\"#009688\")\n ax[3].legend(loc=\"upper right\")\n\n if \"RSP_Symmetry_PeakTrough\" in list(rsp_signals.columns):\n ax[4].set_title(\"Cycle Symmetry\")\n\n ax[4].plot(\n x_axis,\n rsp_signals[\"RSP_Symmetry_PeakTrough\"],\n color=\"green\",\n label=\"Peak-Trough Symmetry\",\n linewidth=1.5,\n )\n ax[4].plot(\n x_axis,\n rsp_signals[\"RSP_Symmetry_RiseDecay\"],\n color=\"purple\",\n label=\"Rise-Decay Symmetry\",\n linewidth=1.5,\n )\n ax[4].legend(loc=\"upper right\")", "def plot_overview(cube=nro_12co,\n region_file='../shell_candidates/AllShells.reg', mode='peak', 
plotname='12co_peak_shells.png',\n interactive=False, show_shells=True, shells_highlight=None, dist=orion_dist, vmin=None, vmax=None,\n scalebar_color=\"white\", scalebar_pc = 1., scale_factor=1., pmin=0.25,\n pmax=99.75, cbar_label=r\"Peak T$_{\\rm MB}$ [K]\",\n circle_color='white', circle_linewidth=1, circle_style=\"solid\", return_fig=False, show=True,\n title=r\"$^{12}$CO Peak T$_{MB}$\", recenter=False, ra=None, dec=None, radius=None):\n try:\n cube = SpectralCube.read(cube)\n except ValueError:\n pass\n\n if mode == \"peak\":\n image = (cube.max(axis=0) * scale_factor).hdu\n \n\n if mode == \"mom0\":\n image = (cube.moment0() * scale_factor).hdu\n\n\n\n fig = FITSFigure(image)\n if show:\n fig.show_colorscale(cmap='viridis', vmin=vmin, vmax=vmax, pmin=pmin,\n pmax=pmax, interpolation='none')\n fig.tick_labels.set_yformat(\"dd:mm\")\n fig.tick_labels.set_xformat(\"hh:mm\")\n #fig.hide_yaxis_label()\n #fig.hide_ytick_labels()\n plt.title(title)\n plt.xlabel(\"RA (J2000)\")\n plt.ylabel(\"DEC (J2000)\")\n\n if show_shells:\n shell_list = get_shells(region_file=region_file)\n for i, shell in enumerate(shell_list):\n if shells_highlight:\n if i+1 in shells_highlight:\n fig.show_circles(shell.ra.value, shell.dec.value, shell.radius.value, linestyle='solid', edgecolor=circle_color,\n facecolor='none', linewidth=3)\n else:\n fig.show_circles(shell.ra.value, shell.dec.value, shell.radius.value, linestyle=circle_style, edgecolor=circle_color,\n facecolor='none', linewidth=circle_linewidth)\n else:\n fig.show_circles(shell.ra.value, shell.dec.value, shell.radius.value, linestyle=circle_style, edgecolor=circle_color,\n facecolor='none', linewidth=circle_linewidth)\n\n #RECENTER\n if recenter:\n fig.recenter(ra, dec, radius)\n\n\n #SCALEBAR\n fig.add_scalebar(206265 * scalebar_pc / (dist.to(u.pc).value * 3600), color=scalebar_color)\n fig.scalebar.set_label(\"{} pc\".format(scalebar_pc))\n\n fig.add_colorbar()\n cb = fig.colorbar\n cb.set_axis_label_text(cbar_label)\n\n if return_fig:\n return fig\n else:\n fig.save(plotname, dpi=600)", "def visualise_dataset_classifier_results(dataset_results):\n current_time = datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n file_name = \"raw_dump_{0}.txt\".format(current_time)\n with open(os.path.dirname(os.path.realpath(__file__)) + \"/../results/\" + file_name, \"wb\") as output_file:\n output_file.write(str(dataset_results))\n sns.set(style='ticks')\n fig = plt.figure(figsize=(10, 10))\n ax = fig.add_subplot(1, 1, 1)\n markers = [\"s\", \"o\", \"^\", \"*\"]\n colors = [\"#64B3DE\", \"#1f78b4\", \"#B9B914\", \"#FBAC44\", \"#bc1659\", \"#33a02c\", \"#6ABF20\", \"#ff7f00\", \"#6a3d9a\", \"grey\", \"#b15928\", \"#e31a1c\", \"black\"]\n color_dict = {}\n index = 0\n for (_, classifier_description) in dataset_results[0][1]:\n color_dict[classifier_description] = colors[index]\n index += 1\n\n hatches = [None, \"////\", \"..\"]\n\n # Move left y-axis and bottom x-axis to centre, passing through (0,0)\n ax.spines['left'].set_position('center')\n ax.spines['bottom'].set_position('center')\n\n # Eliminate upper and right axes\n ax.spines['right'].set_color('none')\n ax.spines['top'].set_color('none')\n\n # Show ticks in the left and lower axes only\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n ax.set_axis_on()\n ax.spines['left'].set_color('black')\n ax.spines['bottom'].set_color('black')\n plt.xlabel(\"Change in TPR\")\n plt.ylabel(\"Change in TNR\")\n\n ax.xaxis.set_label_coords(0.1, 0.52)\n ax.yaxis.set_label_coords(0.53, 
0.9)\n\n plt.ylim(-0.2, 0.2)\n plt.xlim(-0.2, 0.2)\n data_set_labels = []\n classifier_labels = []\n data_set_index = 0\n for (data_set, dataset_result) in dataset_results:\n data_set_labels.append(mlines.Line2D(range(1), range(1), color=\"white\", marker=markers[data_set_index], markeredgecolor=\"black\", markeredgewidth=1.0, label=data_set.replace(\"_\", \" \")))\n median_true_pos = np.median(np.array([result_arr[3] for (result_arr, classifier_description) in dataset_result]))\n median_true_neg = np.median(np.array([result_arr[4] for (result_arr, classifier_description) in dataset_result]))\n\n i = 0\n for (result_arr, classifier_description) in dataset_result:\n if data_set_index == 0:\n classifier_labels.append(mpatches.Patch(facecolor=color_dict[classifier_description], hatch=hatches[i % len(hatches)], label=classifier_description, alpha=0.8, edgecolor=\"black\"))\n ax.scatter(result_arr[3] - median_true_pos, result_arr[4] - median_true_neg, marker=markers[data_set_index], hatch=hatches[i % len(hatches)], s=200, alpha=0.8, color=colors[i],\n edgecolor=\"black\", zorder=data_set_index, lw=0.8)\n i += 1\n data_set_index += 1\n\n plt.legend(handles=data_set_labels + classifier_labels)\n sns.despine()\n current_time = datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n plt.savefig(os.path.dirname(os.path.realpath(__file__)) + \"/../results/classifier_dataset_plt_{0}.png\".format(current_time), bbox_inches='tight')\n plt.close(fig)", "def plot_pc_curves_together(binary_model, ova_model, multi_model, indices):\n binary_range_metrics = binary_model.compute_probability_range_metrics(\n binary_model.results)\n ova_range_metrics = ova_model.compute_probability_range_metrics(\n ova_model.results)\n multi_range_metrics = multi_model.compute_probability_range_metrics(\n multi_model.results)\n\n class_labels = ova_model.class_labels\n f, ax = plt.subplots(nrows=len(indices),\n ncols=3,\n sharex=True, sharey=True,\n figsize=(FIG_WIDTH, 10),\n dpi=DPI)\n\n y_indices = [0, 0.2, 0.4, 0.6, 0.8, 1]\n y_ticks = [\"0\", \"20\", \"40\", \"60\", \"80\", \"\"]\n plot_index = 0\n for class_index, class_name in enumerate(class_labels):\n if class_index not in indices:\n continue\n\n if plot_index == 0:\n # Add titles to top of plots\n ax[plot_index][0].set_title(\"Binary\", fontsize=TICK_S)\n ax[plot_index][1].set_title(\"OVA\", fontsize=TICK_S)\n ax[plot_index][2].set_title(\"Multi\", fontsize=TICK_S)\n\n plot_model_curves(class_name, binary_model,\n binary_range_metrics, ax[plot_index][0])\n plot_model_curves(class_name, ova_model, ova_range_metrics, ax[plot_index][1])\n mirror_ax = plot_model_curves(\n class_name, multi_model, multi_range_metrics, ax[plot_index][2])\n\n ax[plot_index][0].set_yticks(ticks=y_indices)\n ax[plot_index][0].set_yticklabels(labels=y_ticks, color=P_BAR_COLOR)\n mirror_ax.set_yticks(ticks=y_indices)\n mirror_ax.set_yticklabels(labels=y_ticks, color=C_BAR_COLOR)\n ax[plot_index][0].tick_params(axis='both', direction='in', labelsize=10)\n ax[plot_index][1].tick_params(axis='both', direction='in')\n ax[plot_index][2].tick_params(axis='both', direction='in', labelsize=10)\n\n mpl.rcParams['font.serif'] = ['times', 'times new roman']\n mpl.rcParams['font.family'] = 'serif'\n pretty_class_name = clean_class_name(class_name)\n ax[plot_index][0].text(0, 0.85, pretty_class_name, fontsize=14)\n plot_index += 1\n\n x_indices = np.linspace(0, 1, 11)[:-1]\n\n plt.xticks(x_indices, [\"\", \"10\", \"\", \"30\", \"\", \"50\", \"\", \"70\", \"\", \"90\"])\n rc('text', usetex=True)\n f.text(0.5, 0.08, 
r'Probability $\\geq$X\\%', fontsize=TICK_S, ha='center')\n bp = \"Balanced \" if binary_model.balanced_purity else \"\"\n f.text(0.03, .5, bp + 'Purity (\\%)',\n fontsize=TICK_S, va='center', rotation='vertical', color=P_BAR_COLOR)\n f.text(0.98, .5, 'Completeness (\\%)',\n fontsize=TICK_S, va='center', rotation='vertical', color=C_BAR_COLOR)\n\n plt.subplots_adjust(wspace=0, hspace=0)\n\n f.savefig(\"../output/custom_figures/merged_pc_curves_\" +\n str(indices) + \".pdf\", bbox_inches='tight')\n plt.show()", "def plot_roc(preds, labels, title=\"Receiver operating characteristic\"):\n\n # Compute values for curve\n fpr, tpr, _ = roc_curve(labels, preds)\n\n # Compute FPR (95% TPR)\n tpr95 = fpr_at_95_tpr(preds, labels)\n\n # Compute AUROC\n roc_auc = auroc(preds, labels)\n\n # Draw the plot\n plt.figure()\n lw = 2\n plt.plot(fpr, tpr, color='darkorange',\n lw=lw, label='AUROC = %0.2f' % roc_auc)\n plt.plot([0, 1], [0.95, 0.95], color='black', lw=lw, linestyle=':', label='FPR (95%% TPR) = %0.2f' % tpr95)\n plt.plot([tpr95, tpr95], [0, 1], color='black', lw=lw, linestyle=':')\n plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--', label='Random detector ROC')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title(title)\n plt.legend(loc=\"lower right\")\n plt.show()", "def visualise(self):\n\n # Initialise figure\n params = {\"figure.figsize\": (5, 5)}\n pylab.rcParams.update(params)\n self.fig = plt.figure()\n self.ax = self.fig.add_subplot(111)\n\n # Add particles if selected\n print(self.crds)\n cmap=cm.get_cmap('coolwarm')\n norm=Normalize(0,20)\n print(np.max(self.radii))\n print(np.max(self.weights))\n if self.vis_particles:\n if self.vis_vortype==-2:\n radii=self.weights\n if self.param>10:\n self.param=(self.param-10)/2+10\n colour=cmap(norm(self.param))\n else:\n radii=self.radii\n radii=self.weights\n colour='orange'\n colour=(0.8,0.687,0.287,1)\n colour='gold'\n patches = []\n patches_pnts = []\n patches_absent = []\n for i,c in enumerate(self.crds):\n patches.append(Circle(c,radius=radii[i]))\n if radii[i]>0:\n patches_pnts.append(Circle(c,radius=0.1))\n else:\n patches_absent.append(Circle(c,radius=0.1))\n self.ax.add_collection(PatchCollection(patches, facecolor=colour, edgecolor='k', alpha=0.5))\n self.ax.add_collection(PatchCollection(patches_pnts, facecolor='k', alpha=1,zorder=1))\n if self.vis_vortype==2:\n self.ax.add_collection(PatchCollection(patches_absent, facecolor='k', alpha=0.5,zorder=1))\n else:\n self.ax.add_collection(PatchCollection(patches_absent, facecolor='k', alpha=1,zorder=1))\n\n # Add voronoi\n if self.vis_vortype!=0:\n patches = []\n colours = []\n if self.vis_cellcolour==1:\n cell_colours = self.init_cell_colours()\n else:\n cell_colours = [(0,0,0,0)]*100\n for i in range(self.m):\n patches.append(Polygon(self.rings[i],True))\n colours.append(cell_colours[self.rings[i][:,0].size])\n self.ax.add_collection(PatchCollection(patches, facecolor=colours, edgecolor='k', linewidth=1, zorder=0))\n\n # Sandbox\n # print(np.max(self.radii))\n # cmap=cm.get_cmap('coolwarm')\n # norm=Normalize(0,np.max(20))\n sandbox=False\n if sandbox:\n # z=16\n # w=np.zeros_like(self.radii)\n # mask=2*self.radii>z\n # w[mask]=z**0.5*np.sqrt(2*self.radii[mask]-z)\n # patches = []\n # for i,c in enumerate(self.crds):\n # patches.append(Circle(c,radius=w[i]))\n # self.ax.add_collection(PatchCollection(patches, facecolor=cmap(norm(z)), edgecolor='k'))\n with open('./phi.dat','w') as f:\n for z in 
np.arange(0,np.max(self.radii)*2+0.5,0.01):\n w=np.zeros_like(self.radii)\n mask=2*self.radii>z\n w[mask]=z**0.5*np.sqrt(2*self.radii[mask]-z)\n phi=np.sum(np.pi*w**2)/52359.9\n # phi=np.sum(np.pi*w**2)/1309\n f.write('{:.6f} {:.6f}\\n'.format(z,phi))\n\n\n\n # Set axes\n buffer = 1.6\n lim = buffer*np.max(np.abs(self.crds))\n self.ax.set_xlim((-lim,lim))\n self.ax.set_ylim((-lim,lim))\n self.ax.set_axis_off()\n\n # Show figure\n if self.vis_save:\n plt.savefig('{}_{}_{}.png'.format(self.prefix,self.frame,self.vis_vortype),dpi=400)\n plt.show()", "def plot_detects(self, ax, color_by=None, add_cbar=False, \r\n plot_smoothed=False, plot_outliers=False, \r\n plot_changepts=False, plot_changepts_fill=False,\r\n mask=None, max_del_vel=1.0, **kwargs):\r\n\r\n lines = []\r\n if mask is None:\r\n rec_tr = self.rec_track\r\n else:\r\n rec_tr = self.rec_track[mask]\r\n ndetects = len(rec_tr)\r\n for nd in range(ndetects-1):\r\n tr = rec_tr[nd]\r\n xy1 = [tr.X,tr.Y]\r\n tr = rec_tr[nd+1]\r\n xy2 = [tr.X,tr.Y]\r\n lines.append([xy1, xy2])\r\n time_from_entry = rec_tr.Sec - self.t_entry + 1\r\n log_age = np.log10(time_from_entry)\r\n if color_by == 'age':\r\n kwargs['array'] = np.asarray(log_age)\r\n kwargs['cmap'] = cm_age\r\n kwargs['linewidths'] = (0.8)\r\n tr_lines = LineCollection(lines, **kwargs)\r\n if color_by not in ['daynight','routesel']:\r\n ax.add_collection(tr_lines)\r\n elif color_by == 'routesel':\r\n self.identify_route_selection(mask)\r\n # add raw position dots to vertices of lines\r\n\r\n clims = [ticks[0],ticks[-1]]\r\n tr_lines.set_clim(clims)\r\n if add_cbar:\r\n label = 'Time from Entry (seconds)'\r\n c1 = plt.gcf().colorbar(tr_lines)\r\n c1.set_label(label)\r\n c1.set_ticks(ticks)\r\n c1.set_ticklabels(tick_labels)\r\n # plot flagged positions \r\n if plot_outliers:\r\n kwargs['linewidths'] = (0.2)\r\n kwargs['linestyle'] = ('--')\r\n rec_tr_all = self.rec_track\r\n if mask is not None: # plot thin lines to outliers\r\n all_lines = []\r\n for nd in range(self.ndetects-1):\r\n tr = rec_tr_all[nd]\r\n xy1 = [tr.X,tr.Y]\r\n tr = rec_tr_all[nd+1]\r\n xy2 = [tr.X,tr.Y]\r\n all_lines.append([xy1, xy2])\r\n if color_by == 'age':\r\n time_from_entry = rec_tr_all.Sec - self.t_entry + 1\r\n log_age_all = np.log10(time_from_entry)\r\n kwargs['array'] = np.asarray(log_age_all)\r\n tr_lines_all = LineCollection(all_lines, **kwargs)\r\n if color_by in ['daynight','routesel']:\r\n ax.add_collection(tr_lines_all)\r\n tr_lines_all.set_clim(clims)\r\n tr_lines_all.set_zorder(1)\r\n # plot flagged outliers\r\n for nm, method in enumerate(self.outlier_methods):\r\n omarker = outlier_marker[method]\r\n ocolor = outlier_color[method]\r\n if method == 'Dry':\r\n color = ocolor\r\n else:\r\n color = \"None\"\r\n flagged = np.where(rec_tr_all[method] == 1)[0]\r\n ax.scatter(rec_tr_all.X[flagged], rec_tr_all.Y[flagged], \r\n marker=omarker, edgecolor=ocolor, \r\n c=color, s=10.0, zorder=8)\r\n if color_by == 'age':\r\n pos = ax.scatter(rec_tr.X, rec_tr.Y, marker='.', s=2.6, \r\n cmap=cm_age, vmin=ticks[0], vmax=ticks[1])\r\n elif color_by == 'daynight':\r\n i = self.mnames.index('daytime_entry')\r\n day = self.metrics[i]\r\n if day:\r\n colr = 'r'\r\n else:\r\n colr = 'k'\r\n pos = ax.scatter(rec_tr.X, rec_tr.Y, marker='.', s=2.6, c=colr)\r\n elif color_by == 'routesel':\r\n if self.route == 'Old':\r\n colr = 'r'\r\n elif self.route == 'SJ':\r\n colr = 'g'\r\n else:\r\n colr = 'gold'\r\n pos = ax.scatter(rec_tr.X, rec_tr.Y, marker='.', s=2.6, c=colr)\r\n\r\n if plot_smoothed: # plot smoothed positions 
on top\r\n #trs = self.rec_smooth_pos\r\n trs = self.rec_smooth_fill\r\n ax.scatter(trs.X, trs.Y, marker='o', color='darkviolet', s=0.8,\r\n zorder=9)\r\n\r\n if plot_changepts: # assumes smoothed position record is available\r\n trs = self.rec_smooth_pos\r\n #turn_angle = self.turn_angle(rec_track = trs)\r\n #turn_angle = trs.turn_angle\r\n mask = trs.change_pt_flag1\r\n ax.scatter(trs.X[mask], trs.Y[mask], marker='^', \r\n c='None',edgecolor='pink', s=8.0, zorder=9)\r\n# c=cm_red(turn_angle[mask1]), s=8.0, zorder=9)\r\n mask = trs.change_pt_flag2\r\n ax.scatter(trs.X[mask], trs.Y[mask], marker='^', \r\n c='None',edgecolor='salmon', s=16.0, zorder=9)\r\n# c=cm_red(turn_angle[mask2]), s=4.0, zorder=9)\r\n mask = trs.change_pt_flag3\r\n ax.scatter(trs.X[mask], trs.Y[mask], marker='^', \r\n c='None',edgecolor='r', s=32.0, zorder=9)\r\n if plot_changepts_fill: # assumes smoothed position record is available\r\n trs = self.rec_smooth_fill\r\n mask = trs.change_pt\r\n ax.scatter(trs.X[mask], trs.Y[mask], marker='^', \r\n c='None',edgecolor='r', s=32.0, zorder=9)\r\n # overwrite smoothed points using p_stat colorbar\r\n ps = ax.scatter(trs.X, trs.Y, marker='.', c=trs.p_stat, \r\n vmin=0, vmax=0.2, cmap=cm_red_r, s=1.0, zorder=9)\r\n cbar_ps = plt.gcf().colorbar(ps)\r\n cbar_ps.set_label('p statistic')\r\n cbar_ps.set_ticks([0,0.2])\r\n c1.set_ticklabels(tick_labels)", "def plot_roc(self, ax, prob, y, label='ROC'):\n self.df = self.calculate_threshold_values(prob, y)\n ax.plot([1] + list(self.df.fpr), [1] + list(self.df.tpr), label=label)\n x = [1] + list(self.df.fpr)\n y1 = [1] + list(self.df.tpr)\n y2 = x\n ax.fill_between(x, y1, y2, alpha=0.2)\n ax.set_xlabel('fpr')\n ax.set_ylabel('tpr')\n ax.set_title('ROC Curve')\n ax.legend()", "def plot_roc_curve(tprs, aucs, tag=''):\n fig, ax = plt.subplots()\n ax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',\n label='Chance', alpha=.8)\n\n mean_tpr = np.mean(tprs, axis=0)\n mean_tpr[-1] = 1.0\n mean_fpr = np.linspace(0, 1, 100)\n\n mean_auc = auc(mean_fpr, mean_tpr)\n std_auc = np.std(aucs)\n ax.plot(mean_fpr, mean_tpr, color='b',\n label=r'Mean ROC (AUC = %0.2f $\\pm$ %0.2f)' % (mean_auc, std_auc),\n lw=2, alpha=.8)\n\n std_tpr = np.std(tprs, axis=0)\n tprs_upper = np.minimum(mean_tpr + std_tpr, 1)\n tprs_lower = np.maximum(mean_tpr - std_tpr, 0)\n ax.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,\n label=r'$\\pm$ 1 std. 
dev.')\n\n ax.set(xlim=[-0.05, 1.05], ylim=[-0.05, 1.05],\n title=\"Receiver operating characteristic example\")\n ax.legend(loc=\"lower right\")\n plt.tight_layout()\n plt.savefig(f'roc_{tag}.png')\n plt.show()", "def reconstruction_plot(yyy, color = 'r'):\n length = len(yyy)\n plt.plot(np.linspace(0, 1, length)[:length // to_show + 1]\n , yyy[:length // to_show + 1], color)\n # plt.plot(np.linspace(0, 1, len(yyy)), yyy, color)", "def plot_lines(self):\n self.plot(3)", "def plot_roc_distributions(self, model_str, resampling_number, roc_curve_steps, roc_plot_path):\n sampling_types = ['Normal', 'Oversampling', 'Undersampling']\n\n PLOT_MARGIN = 0.05\n plt.rcParams[\"figure.figsize\"] = (16, 9)\n plt.subplots_adjust(wspace=0.2, hspace=0.4)\n sub_plot_index = 1\n\n for sampling_type in sampling_types:\n mean_fpr, mean_tpr, mean_threshold, mean_auc, std_auc = self._compute_mean_auc_data(sampling_type, model_str, resampling_number, roc_curve_steps)\n\n plt.subplot(int('22' + str(sub_plot_index)))\n\n sub_plot_index += 1\n\n plt.plot(mean_fpr, mean_tpr, color='g', label='AUC:{0}, STD:{1}'.format(round(mean_auc, 2), round(std_auc, 2)))\n plt.plot(mean_fpr, mean_threshold, linestyle='--', lw=2, color='b', label='Thresholds')\n plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r', label='Chance')\n\n plt.xlim([0 - PLOT_MARGIN, 1 + PLOT_MARGIN])\n plt.ylim([0 - PLOT_MARGIN, 1 + PLOT_MARGIN])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title(sampling_type + ' ROC Distribution')\n plt.legend(loc=\"lower right\")\n\n plt.savefig(roc_plot_path)\n plt.clf()", "def plot(self, *args, **kwargs):\r\n lines = super(RadarAxes, self).plot(*args, **kwargs)\r\n for line in lines:\r\n self._close_line(line)", "def plot_sherpa_contours():\n log.info(\"plotting parameters contours obtained from sherpa\")\n # where to take the results, configurations for the individual butterflies\n instruments = [\"fermi\", \"magic\", \"hess\", \"fact\", \"veritas\", \"joint\"]\n labels = [\"Fermi-LAT\", \"MAGIC\", \"H.E.S.S.\", \"FACT\", \"VERITAS\", \"joint fit\"]\n colors = [\"#21ABCD\", \"#FF9933\", \"#5A4FCF\", \"#5CC184\", \"#702963\", \"crimson\"]\n lss = [\"--\", \"--\", \"--\", \"--\", \"--\", \"-\"]\n\n fig, axarr = plt.subplots(1, 3, figsize=(18, 6))\n\n # with one loop we realize all the contour plots\n for instrument, label, color, ls in zip(instruments, labels, colors, lss):\n\n path = config.repo_path / f\"results/fit/sherpa/{instrument}\"\n\n contours_path = path / \"fit_contours_logparabola.npy\"\n results_path = path / \"fit_results_logparabola.yaml\"\n\n if not path.exists():\n log.warning(f\"Missing: {path} . 
Skipping.\")\n continue\n\n # load the contours and the results of the fit\n contours = np.load(contours_path).tolist()\n results = load_yaml(results_path)\n\n # define a 2 x 2 matrix to visualise the plot\n # we will delete one of the subplots and make something like a corner plot\n # useful variables for the plot\n ampl_range = contours[\"contour_ampl_c1\"][\"x0_range\"]\n c1_range = contours[\"contour_ampl_c1\"][\"x1_range\"]\n c2_range = contours[\"contour_ampl_c2\"][\"x1_range\"]\n # actual values output of the fit\n # remember in sherpa notation: (amplitude->ampl, alpha->c1, beta->c2)\n ampl = results[\"parameters\"][0][\"value\"]\n c1 = results[\"parameters\"][2][\"value\"]\n c2 = results[\"parameters\"][3][\"value\"]\n\n # axarr[0,0]\n extent = [ampl_range[0] * 1e9, ampl_range[1] * 1e9, c1_range[0], c1_range[1]]\n\n axarr[0].contour(\n contours[\"contour_ampl_c1\"][\"like_values\"],\n contours[\"contour_ampl_c1\"][\"levels\"],\n origin=\"lower\",\n extent=extent,\n colors=color,\n linewidths=(2., 1.5, 1.3),\n linestyles=(\"-\", \"--\", \":\"),\n )\n\n # print actual value\n axarr[0].plot(ampl, c1, marker=\"X\", markersize=7, color=color)\n axarr[0].set_xlabel(\n r\"$f_0 / (\\mathrm{TeV} \\, \\mathrm{cm}^{-2} \\mathrm{s}^{-1})$\"\n )\n axarr[0].set_ylabel(r\"$\\Gamma$\")\n\n extent = [ampl_range[0] * 1e9, ampl_range[1] * 1e9, c2_range[0], c2_range[1]]\n\n axarr[1].contour(\n contours[\"contour_ampl_c2\"][\"like_values\"],\n contours[\"contour_ampl_c2\"][\"levels\"],\n origin=\"lower\",\n extent=extent,\n colors=color,\n linewidths=(2., 1.5, 1.3),\n linestyles=(\"-\", \"--\", \":\"),\n )\n\n # print actual value\n axarr[1].plot(ampl, c2, marker=\"X\", markersize=7, color=color)\n axarr[1].set_ylabel(r\"$\\beta$\")\n axarr[1].set_xlabel(\n r\"$f_0 / (\\mathrm{TeV} \\, \\mathrm{cm}^{-2} \\, \\mathrm{s}^{-1})$\"\n )\n\n extent = [c1_range[0], c1_range[1], c2_range[0], c2_range[1]]\n\n axarr[2].contour(\n contours[\"contour_c1_c2\"][\"like_values\"],\n contours[\"contour_c1_c2\"][\"levels\"],\n origin=\"lower\",\n extent=extent,\n colors=color,\n linewidths=(2., 1.5, 1.3),\n linestyles=(\"-\", \"--\", \":\"),\n )\n\n # print actual value\n axarr[2].plot(c1, c2, marker=\"X\", markersize=7, color=color)\n axarr[2].set_ylabel(r\"$\\beta$\")\n axarr[2].set_xlabel(r\"$\\Gamma$\")\n\n # axarr[0,1] is for the legend\n import matplotlib.lines as mlines\n\n sigma_1 = mlines.Line2D(\n [], [], color=\"k\", marker=\"\", ls=\"-\", lw=2., label=r\"1 $\\sigma$ contour\"\n )\n sigma_2 = mlines.Line2D(\n [], [], color=\"k\", marker=\"\", ls=\"--\", lw=1.5, label=r\"2 $\\sigma$ contour\"\n )\n sigma_3 = mlines.Line2D(\n [], [], color=\"k\", marker=\"\", ls=\":\", lw=1.3, label=r\"3 $\\sigma$ contour\"\n )\n fermi = mlines.Line2D(\n [], [], color=\"#21ABCD\", marker=\"\", ls=\"-\", lw=2., label=\"Fermi-LAT\"\n )\n magic = mlines.Line2D(\n [], [], color=\"#FF9933\", marker=\"\", ls=\"-\", lw=2., label=\"MAGIC\"\n )\n hess = mlines.Line2D(\n [], [], color=\"#5A4FCF\", marker=\"\", ls=\"-\", lw=2., label=\"H.E.S.S.\"\n )\n fact = mlines.Line2D(\n [], [], color=\"#5CC184\", marker=\"\", ls=\"-\", lw=2., label=\"FACT\"\n )\n veritas = mlines.Line2D(\n [], [], color=\"#702963\", marker=\"\", ls=\"-\", lw=2., label=\"VERITAS\"\n )\n joint = mlines.Line2D(\n [], [], color=\"crimson\", marker=\"\", ls=\"-\", lw=2., label=\"joint fit\"\n )\n axarr[2].legend(\n handles=[sigma_1, sigma_2, sigma_3, fermi, magic, hess, fact, veritas, joint],\n loc=3,\n fontsize=12,\n )\n # axarr[2].set_axis_off()\n\n 
plt.tight_layout()\n filename = \"results/figures/sherpa_logparabola_contour.png\"\n fig.savefig(filename)\n log.info(f\"Writing {filename}\")\n fig.savefig(filename)", "def plot_multi_roc_curve(y_trues, y_pred_probs, labels, **params):\n\n figure = plt.figure(figsize=params.get('figsize', (17, 10)))\n roc_aucs = []\n for y_true, y_pred_prob, label in zip(y_trues, y_pred_probs, labels):\n fpr, tpr, thresholds = roc_curve(y_true, y_pred_prob)\n roc_auc = auc(fpr, tpr)\n roc_aucs.append(roc_auc)\n plt.plot(fpr, tpr, label=f'{label} ROC curve (area = %0.5f)' % roc_auc)\n\n plt.plot([0, 1], [0, 1], 'k--')\n plt.xlim([-0.05, 1.05])\n plt.ylim([-0.05, 1.05])\n plt.xticks(np.arange(0.0, 1.1, step=0.1))\n plt.yticks(np.arange(0.0, 1.1, step=0.1))\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('ROC curve')\n plt.legend(loc=\"lower right\")\n\n plt.show()\n\n return figure, roc_aucs", "def plot_vis_crosshairs(fig, vis_data, title, crosscorr, ants, inputs, upper=True, units='', **kwargs):\n fig.subplots_adjust(wspace=0., hspace=0.)\n data_lim = np.max([np.abs(vis).max() for vis in vis_data])\n ax_lim = 1.05 * data_lim\n for n, (indexA, indexB) in enumerate(crosscorr):\n subplot_index = (len(ants) * indexA + indexB + 1) if upper else (indexA + len(ants) * indexB + 1)\n ax = fig.add_subplot(len(ants), len(ants), subplot_index)\n for vis in vis_data:\n ax.plot(vis[:, n].real, vis[:, n].imag, **kwargs)\n ax.axhline(0, lw=0.5, color='k')\n ax.axvline(0, lw=0.5, color='k')\n ax.add_patch(mpl.patches.Circle((0., 0.), data_lim, facecolor='none', edgecolor='k', lw=0.5))\n ax.add_patch(mpl.patches.Circle((0., 0.), 0.5 * data_lim, facecolor='none', edgecolor='k', lw=0.5))\n ax.axis('image')\n ax.axis([-ax_lim, ax_lim, -ax_lim, ax_lim])\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_frame_on(False)\n if upper:\n if indexA == 0:\n ax.xaxis.set_label_position('top')\n ax.set_xlabel(inputs[indexB][3:])\n if indexB == len(ants) - 1:\n ax.yaxis.set_label_position('right')\n ax.set_ylabel(inputs[indexA][3:], rotation='horizontal')\n else:\n if indexA == 0:\n ax.set_ylabel(inputs[indexB][3:], rotation='horizontal')\n if indexB == len(ants) - 1:\n ax.set_xlabel(inputs[indexA][3:])\n fig.text(0.5, 0.95 if upper else 0.05, title, ha='center', va='bottom' if upper else 'top')\n fig.text(0.95 if upper else 0.05, 0.5, 'Outer radius = %g %s' % (data_lim, units), va='center', rotation='vertical')", "def plot(self, *args, **kwargs):\n lines = super(RadarAxes, self).plot(*args, **kwargs)\n for line in lines:\n self._close_line(line)", "def plot(self, *args, **kwargs):\n lines = super(RadarAxes, self).plot(*args, **kwargs)\n for line in lines:\n self._close_line(line)", "def plot_all_rfc_by_ar(ipcc_counts_rfc, meta, cmap):\r\n ipcc_counts_rfc = rp_da.scale_counts(ipcc_counts_rfc.copy())\r\n # prep for plotting\r\n counts_meta = rp_da.merge_counts_meta(ipcc_counts_rfc, meta)\r\n rfcs = list(rp_da.create_rfc_dict().keys())\r\n # Plot the seperate temps\r\n ax = counts_meta.groupby(\"AR\")[rfcs].mean().plot(kind=\"bar\", stacked=True, cmap=cmap) \r\n handles, labels = ax.get_legend_handles_labels()\r\n ax.legend(handles[::-1], labels[::-1], loc=7)\r\n # Make pretty\r\n ax.set_ylabel(\"% Mentions\")\r\n plot_nicer(ax)\r\n fig = plt.gcf()\r\n fig.set_size_inches(8,8)\r\n fig.tight_layout()\r\n plt.savefig(\"Figures\"+ os.sep + \"Supplementary\" + os.sep + \"AR_all_rfc_and_grouped.png\", dpi=200)\r\n plt.close()", "def plot_ord(self,levels=None, gradient_array=False):\n ds = self.ds\n if levels is 
None:\n eps = 1e-4\n maxz = ds.occrd.where(ds.ntrial > self.ntrials_min).max()\n maxz = np.round(maxz*1.1,3)\n levels = linspace(0,maxz+eps,14)\n if gradient_array:\n return ds.kxc.values, ds.kyc.values, ds.occrd.values\n\n cbarticks = levels[::2]\n cbarticklabels = [\"{:.1f}\".format(1e2*_yt) for _yt in cbarticks]\n cmap = 'YlGn' \n kw = dict(levels=levels,extend='neither',cmap=cmap,zorder=0)\n qc = contourf(ds.kxc,ds.kyc,ds.occrd,**kw)\n cbar = colorbar(qc,shrink=0.8,format='%.2f')\n cbar.set_label(self.cbarlabel,size='small')\n cbar.ax.tick_params(labelsize='x-small')", "def plot_correlations(X, netvars, colorbar = False):\n \n C = np.corrcoef(X, rowvar = False) * 100\n C[np.abs(C) < 0.5] = 0 # round near zero to 0\n\n N = np.ceil(C.shape[0]/3)\n fig,ax = plt.subplots(1,1,figsize=(N,N))\n\n ax.imshow(C)\n ax = annotate_heatmap(X = C, ax = ax, xlabels = netvars,\n ylabels = netvars, decimals = 0, x_rot = 90, y_rot = 0, color = \"w\")\n ax.set_title('linear correlation $\\\\in$ [-100,100]')\n \n if colorbar:\n cb = plt.colorbar()\n\n print(__name__ + f'.plot_correlations: [done]')\n\n return fig,ax", "def xx_plot(epoch, model, features, filters, figname, fgal=0.5):\n # fetch Stripe 82 data\n X, Xcov = fetch_prepped_s82data(epoch, fgal, features, filters)\n Xcoadd, Xcoaddcov = fetch_prepped_s82data(epoch, fgal, features,\n filters, use_single=False)\n N = 20000\n X = X[:N]\n Xcov = Xcov[:N]\n Xcoadd = Xcoadd[:N]\n Xcoaddcov = Xcoaddcov[:N]\n\n # unpickle the XD model\n if type(model) == str: \n f = open(model, 'rb')\n model = cPickle.load(f)\n f.close()\n\n # Calculate the posteriors, draw samples\n a, m, v = model.posterior(X, Xcov)\n posts = np.zeros_like(X)\n for i in range(X.shape[0]):\n posts[i] = model.sample(a[i], m[i], v[i], size=1)\n\n lo = [0.01, 0.02, 0.06]\n hi = [0.99, 0.96, 0.98]\n idx = [0, 1, 4]\n bins = [100, 100, 300]\n label = ['psfmag $r$', 'modelmag $u-g$', 'modelmag $i-z$']\n N = len(idx)\n fs = 5\n lsize = 20\n f = pl.figure(figsize=(N * fs, 2 * fs))\n pl.subplots_adjust(wspace=0.3)\n for i in range(N):\n x = X[:, idx[i]]\n y = Xcoadd[:, idx[i]]\n p = posts[:, idx[i]]\n ind = (y > -999) & (Xcoaddcov[:, idx[i]][:, idx[i]] < 10.)\n x = x[ind]\n y = y[ind]\n p = p[ind]\n ax = pl.subplot(2, N, i + 1)\n v = np.sort(x)\n mn, mx = v[np.int(lo[i] * x.shape[0])], v[np.int(hi[i] * x.shape[0])]\n hist2d(x, y, ax=ax, bins=bins[i], plot_contours=True,\n plot_datapoints=True)\n pl.plot([mn, mx], [mn, mx], 'r', lw=2)\n pl.ylabel('Coadd ' + label[i], fontsize=lsize)\n pl.xlabel('Single Epoch ' + label[i], fontsize=lsize)\n pl.xlim(mn, mx)\n pl.ylim(mn, mx)\n ax = pl.subplot(2, N, i + 4)\n hist2d(p, y, ax=ax, bins=bins[i], plot_contours=True,\n plot_datapoints=True)\n pl.plot([mn, mx], [mn, mx], 'r', lw=2)\n pl.xlim(mn, mx)\n pl.ylim(mn, mx)\n pl.ylabel('Coadd ' + label[i], fontsize=lsize)\n pl.xlabel('XD Posterior ' + label[i], fontsize=lsize)\n f.savefig(figname, bbox_inches='tight')", "def group_causality(sig_list, condition, freqs, ROI_labels=None,\n out_path=None, submount=10):\n print 'Running group causality...'\n set_directory(out_path)\n sig_caus = []\n\n for f in sig_list:\n sig_cau = np.load(f)\n print sig_cau.shape[-1]\n sig_caus.append(sig_cau)\n\n sig_caus = np.array(sig_caus)\n sig_group = sig_caus.sum(axis=0)\n plt.close()\n for i in xrange(len(sig_group)):\n fmin, fmax = freqs[i][0], freqs[i][1]\n cau_band = sig_group[i]\n # cau_band[cau_band < submount] = 0\n cau_band[cau_band < submount] = 0\n # fig, ax = pl.subplots()\n cmap = plt.get_cmap('hot', 
cau_band.max()+1-submount)\n cmap.set_under('gray')\n plt.matshow(cau_band, interpolation='nearest', vmin=submount, cmap=cmap)\n if ROI_labels == None:\n ROI_labels = np.arange(cau_band.shape[0]) + 1\n pl.xticks(np.arange(cau_band.shape[0]), ROI_labels, fontsize=9, rotation='vertical')\n pl.yticks(np.arange(cau_band.shape[0]), ROI_labels, fontsize=9)\n # pl.imshow(cau_band, interpolation='nearest')\n # pl.set_cmap('BlueRedAlpha')\n np.save(out_path + '/%s_%s_%sHz.npy' %\n (condition, str(fmin), str(fmax)), cau_band)\n v = np.arange(submount, cau_band.max()+1, 1)\n\n # cax = ax.scatter(x, y, c=z, s=100, cmap=cmap, vmin=10, vmax=z.max())\n # fig.colorbar(extend='min')\n\n plt.colorbar(ticks=v, extend='min')\n # pl.show()\n plt.savefig(out_path + '/%s_%s_%sHz.png' %\n (condition, str(fmin), str(fmax)), dpi=300)\n plt.close()\n return", "def visualize(data_stream, runs, coke_windows, title,\n each=10, alpha=0.3, run_color=\"forestgreen\",\n coke_color=\"royalblue\", figsize=(12,4)):\n\n plt.figure(figsize=figsize)\n\n # Plot data stream\n data_stream.iloc[::each].plot(linewidth=1, ax=plt.gca())\n\n ax = plt.gca()\n\n # Add overlays for runs\n for wi, window in runs.iterrows():\n ax.axvspan(window[\"run_start\"], window[\"run_end\"], alpha=alpha, color=run_color)\n\n # Add overlays for coke windows\n for wi, window in coke_windows.iterrows():\n ax.axvspan(window[\"start\"], window[\"end\"], alpha=alpha, color=coke_color)\n\n plt.title(title, fontsize=12)\n plt.tight_layout()\n plt.show()", "def connect_rug(self):\n for index, value in self.df.loc[\n self.df[f\"highlight_{self.y}\"] == 1\n ].iterrows():\n color = (\n self.fgcolors[0]\n if self.df.loc[index, self.obs] == 0\n else self.fgcolors[1]\n )\n self.ax.hlines(\n y=value[f\"order_{self.y}\"],\n xmin=value[f\"order_{self.x}\"],\n xmax=len(self.df) + self.pad,\n color=color,\n alpha=self.con_alpha,\n zorder=3,\n lw=1.5,\n )\n # vlines for the x-axis.\n for index, value in self.df.loc[\n self.df[f\"highlight_{self.x}\"] == 1\n ].iterrows():\n color = (\n self.fgcolors[0]\n if self.df.loc[index, self.obs] == 0\n else self.fgcolors[1]\n )\n self.ax.vlines(\n x=value[f\"order_{self.x}\"],\n ymin=value[f\"order_{self.y}\"],\n ymax=0 - self.pad,\n color=color,\n alpha=self.con_alpha,\n zorder=3,\n lw=1.5,\n )\n return self", "def plot_cones(data, plot_hist=False, cone_radius=12.0):\n pickle_in = open(\"sparseMICE_SN_data.pickle\", \"rb\")\n SN_data = pickle.load(pickle_in)\n # lenses = sorted_data[f\"Radius{str(cone_radius)}\"]\n # # Go through all SNe\n # for SN_num, key in enumerate(lenses.keys()):\n # if key != 'WEIGHT':\n # cone_indices = np.array([], dtype=np.int16)\n # # Get shells from all previous RADII\n # for r in RADII[0:np.argmin(np.abs(RADII - np.array(cone_radius))) + 1]:\n # cone_indices = np.append(cone_indices, sorted_data[f\"Radius{r}\"][key])\n # # Get redshifts of all galaxies in each SN cone\n # cone_zs[key] = all_zs[cone_indices]\n # print(lenses.keys())\n patches = []\n SNRA = SN_data['SNRA']\n SNDEC = SN_data['SNDEC']\n SNz = SN_data['SNZ']\n for x, y in zip(SNRA, SNDEC):\n circle = Circle((x, y), cone_radius/60.0)\n patches.append(circle)\n\n RA_gal = data['RA']\n DEC_gal = data['DEC']\n z_gal = data['z']\n fig, ax = plt.subplots()\n ax.plot(RA_gal, DEC_gal, marker='o', linestyle='', markersize=1, color=[0.5, 0.5, 0.5])\n contRAs = []\n contDECs = []\n for ra, dec, z in zip(SNRA, SNDEC, SNz):\n indices1 = np.logical_and(z_gal <= z, (RA_gal - ra) ** 2 + (DEC_gal - dec) ** 2 <=\n (cone_radius / 60.0) ** 2)\n contRAs = 
np.append(contRAs, RA_gal[indices1])\n contDECs = np.append(contDECs, DEC_gal[indices1])\n ax.plot(contRAs, contDECs, marker='o', linestyle='', markersize=3, color=colours[1])\n p = PatchCollection(patches, alpha=0.4, color=colours[1])\n ax.add_collection(p)\n\n ax.plot(SNRA, SNDEC, marker='o', linestyle='', markersize=3, label='Supernova', color=colours[3])\n plt.xlabel('$\\\\alpha$')\n plt.ylabel('$\\delta$')\n plt.text(27, -0.8, f\"{cone_radius}' radius\")\n # plt.legend(loc='lower right')\n plt.axis('equal')\n plt.xlim([10.0, 11.5])\n plt.ylim([1.5, 2.5])\n plt.show()\n\n if plot_hist:\n labels = ['Galaxies', 'Supernovae']\n cols = [green, yellow]\n for num, z in enumerate([np.array(z_gal), np.array(SNz)]):\n counts, bin_edges = np.histogram(z, bins=np.arange(0, 1.5 + 0.05, 0.05))\n plt.bar(0.5 * (bin_edges[1:] + bin_edges[:-1]), counts / max(counts), 0.05, linewidth=1, fc=cols[num],\n label=f'{labels[num]}', edgecolor=colours[num])\n plt.xlabel('$z$')\n plt.ylabel('Normalised Count')\n plt.xlim([0, 1.45])\n plt.tight_layout()\n plt.legend(frameon=0)\n\n plt.show()", "def plot(self, corner = True):\n pos = self.posterior_samples\n if self.verbose>=3 and self.NS.prior_sampling is False:\n pri = self.prior_samples\n mc = self.mcmc_samples\n elif self.verbose>=3 or self.NS.prior_sampling is True:\n pri = self.prior_samples\n mc = None\n else:\n pri = None\n mc = None\n from . import plot\n if self.NS.prior_sampling is False:\n for n in pos.dtype.names:\n plot.plot_hist(pos[n].ravel(), name = n,\n prior_samples = self.prior_samples[n].ravel() if pri is not None else None,\n mcmc_samples = self.mcmc_samples[n].ravel() if mc is not None else None,\n filename = os.path.join(self.output,'posterior_{0}.pdf'.format(n)))\n for n in self.nested_samples.dtype.names:\n plot.plot_chain(self.nested_samples[n],name=n,filename=os.path.join(self.output,'nschain_{0}.pdf'.format(n)))\n if self.NS.prior_sampling is False:\n import numpy as np\n plotting_posteriors = np.squeeze(pos.view((pos.dtype[0], len(pos.dtype.names))))\n if pri is not None:\n plotting_priors = np.squeeze(pri.view((pri.dtype[0], len(pri.dtype.names))))\n else:\n plotting_priors = None\n\n if mc is not None:\n plotting_mcmc = np.squeeze(mc.view((mc.dtype[0], len(mc.dtype.names))))\n else:\n plotting_mcmc = None\n\n if corner:\n plot.plot_corner(plotting_posteriors,\n ps=plotting_priors,\n ms=plotting_mcmc,\n labels=pos.dtype.names,\n filename=os.path.join(self.output,'corner.pdf'))\n plot.plot_indices(self.NS.insertion_indices, filename=os.path.join(self.output, 'insertion_indices.pdf'))", "def plot_figure9_lower():\n column_titles = None\n\n height_ceiling = 500.\n height_ceiling_id = list(height_range_ceilings).index(height_ceiling)\n\n fixed_height_ref = 100.\n fixed_height_id = list(fixed_heights).index(fixed_height_ref)\n\n plot_item0 = {\n 'data': nc.variables[\"p_ceiling_perc5\"][height_ceiling_id, :, :]\n / nc.variables[\"p_fixed_perc5\"][fixed_height_id, :, :],\n 'contour_fill_levels': np.linspace(1, 6., 21),\n 'contour_line_levels': np.arange(2., 5., 1.),\n 'contour_line_label_fmt': '%.1f',\n 'colorbar_ticks': np.linspace(1, 6., 21)[::4],\n 'colorbar_tick_fmt': '{:.1f}',\n 'colorbar_label': 'Increase factor [-]',\n 'extend': 'max',\n }\n plot_item1 = {\n 'data': nc.variables[\"p_ceiling_perc32\"][height_ceiling_id, :, :]\n / nc.variables[\"p_fixed_perc32\"][fixed_height_id, :, :],\n 'contour_fill_levels': np.linspace(1, 3.5, 21),\n 'contour_line_levels': np.linspace(1, 3.5, 21)[::4],\n 'contour_line_label_fmt': '%.1f',\n 
'colorbar_ticks': np.linspace(1, 3.5, 21)[::4],\n 'colorbar_tick_fmt': '{:.1f}',\n 'colorbar_label': 'Increase factor [-]',\n 'extend': 'max',\n }\n plot_item2 = {\n 'data': nc.variables[\"p_ceiling_perc50\"][height_ceiling_id, :, :]\n / nc.variables[\"p_fixed_perc50\"][fixed_height_id, :, :],\n 'contour_fill_levels': np.linspace(1, 3.5, 21),\n 'contour_line_levels': np.linspace(1, 3.5, 21)[::4],\n 'contour_line_label_fmt': '%.1f',\n 'colorbar_ticks': np.linspace(1, 3.5, 21)[::4],\n 'colorbar_tick_fmt': '{:.1f}',\n 'colorbar_label': 'Increase factor [-]',\n 'extend': 'max',\n }\n\n plot_items = [plot_item0, plot_item1, plot_item2]\n\n eval_contour_fill_levels(plot_items)\n plot_panel_1x3_seperate_colorbar(plot_items, column_titles)", "def canny_compare_plot(results, results_canny):\n\tbin_result = (results >= 0.1) * 1 # Convert \"standard\" predicted segmentation mask to binary mask \n\tbin_result_canny = (results_canny >= 0.2) * 1 # Convert \"overlayed\" predicted segmentation mask to binary mask \n\ttitles=['Image','Predicted Mask','Predicted Mask Canny','Binary Mask','Binary Mask Canny','Ground Truth']\n\tr=random.sample(range(17),4) # Random sample for test images to display\n\n\t# Define specification of our plot \n\tfig, axs = plt.subplots(4, 6, figsize=(15, 15), facecolor='w', edgecolor='k')\n\tfig.subplots_adjust(hspace = 0.5, wspace=0.2)\n\taxs = axs.ravel()\n\n\tfor i in range(4): # 1 iteration for each selected test image\n\t\t# Displays test image \n\t\taxs[(i*6)+0].set_title(titles[0])\n\t\tfname = 'test/images/img/'+str(r[i])+'.png'\n\t\timage = Image.open(fname).convert(\"L\")\n\t\tarr = np.asarray(image)\n\t\taxs[(i*6)+0].imshow(arr/255, cmap='gray')\n\n\t\t# Displays \"standard\" predicted segmentation mask\n\t\taxs[(i*6)+1].set_title(titles[1])\n\t\tI=np.squeeze(results[r[i],:,:,:])\n\t\taxs[(i*6)+1].imshow(I, cmap=\"gray\")\n\n\t\t# Displays \"overlayed\" predicted segmentation mask\n\t\taxs[(i*6)+2].set_title(titles[3])\n\t\tI=np.squeeze(results_canny[r[i],:,:,:])\n\t\taxs[(i*6)+2].imshow(I, cmap=\"gray\")\n\n\t\t# Displays \"standard\" binary mask\n\t\taxs[(i*6)+3].set_title(titles[2])\n\t\tI=np.squeeze(bin_result[r[i],:,:,:])\n\t\taxs[(i*6)+3].imshow(I, cmap=\"gray\")\n\n\t\t# Displays \"overlayed\" binary mask\n\t\taxs[(i*6)+4].set_title(titles[4])\n\t\tI=np.squeeze(bin_result_canny[r[i],:,:,:])\n\t\taxs[(i*6)+4].imshow(I, cmap=\"gray\")\n\n\t\t# Displays Ground truth segmentation mask \n\t\taxs[(i*6)+5].set_title(titles[5])\n\t\tfname = 'test/label/img/'+str(r[i])+'.png'\n\t\timage = Image.open(fname).convert(\"L\")\n\t\tarr = np.asarray(image)\n\t\taxs[(i*6)+5].imshow(arr/255, cmap='gray')", "def plotAllRobots(self):\n plt.figure(figsize=(10, 10))\n ax = plt.gca()\n\n for robotID in self.robotDict.keys():\n isCollided = self.isCollided(robotID)\n self.plotRobotCore(ax=ax, robotID=robotID, isCollided=isCollided)\n\n rr = 340.\n plt.xlim(np.array([-1., 1.]) * rr)\n plt.ylim(np.array([-1., 1.]) * rr)\n return", "def raft_display_allchans(inputfile, datadir=''):\n raftarrays, seglist = get_scandata_raft(inputfile, datadir)\n\n fig, axes = plt.subplots(nrows = 3, ncols = 3, figsize=(15, 12))\n # when REB2 data is missing\n # fig, axes = plt.subplots(nrows = 2, ncols = 3, figsize=(15, 9))\n color_idx = [plt.cm.jet(i) for i in np.linspace(0, 1, 16)]\n\n # plot all channels, with one subplot per CCD\n listaxes = []\n for num,tmscope in enumerate(raftarrays):\n ax = axes[num / 3, num % 3 ]\n\n # single CCD plot\n for c in range(16):\n # image extensions are labeled as 
'Segment00' in CCS\n # they are in extensions 1 to 16\n #print tmscope.shape\n tmchan = tmscope[c].mean(axis=0)\n ax.plot(tmchan, label=c, color=color_idx[c])\n if num == 0:\n # for common legend\n listaxes.append(ax)\n\n ax.set_xlim(0, 255)\n ax.set_xticks(np.arange(0, 256, 32))\n ax.set_title(seglist[num])\n #ax.set_xlabel('Time increment (10 ns)')\n #ax.set_ylabel('Scan (ADU)')\n ax.grid(True)\n\n # TODO: common legend that works\n #plt.legend(handles=listaxes,loc = 'upper center', bbox_to_anchor = (0.5, 0), bbox_transform = plt.gcf().transFigure)\n dataname = scope.get_rootfile(inputfile)\n #plt.title(dataname) # TODO: put it above all plots\n plt.savefig(os.path.join(datadir, \"multiscope-%s.png\" % dataname))\n plt.show()", "def plotPsCurve(mcoolsPath:list,celltypeNames:list,chroms:list,resolution=100000,title=\"P(s) curve\",plotType=\"interaction\",base=1.1,log_x=True,log_y=True):\n import plotly.express as px\n from IPython.display import Image\n\n #Calculate P(s) data, get a 3 column pd.DataFrame with (bin,resolution,celltype)\n psDataAll = []\n for i in range(len(mcoolsPath)):\n psDataAll.append(compartment.getPsData(mcoolsPath[i],[\"chr\"+str(i+1) for i in range(len(chroms))],resolution=resolution,celltype=celltypeNames[i],base=base)) \n merged = pd.concat(psDataAll)\n\n data = pd.merge(merged,merged.groupby(\"celltype\").sum(),how=\"left\",on=\"celltype\").assign(prob= lambda df: df.aveCount_x/df.aveCount_y)\n\n fig = px.line(x=data[\"bin_x\"]*resolution,y=data[\"prob\"],color=data[\"celltype\"],title=title,log_x=log_x,log_y=log_y).update_layout(template='simple_white')\n fig.update_layout(width=800,height=600)\n fig.update_layout(xaxis_title=\"Genomic Distance(bp)\",\n yaxis_title=\"Contact Probability\")\n if(plotType == \"interaction\"):\n return fig\n else : return Image(fig.to_image(format=\"png\", engine=\"kaleido\"))", "def plot(self):\n\t\tself.plotOfSpect().plot()", "def make_plot(d0,t0,d1,t1,d2,t2,d3,t3,suptitle,path_out):\n fig0 = plt.figure(1,(10.,7.))\n grid = ImageGrid(fig0, 111, # similar to subplot(111)\n nrows_ncols = (2, 2), # creates 2x2 grid of axes\n axes_pad=0.4, # pad between axes in inch.\n share_all=True, # share axes\n cbar_mode='each')\n \n im = grid[0].pcolor(d0)\n grid[0].set_title(t0)\n grid.cbar_axes[0].colorbar(im)\n grid[0].axis([0,d0.shape[1],0,d0.shape[0]])\n \n im = grid[1].pcolor(d1)\n grid[1].set_title(t1)\n grid.cbar_axes[1].colorbar(im)\n grid[0].axis([0,d1.shape[1],0,d1.shape[0]])\n \n im = grid[2].pcolor(d2)\n grid[2].set_title(t2)\n grid.cbar_axes[2].colorbar(im)\n grid[0].axis([0,d2.shape[1],0,d2.shape[0]])\n \n im = grid[3].pcolor(d3)\n grid[3].set_title(t3) \n grid.cbar_axes[3].colorbar(im)\n grid[0].axis([0,d3.shape[1],0,d3.shape[0]])\n\n \n fig0.suptitle(suptitle,fontsize=18)\n \n fig0.savefig(path_out, dpi=300)\n fig0.clf()\n return", "def plots(self, events=None, title=None):\n data = self.data\n P = PH.regular_grid(3 , 1, order='columnsfirst', figsize=(8., 6), showgrid=False,\n verticalspacing=0.08, horizontalspacing=0.08,\n margins={'leftmargin': 0.07, 'rightmargin': 0.20, 'topmargin': 0.03, 'bottommargin': 0.1},\n labelposition=(-0.12, 0.95))\n scf = 1e12\n ax = P.axarr\n ax = ax.ravel()\n PH.nice_plot(ax)\n for i in range(1,2):\n ax[i].get_shared_x_axes().join(ax[i], ax[0])\n # raw traces, marked with onsets and peaks\n tb = self.timebase[:len(data)]\n ax[0].plot(tb, scf*data, 'k-', linewidth=0.75, label='Data') # original data\n ax[0].plot(tb[self.onsets], scf*data[self.onsets], 'k^', \n markersize=6, markerfacecolor=(1, 1, 
0, 0.8), label='Onsets')\n if len(self.onsets) is not None:\n# ax[0].plot(tb[events], data[events], 'go', markersize=5, label='Events')\n# ax[0].plot(tb[self.peaks], self.data[self.peaks], 'r^', label=)\n ax[0].plot(tb[self.smpkindex], scf*np.array(self.smoothed_peaks), 'r^', label='Smoothed Peaks')\n ax[0].set_ylabel('I (pA)')\n ax[0].set_xlabel('T (s)')\n ax[0].legend(fontsize=8, loc=2, bbox_to_anchor=(1.0, 1.0))\n \n # deconvolution trace, peaks marked (using onsets), plus threshold)\n ax[1].plot(tb[:self.Crit.shape[0]], self.Crit, label='Deconvolution') \n ax[1].plot([tb[0],tb[-1]], [self.sdthr, self.sdthr], 'r--', linewidth=0.75, \n label='Threshold ({0:4.2f}) SD'.format(self.sdthr))\n ax[1].plot(tb[self.onsets]-self.idelay, self.Crit[self.onsets], 'y^', label='Deconv. Peaks')\n if events is not None: # original events\n ax[1].plot(tb[:self.Crit.shape[0]][events], self.Crit[events],\n 'ro', markersize=5.)\n ax[1].set_ylabel('Deconvolution')\n ax[1].set_xlabel('T (s)')\n ax[1].legend(fontsize=8, loc=2, bbox_to_anchor=(1.0, 1.0))\n# print (self.dt, self.template_tmax, len(self.template))\n # averaged events, convolution template, and fit\n if self.averaged:\n ax[2].plot(self.avgeventtb[:len(self.avgevent)], scf*self.avgevent, 'k', label='Average Event')\n maxa = np.max(self.sign*self.avgevent)\n #tpkmax = np.argmax(self.sign*self.template)\n if self.template is not None:\n maxl = int(np.min([len(self.template), len(self.avgeventtb)]))\n temp_tb = np.arange(0, maxl*self.dt, self.dt)\n #print(len(self.avgeventtb[:len(self.template)]), len(self.template))\n ax[2].plot(self.avgeventtb[:maxl], scf*self.sign*self.template[:maxl]*maxa/self.template_amax, \n 'r-', label='Template')\n # compute double exp based on rise and decay alone\n # print('res rise: ', self.res_rise)\n # p = [self.res_rise.x[0], self.res_rise.x[1], self.res_decay.x[1], self.res_rise.x[2]]\n # x = self.avgeventtb[:len(self.avg_best_fit)]\n # y = self.doubleexp(p, x, np.zeros_like(x), risepower=4, fixed_delay=0, mode=0)\n # ax[2].plot(x, y, 'b--', linewidth=1.5)\n tau1 = np.power(10, (1./self.risepower)*np.log10(self.tau1*1e3)) # correct for rise power\n tau2 = self.tau2*1e3\n ax[2].plot(self.avgeventtb[:len(self.avg_best_fit)], scf*self.avg_best_fit, 'c--', linewidth=2.0,\n label='Best Fit:\\nRise Power={0:.2f}\\nTau1={1:.3f} ms\\nTau2={2:.3f} ms\\ndelay: {3:.3f} ms'.\n format(self.risepower, self.res_rise.x[1]*1e3, self.res_decay.x[1]*1e3, self.bfdelay*1e3))\n # ax[2].plot(self.avgeventtb[:len(self.decay_fit)], self.sign*scf*self.rise_fit, 'g--', linewidth=1.0,\n # label='Rise tau {0:.2f} ms'.format(self.res_rise.x[1]*1e3))\n # ax[2].plot(self.avgeventtb[:len(self.decay_fit)], self.sign*scf*self.decay_fit, 'm--', linewidth=1.0,\n # label='Decay tau {0:.2f} ms'.format(self.res_decay.x[1]*1e3))\n if title is not None:\n P.figure_handle.suptitle(title)\n ax[2].set_ylabel('Averaged I (pA)')\n ax[2].set_xlabel('T (s)')\n ax[2].legend(fontsize=8, loc=2, bbox_to_anchor=(1.0, 1.0))\n if self.fitted:\n print('measures: ', self.risetenninety, self.decaythirtyseven)\n mpl.show()", "def plot_figure11():\n height_ceilings = [200., 300., 400.]\n height_ceiling_ids = [list(height_range_ceilings).index(height_ceiling) for height_ceiling in height_ceilings]\n\n baseline_height_ceiling = 500.\n baseline_height_ceiling_id = list(height_range_ceilings).index(baseline_height_ceiling)\n\n plot_item00 = {\n 'data': 100.-nc.variables[\"p_ceiling_rank40\"][height_ceiling_ids[0], :, :],\n 'contour_fill_levels': np.linspace(50, 100, 21),\n 
'contour_line_levels': [70., 80., 90., 95.],\n 'contour_line_label_fmt': '%.0f',\n 'colorbar_ticks': np.linspace(50, 100, 21)[::4],\n 'colorbar_tick_fmt': '{:.0f}',\n 'colorbar_label': 'Availability [%]',\n 'extend': 'min',\n }\n plot_item01 = {\n 'data': 100.-nc.variables[\"p_ceiling_rank40\"][height_ceiling_ids[1], :, :],\n 'contour_fill_levels': np.linspace(70, 100, 21),\n 'contour_line_levels': [70., 80., 90., 95.],\n 'contour_line_label_fmt': '%.0f',\n 'colorbar_ticks': np.linspace(70, 100, 21)[::4],\n 'colorbar_tick_fmt': '{:.0f}',\n 'colorbar_label': 'Availability [%]',\n 'extend': 'min',\n }\n plot_item02 = {\n 'data': 100.-nc.variables[\"p_ceiling_rank40\"][height_ceiling_ids[2], :, :],\n 'contour_fill_levels': np.linspace(80, 100, 21),\n 'contour_line_levels': [70., 80., 90., 95.],\n 'contour_line_label_fmt': '%.0f',\n 'colorbar_ticks': np.linspace(80, 100, 21)[::4],\n 'colorbar_tick_fmt': '{:.0f}',\n 'colorbar_label': 'Availability [%]',\n 'extend': 'min',\n }\n\n column_titles = [\"200 m\", \"300 m\", \"400 m\"]\n plot_items = [plot_item00, plot_item01, plot_item02]\n\n eval_contour_fill_levels(plot_items)\n plot_panel_1x3_seperate_colorbar(plot_items, column_titles)\n\n linspace10 = np.linspace(0., 11., 21)\n plot_item10 = {\n 'data': -(100.-nc.variables[\"p_ceiling_rank40\"][height_ceiling_ids[0], :, :])+\n (100.-nc.variables[\"p_ceiling_rank40\"][baseline_height_ceiling_id, :, :]),\n 'contour_fill_levels': linspace10,\n 'contour_line_levels': sorted([1.1]+list(linspace10[::4])),\n 'contour_line_label_fmt': '%.1f',\n 'colorbar_ticks': linspace10[::4],\n 'colorbar_tick_fmt': '{:.0f}',\n 'colorbar_label': 'Availability decrease [%]',\n }\n linspace11 = np.linspace(0., 23., 21)\n plot_item11 = {\n 'data': (100.-nc.variables[\"p_ceiling_rank40\"][height_ceiling_ids[1], :, :])-\n (100.-nc.variables[\"p_ceiling_rank40\"][baseline_height_ceiling_id, :, :]),\n 'contour_fill_levels': linspace11,\n 'contour_line_levels': sorted([2.3]+list(linspace11[::4])),\n 'contour_line_label_fmt': '%.1f',\n 'colorbar_ticks': linspace11[::4],\n 'colorbar_tick_fmt': '{:.0f}',\n 'colorbar_label': 'Availability increase [%]',\n }\n linspace12 = np.linspace(0., 38., 21)\n plot_item12 = {\n 'data': (100.-nc.variables[\"p_ceiling_rank40\"][height_ceiling_ids[2], :, :])-\n (100.-nc.variables[\"p_ceiling_rank40\"][baseline_height_ceiling_id, :, :]),\n 'contour_fill_levels': linspace12,\n 'contour_line_levels': sorted([3.8]+list(linspace12[::4])),\n 'contour_line_label_fmt': '%.1f',\n 'colorbar_ticks': linspace12[::4],\n 'colorbar_tick_fmt': '{:.0f}',\n 'colorbar_label': 'Availability increase [%]',\n }\n\n column_titles = None\n plot_items = [plot_item10, plot_item11, plot_item12]\n\n eval_contour_fill_levels(plot_items)\n plot_panel_1x3_seperate_colorbar(plot_items, column_titles)", "def get_plot(sample):\n scale = (CANVAS_DIM/PATCH_DIM)\n ego_pose = sample[0]\n map_mask = sample[2]\n\n fig, ax = plt.subplots()\n ax.set_ylim([0, CANVAS_DIM]) # set the bounds to be 10, 10\n ax.set_xlim([0, CANVAS_DIM])\n ax.imshow(map_mask[0])\n\n for vehicle in sample[1]:\n plot_vehicle(ax, vehicle, ego_pose, scale)\n\n plt.show()", "def plot(self):\n pass" ]
[ "0.6300367", "0.61281216", "0.6091596", "0.60315055", "0.59216946", "0.5900686", "0.5838783", "0.5722116", "0.55689114", "0.5540599", "0.5525509", "0.5439819", "0.54131126", "0.540916", "0.5403627", "0.53976256", "0.53795195", "0.53759587", "0.53333044", "0.53327256", "0.53212583", "0.5320267", "0.5310172", "0.53059155", "0.53008276", "0.5278984", "0.5265435", "0.5264627", "0.52614826", "0.5259024", "0.5258362", "0.5239729", "0.52357656", "0.5202442", "0.5185372", "0.51759267", "0.51465344", "0.514281", "0.51417696", "0.51417404", "0.5137219", "0.512966", "0.5129073", "0.51204675", "0.5116676", "0.51142806", "0.51135874", "0.51100177", "0.5106042", "0.5105226", "0.5104101", "0.51033676", "0.5103143", "0.50977975", "0.50732684", "0.50731826", "0.5072", "0.50713885", "0.50703436", "0.5067231", "0.5065985", "0.5065909", "0.5064068", "0.5063286", "0.5063254", "0.506295", "0.5058828", "0.5051558", "0.50513685", "0.5041298", "0.5031604", "0.5022465", "0.50222087", "0.5019366", "0.50154084", "0.5010728", "0.5008395", "0.50065285", "0.5006189", "0.5006189", "0.50060594", "0.5006004", "0.50055695", "0.4996312", "0.49841994", "0.49810112", "0.49759847", "0.49747357", "0.49747157", "0.4972897", "0.4972247", "0.49709237", "0.49699584", "0.49698067", "0.49683443", "0.49661776", "0.49628136", "0.49611685", "0.4959727", "0.49582767" ]
0.6172026
1
Fills a coffea.hist.Hist for a single distribution. Takes a list of Dataset objects and a function `get_array` that should return a numpy-like array when given an arrays object. Also requires a string `name` so it knows which hist to fill.
def hist_single_distribution( arrays_iterator, get_array, varname='somevar', vartitle=None, distrname='somedistr', distrtitle=None, hist=None, left=-1., right=1., nbins=50 ): if hist is None: import coffea.hist vartitle = varname if vartitle is None else vartitle hist = coffea.hist.Hist( "Count", coffea.hist.Bin(varname, vartitle, nbins, left, right), coffea.hist.Cat('label', varname), ) for arrays, dataset in arrays_iterator: print(dataset.get_weight(), get_array(arrays)) hist.fill(label=distrname, weight=dataset.get_weight(), **{varname: get_array(arrays)}) return hist
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Fill(self, *args, **kwargs):\n self._varexp = kwargs.get(\"varexp\")\n self._cuts = kwargs.get(\"cuts\", [])\n self._weight = kwargs.get(\"weight\", \"1\")\n if len(args) == 1 and isinstance(args[0], (str, unicode)):\n IOManager.FillHistogram(self, args[0], **kwargs)\n if not kwargs.get(\"append\", False):\n self._errorband.Reset()\n self._errorband.Add(self)\n else:\n super(Histo1D, self).Fill(*args)", "def getHist(self, name, **kwargs):\n extra = kwargs.get(\"extra\", \"\")\n\n hist1 = [\n self._f.Get(\n \"{0[dir]}/{0[histname]}/{0[histname]}NFin{0[NFin]:02d}JetPt{0[pT]:02d}{0[extra]}\".format(\n {\n \"dir\": self._directory1,\n \"histname\": name,\n \"NFin\": self._NFIN,\n \"pT\": i,\n \"extra\": extra,\n }\n )\n ).Clone()\n for i in range(self._range[0], self._range[1])\n ] # Get jT histograms from file an array\n hist2 = [\n self._f2.Get(\n \"{0[dir]}/{0[histname]}/{0[histname]}NFin{0[NFin]:02d}JetPt{0[pT]:02d}{0[extra]}\".format(\n {\n \"dir\": self._directory2,\n \"histname\": name,\n \"NFin\": self._NFIN,\n \"pT\": i,\n \"extra\": extra,\n }\n )\n ).Clone()\n for i in range(self._range[0], 9)\n ] # Get jT histograms from file an array\n hist = [\n hist1[i] if (i < self._range[1] - self._range[0]) else hist2[i]\n for i in range(0, 9 - self._range[0])\n ]\n # print('{0[dir]}/{0[histname]}/{0[histname]}NFin{0[NFin]:02d}JetPt{0[pT]:02d}'.format({'dir':self._directory, 'histname':name,'NFin':self._NFIN,'pT':1}))\n jetPt = parse_jet_pt_bins(hist)\n\n for h, N, bgN, rndmbgN in zip(\n hist, self._measN, self._measBgN, self._measRndmBgN\n ):\n h.Sumw2()\n # print(\"Rebinning {} by {} in set {} that has {} bins\".format(h.GetTitle(), self._rebin, self._name, h.GetNbinsX()))\n h.Rebin(self._rebin)\n if self.properties.get(\"isWeight\", False):\n h.SetLineColor(self.properties.get(\"color\", 1))\n h.SetMarkerColor(self.properties.get(\"color\", 1))\n else:\n if kwargs.get(\"isBg\", False):\n h.SetLineColor(self.properties.get(\"color\", 1) + 1)\n h.SetMarkerColor(self.properties.get(\"color\", 1) + 1)\n h.Scale(1.0 / bgN, \"width\")\n print(\"{} is bg\".format(name))\n elif kwargs.get(\"isRndmBg\", False):\n print(\"Is random background\")\n h.SetLineColor(self.properties.get(\"color\", 1) + 2)\n h.SetMarkerColor(self.properties.get(\"color\", 1) + 2)\n h.Scale(1.0 / rndmbgN, \"width\")\n print(\"Scale by {}\".format(rndmbgN))\n else:\n h.SetLineColor(self.properties.get(\"color\", 1))\n h.SetMarkerColor(self.properties.get(\"color\", 1))\n h.Scale(1.0 / N, \"width\")\n\n h.SetMarkerStyle(self.properties.get(\"style\", 24))\n h.SetMarkerSize(0.5)\n h.SetLineColor(1)\n\n if kwargs.get(\"jetpt\", False):\n return hist, jetPt\n else:\n return hist", "def hist_aggregate(hist_name, hist_dim=1, norm=None, **hist_args):\n\tdef decorator(fn):\n\t\tdef _inner(vals, hist_collection):\n\t\t\tvals = fn(vals)\n\n\t\t\t# we want to be able to handle dicts\n\t\t\t# for the case where multiple instances of the \"same\" hist\n\t\t\t# separated by a selection (the dict key) are returned.\n\t\t\t# if that *isn't* what happened, turn it into a dict with a single key.\n\t\t\tif not isinstance(vals, dict):\n\t\t\t\tvals = {None: vals}\n\n\t\t\tfor subsample, vs in vals.items():\n\t\t\t\tfull_hist_name = \"%s_%s\" % (hist_name, subsample) if subsample else hist_name\n\t\t\t\tif hist_dim == 1:\n\t\t\t\t\thist, bins = numpy.histogram(vs, **hist_args)\n\t\t\t\telif hist_dim == 2:\n\t\t\t\t\tif len(vs) == 0:\n\t\t\t\t\t\treturn\n\t\t\t\t\thist, binsx, binsy = numpy.histogram2d(*vs, **hist_args)\n\t\t\t\t\tbins = 
(binsx, binsy)\n\t\t\t\telse:\n\t\t\t\t\traise ValueError(\"Unsupported histogram dimension: \" + str(hist_dim))\n\n\t\t\t\tif full_hist_name in hist_collection:\n\t\t\t\t\th = hist_collection[full_hist_name]\n\t\t\t\t\tif h.dim == 1:\n\t\t\t\t\t\tassert all(h.bins == bins)\n\t\t\t\t\telif h.dim == 2:\n\t\t\t\t\t\tassert all([numpy.array_equal(h.bins[i], bins[i]) for i in range(len(h.bins))])\n\t\t\t\t\thist_collection[full_hist_name].data += hist\n\t\t\t\telse:\n\t\t\t\t\th = Hist(dim=hist_dim, bins=bins, data=hist, norm=norm)\n\t\t\t\t\thist_collection[full_hist_name] = h\n\n\t\treturn _inner\n\n\treturn decorator", "def histogram(\n *args,\n bins=None,\n range=None,\n dim=None,\n weights=None,\n density=False,\n block_size=\"auto\",\n keep_coords=False,\n bin_dim_suffix=\"_bin\",\n):\n\n args = list(args)\n N_args = len(args)\n\n # TODO: allow list of weights as well\n N_weights = 1 if weights is not None else 0\n\n for a in args:\n if not isinstance(a, xr.DataArray):\n raise TypeError(\n \"xhistogram.xarray.histogram accepts only xarray.DataArray \"\n + f\"objects but a {type(a).__name__} was provided\"\n )\n\n for a in args:\n assert a.name is not None, \"all arrays must have a name\"\n\n # we drop coords to simplify alignment\n if not keep_coords:\n args = [da.reset_coords(drop=True) for da in args]\n if N_weights:\n args += [weights.reset_coords(drop=True)]\n # explicitly broadcast so we understand what is going into apply_ufunc\n # (apply_ufunc might be doing this by itself again)\n args = list(xr.align(*args, join=\"exact\"))\n\n # what happens if we skip this?\n # args = list(xr.broadcast(*args))\n a0 = args[0]\n a_coords = a0.coords\n\n # roll our own broadcasting\n # now manually expand the arrays\n all_dims = [d for a in args for d in a.dims]\n all_dims_ordered = list(OrderedDict.fromkeys(all_dims))\n args_expanded = []\n for a in args:\n expand_keys = [d for d in all_dims_ordered if d not in a.dims]\n a_expanded = a.expand_dims({k: 1 for k in expand_keys})\n args_expanded.append(a_expanded)\n\n # only transpose if necessary, to avoid creating unnecessary dask tasks\n args_transposed = []\n for a in args_expanded:\n if a.dims != all_dims_ordered:\n args_transposed.append(a.transpose(*all_dims_ordered))\n else:\n args.transposed.append(a)\n args_data = [a.data for a in args_transposed]\n\n if N_weights:\n weights_data = args_data.pop()\n else:\n weights_data = None\n\n if dim is not None:\n dims_to_keep = [d for d in all_dims_ordered if d not in dim]\n axis = [args_transposed[0].get_axis_num(d) for d in dim]\n else:\n dims_to_keep = []\n axis = None\n\n h_data, bins = _histogram(\n *args_data,\n weights=weights_data,\n bins=bins,\n range=range,\n axis=axis,\n density=density,\n block_size=block_size,\n )\n\n # create output dims\n new_dims = [a.name + bin_dim_suffix for a in args[:N_args]]\n output_dims = dims_to_keep + new_dims\n\n # create new coords\n bin_centers = [0.5 * (bin[:-1] + bin[1:]) for bin in bins]\n new_coords = {\n name: ((name,), bin_center, a.attrs)\n for name, bin_center, a in zip(new_dims, bin_centers, args)\n }\n\n # old coords associated with dims\n old_dim_coords = {name: a0[name] for name in dims_to_keep if name in a_coords}\n\n all_coords = {}\n all_coords.update(old_dim_coords)\n all_coords.update(new_coords)\n # add compatible coords\n if keep_coords:\n for c in a_coords:\n if c not in all_coords and set(a0[c].dims).issubset(output_dims):\n all_coords[c] = a0[c]\n\n output_name = \"_\".join([\"histogram\"] + [a.name for a in args[:N_args]])\n\n da_out 
= xr.DataArray(h_data, dims=output_dims, coords=all_coords, name=output_name)\n\n return da_out\n\n # we need weights to be passed through apply_func's alignment algorithm,\n # so we include it as an arg, so we create a wrapper function to do so\n # this feels like a hack\n # def _histogram_wrapped(*args, **kwargs):\n # alist = list(args)\n # weights = [alist.pop() for n in _range(N_weights)]\n # if N_weights == 0:\n # weights = None\n # elif N_weights == 1:\n # weights = weights[0] # squeeze\n # return _histogram(*alist, weights=weights, **kwargs)", "def _make_hist(self, oned_arr):\n hist_ = np.histogram(\n a=oned_arr,\n bins=self.null_distributions_[\"histogram_bins\"],\n range=(\n np.min(self.null_distributions_[\"histogram_bins\"]),\n np.max(self.null_distributions_[\"histogram_bins\"]),\n ),\n density=False,\n )[0]\n return hist_", "def addHistogram1D(self, name, title, n_bins, minimum, maximum):\n\t\tself.histograms[ name ] = ROOT.TH1F(name, title, n_bins, minimum, maximum)", "def add_histogram(self, tag, values, global_step=None, bins='tensorflow'):\n values = make_np(values)\n self.vis.histogram(make_np(values), opts={'title': tag})", "def getHist(self, name, **kwargs):\n extra = kwargs.get(\"extra\", \"\")\n print(name)\n print(extra)\n format_string = \"{0[dir]}/{0[histname]}/{0[histname]}NFin{0[NFin]:02d}JetPt{0[pT]:02d}{0[extra]}\"\n\n if \"dir\" in kwargs:\n hist = [\n self._f.Get(\n format_string.format(\n {\n \"dir\": kwargs[\"dir\"],\n \"histname\": name,\n \"NFin\": self._NFIN,\n \"pT\": i,\n \"extra\": extra,\n }\n )\n ).Clone()\n for i in range(self._range[0], self._range[1])\n ] # Get jT histograms from file an array\n else:\n hist = [\n self._f.Get(\n format_string.format(\n {\n \"dir\": self._directory,\n \"histname\": name,\n \"NFin\": self._NFIN,\n \"pT\": i,\n \"extra\": extra,\n }\n )\n ).Clone()\n for i in range(self._range[0], self._range[1])\n ] # Get jT histograms from file an array\n # print('{0[dir]}/{0[histname]}/{0[histname]}NFin{0[NFin]:02d}JetPt{0[pT]:02d}'.format({'dir':self._directory, 'histname':name,'NFin':self._NFIN,'pT':1}))\n jetPt = parse_jet_pt_bins(hist)\n\n if \"LeadingRef\" in name:\n normalization = [\n self._f.Get(\n \"{}/LeadingRefJetPtBin/LeadingRefJetPtBinNFin{:02d}JetPt{:02d}\".format(\n self._directory, self._NFIN, i\n )\n ).Integral()\n for i in range(self._range[0], self._range[1])\n ] # Get number of jets by jet pT bins\n print(\"Normalization set to LeadingRef\")\n print(normalization)\n print(\"Before:\")\n print(self._measN)\n else:\n normalization = self._measN\n\n if self.properties.get(\"isWeight\", False):\n normalizer = range(10)\n else:\n if kwargs.get(\"isBg\", False):\n normalizer = self._measBgN\n elif kwargs.get(\"isRndmBg\", False):\n normalizer = self._measRndmBgN\n else:\n normalizer = normalization\n\n for h, N in zip(hist, normalizer):\n h.Sumw2()\n # print(\"Rebinning {} by {} in set {} that has {} bins\".format(h.GetTitle(), self._rebin, self._name, h.GetNbinsX()))\n h.Rebin(self._rebin)\n print(kwargs)\n if self.properties.get(\"isWeight\", False):\n h.SetLineColor(self.properties.get(\"color\", 1))\n h.SetMarkerColor(self.properties.get(\"color\", 1))\n else:\n h.Scale(1.0 / N, \"width\")\n if kwargs.get(\"isBg\", False):\n h.SetLineColor(self.properties.get(\"color\", 1) + 1)\n h.SetMarkerColor(self.properties.get(\"color\", 1) + 1)\n elif kwargs.get(\"isRndmBg\", False):\n h.SetLineColor(self.properties.get(\"color\", 1) + 2)\n h.SetMarkerColor(self.properties.get(\"color\", 1) + 2)\n else:\n 
h.SetLineColor(self.properties.get(\"color\", 1))\n h.SetMarkerColor(self.properties.get(\"color\", 1))\n\n h.SetMarkerStyle(self.properties.get(\"style\", 24))\n h.SetMarkerSize(0.5)\n h.SetLineColor(1)\n\n if kwargs.get(\"jetpt\", False):\n return hist, jetPt\n else:\n return hist", "def np_histogram(data, title, bins=\"auto\"):\n figure = plt.figure()\n canvas = figure.canvas\n plt.hist(data, bins=bins)\n plt.title(title)\n\n canvas.draw()\n w, h = canvas.get_width_height()\n np_hist = np.fromstring(canvas.get_renderer().tostring_rgb(), dtype=np.uint8).reshape(h, w, 3)\n plt.close(figure)\n util.np_info(np_hist)\n return np_hist", "def build_hist(concept_values: np.ndarray, num_bins: int = 100) -> np.ndarray:\n hist, _ = np.histogram(concept_values, bins=num_bins, range=(0., 1.), density=True)\n return hist", "def hist(bins, y, /, axis=0):\n if bins.ndim != 1:\n raise ValueError('Bins must be 1-dimensional.')\n\n with quack._ArrayContext(y, push_right=axis) as context:\n # Get flattened data\n y = context.data\n yhist = np.empty((y.shape[0], bins.size - 1))\n\n # Take histogram\n for k in range(y.shape[0]):\n yhist[k, :] = np.histogram(y[k, :], bins=bins)[0]\n\n # Replace data\n context.replace_data(yhist)\n\n # Return unflattened data\n return context.data", "def data_hist(xvar, yvar, datahist, nbins=95):\n hists = [datahist[j].createHistogram(\n 'hdata{0}{1}'.format(c, i),\n xvar, RooFit.Binning(nbins),\n RooFit.YVar(yvar, RooFit.Binning(nbins))\n ) for j, (i, c) in enumerate(ic)]\n return hists", "def _get_hist_data(self,hists,data):\n try:\n for hist in hists:\n self._get_hist_data(hist,data)\n except TypeError:\n hist_dict = {\"name\" : hists.hist.GetName(),\"cut_labels\" : hists.cut_labels, \"use_for_eff\" : self.use_for_eff}\n data.append(hist_dict)\n return data", "def __init__(self, array, compute_histogram=True):\n\n self.data = array\n self.histogram = np.array([])\n self.dim_x = array.shape[0]\n self.dim_y = array.shape[1]\n self.dim_z = array.shape[2]\n\n if compute_histogram:\n self.compute_histogram()", "def collect_absolute_value(self, name_to_arr):\n for tensor, data_arr in name_to_arr.items():\n data_arr = np.asarray(data_arr) # noqa: PLW2901\n data_arr = data_arr.flatten() # noqa: PLW2901\n if data_arr.size > 0:\n min_value = np.min(data_arr)\n max_value = np.max(data_arr)\n else:\n min_value = 0\n max_value = 0\n\n data_arr = np.absolute(data_arr) # only consider absolute value # noqa: PLW2901\n\n if tensor not in self.histogram_dict:\n # first time it uses num_bins to compute histogram.\n hist, hist_edges = np.histogram(data_arr, bins=self.num_bins)\n self.histogram_dict[tensor] = (hist, hist_edges, min_value, max_value)\n else:\n old_histogram = self.histogram_dict[tensor]\n old_min = old_histogram[2]\n old_max = old_histogram[3]\n old_hist = old_histogram[0]\n old_hist_edges = old_histogram[1]\n temp_amax = np.max(data_arr)\n if temp_amax > old_hist_edges[-1]:\n # increase the number of bins\n width = old_hist_edges[1] - old_hist_edges[0]\n # NOTE: np.arange may create an extra bin after the one containing temp_amax\n new_bin_edges = np.arange(old_hist_edges[-1] + width, temp_amax + width, width)\n old_hist_edges = np.hstack((old_hist_edges, new_bin_edges))\n hist, hist_edges = np.histogram(data_arr, bins=old_hist_edges)\n hist[: len(old_hist)] += old_hist\n self.histogram_dict[tensor] = (hist, hist_edges, min(old_min, min_value), max(old_max, max_value))", "def collect_value(self, name_to_arr):\n for tensor, data_arr in name_to_arr.items():\n data_arr = 
np.asarray(data_arr) # noqa: PLW2901\n data_arr = data_arr.flatten() # noqa: PLW2901\n\n if data_arr.size > 0:\n min_value = np.min(data_arr)\n max_value = np.max(data_arr)\n else:\n min_value = 0\n max_value = 0\n\n threshold = max(abs(min_value), abs(max_value))\n\n if tensor in self.histogram_dict:\n old_histogram = self.histogram_dict[tensor]\n self.histogram_dict[tensor] = self.merge_histogram(\n old_histogram, data_arr, min_value, max_value, threshold\n )\n else:\n hist, hist_edges = np.histogram(data_arr, self.num_bins, range=(-threshold, threshold))\n self.histogram_dict[tensor] = (\n hist,\n hist_edges,\n min_value,\n max_value,\n threshold,\n )", "def Hist2DUUID(*args,**kargs):\n func = ROOT.TH2F\n if \"TH2D\" in kargs and kargs[\"TH2D\"]:\n func = ROOT.TH2D\n if \"TEfficiency\" in kargs and kargs[\"TEfficiency\"]:\n func = ROOT.TEfficiency\n name = uuid.uuid1().hex\n hist = None\n if len(args) == 2 and type(args[0]) == list and type(args[1]) == list:\n hist = func(name,\"\",len(args[0])-1,array.array('f',args[0]),len(args[1])-1,array.array('f',args[1]))\n elif len(args) == 6:\n for i in range(6):\n if not isinstance(args[i],numbers.Number):\n raise Exception(i,\"th argument is not a number\")\n hist = func(name,\"\",args[0],args[1],args[2],args[3],args[4],args[5])\n elif len(args) == 4:\n if type(args[0]) == list:\n for i in range(1,4):\n if not isinstance(args[i],numbers.Number):\n raise Exception(i,\"th argument is not a number\")\n hist = func(name,\"\",len(args[0])-1,array.array('d',args[0]),args[1],args[2],args[3])\n elif type(args[3]) == list:\n for i in range(3):\n if not isinstance(args[i],numbers.Number):\n raise Exception(i,\"th argument is not a number\")\n hist = func(name,\"\",args[0],args[1],args[2],len(args[3])-1,array.array('d',args[3]))\n else:\n raise Exception(\"Hist: Innapropriate arguments, requires either nBins, low, high or a list of bin edges:\",args)\n return hist", "def makeHistogram(values, numBins, xLabel, yLabel, title=None):", "def genHistArrays(df,csname,bins=50):\n #initiate matrix which will contain values of histograms\n allpixV = np.zeros((df.shape[0],bins*3))\n #attain histograms\n hists = df['SKImage'].apply(lambda x: getHists(x,bins))\n \n #Generate column names for result dataframe\n fullnames = []\n for chs in ['CH1', 'CH2', 'CH3']:\n fullnames.extend([chs+'-'+str(j) for j in range(bins)])\n fullnames = [csname+'-'+str(j) for j in fullnames]\n \n #extract histograms\n for rowi, histArr in enumerate(hists):\n allpixV[rowi,:] = np.array(histArr).flatten()\n \n return allpixV,fullnames", "def array_converter(roodataobject,obs_name):\n try:\n from numpy import array\n except ImportError:\n from array import array as array\n\n # Create the histogram with respect the observable\n histo = roodataobject.createHistogram(obs_name)\n # Normalize\n histo.Scale(1.0/histo.Integral())\n _provlist = []\n for i in xrange(1,histo.GetNbinsX()+1):\n _provlist.append(histo.GetBinContent(i))\n\n # the output array\n try:\n harray = array([ x for x in _provlist ],dtype='d')\n except TypeError:\n harray = array('d',[ x for x in _provlist ])\n return harray", "def HistUUID(*args,**kargs):\n func = ROOT.TH1F\n if \"TH1D\" in kargs and kargs[\"TH1D\"]:\n func = ROOT.TH1D\n if \"TEfficiency\" in kargs and kargs[\"TEfficiency\"]:\n func = ROOT.TEfficiency\n name = uuid.uuid1().hex\n hist = None\n if len(args) == 1 and type(args[0]) == list:\n hist = func(name,\"\",len(args[0])-1,array.array('f',args[0]))\n elif len(args) == 3:\n for i in range(3):\n if not 
isinstance(args[i],numbers.Number):\n raise Exception(i,\"th argument is not a number\")\n hist = func(name,\"\",args[0],args[1],args[2])\n else:\n raise Exception(\"Hist: Innapropriate arguments, requires either nBins, low, high or a list of bin edges:\",args)\n return hist", "def histogram(arr, xlbl, xrng=None, nbins=20, alpha=1.):\n if xrng is None:\n xrng = (np.min(arr),np.max(arr))\n p = figure(plot_width=600, plot_height=400)\n # Histogram\n hist, edges = np.histogram(arr, range=xrng, density=True, bins=nbins)\n p.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:], fill_color='blue', alpha=alpha)\n # Label\n p.xaxis.axis_label = xlbl\n # Show\n show(p)", "def glGetHistogram( baseFunction, target, reset, format, type, values=None):\r\n if values is None:\r\n width = glGetHistogramParameteriv(\r\n target,\r\n GL_HISTOGRAM_WIDTH,\r\n )\r\n values = images.images.SetupPixelRead( format, (width,4), type )\r\n arrayType = arrays.GL_CONSTANT_TO_ARRAY_TYPE[\r\n images.images.TYPE_TO_ARRAYTYPE.get(type,type)\r\n ]\r\n baseFunction(\r\n target, reset, format, type,\r\n ctypes.c_void_p( arrayType.dataPointer(values))\r\n )\r\n return values", "def addHistogram2D(self, name, title, n_bins_x, minimum_x, maximum_x, n_bins_y, minimum_y, maximum_y):\n\t\tself.histograms[ name ] = ROOT.TH2F(name, title, n_bins_x, minimum_x, maximum_x, n_bins_y, minimum_y, maximum_y)", "def just_histogram(*args, **kwargs):\n return np.histogram(*args, **kwargs)[0].astype(float)", "def hist_data(list_source, frq=151, ln=False, data_lim=None):\n fluxes = []\n\n if data_lim is not None:\n min_acceptable = data_lim[0]\n else:\n min_acceptable = None\n if data_lim is not None:\n max_acceptable = data_lim[1]\n else:\n max_acceptable = None\n \n for gleam_obj in list_source:\n I = gleam_obj.flux_by_frq[frq]\n if is_constrained(I, min_acceptable, max_acceptable):\n if ln:\n fluxes.append(np.log(I))\n else:\n fluxes.append(I)\n \n return np.array(fluxes)", "def histogram(*args, bins=None, dim=None, weights=None, density=False,\n block_size='auto', bin_dim_suffix='_bin',\n bin_edge_suffix='_bin_edge'):\n\n N_args = len(args)\n\n # TODO: allow list of weights as well\n N_weights = 1 if weights is not None else 0\n\n # some sanity checks\n # TODO: replace this with a more robust function\n assert len(bins)==N_args\n for bin in bins:\n assert isinstance(bin, np.ndarray), 'all bins must be numpy arrays'\n\n for a in args:\n # TODO: make this a more robust check\n assert a.name is not None, 'all arrays must have a name'\n\n # we drop coords to simplify alignment\n args = [da.reset_coords(drop=True) for da in args]\n if N_weights:\n args += [weights.reset_coords(drop=True)]\n # explicitly broadcast so we understand what is going into apply_ufunc\n # (apply_ufunc might be doing this by itself again)\n args = list(xr.align(*args, join='exact'))\n\n\n\n # what happens if we skip this?\n #args = list(xr.broadcast(*args))\n a0 = args[0]\n a_dims = a0.dims\n\n # roll our own broadcasting\n # now manually expand the arrays\n all_dims = [d for a in args for d in a.dims]\n all_dims_ordered = list(OrderedDict.fromkeys(all_dims))\n args_expanded = []\n for a in args:\n expand_keys = [d for d in all_dims_ordered if d not in a.dims]\n a_expanded = a.expand_dims({k: 1 for k in expand_keys})\n args_expanded.append(a_expanded)\n\n # only transpose if necessary, to avoid creating unnecessary dask tasks\n args_transposed = []\n for a in args_expanded:\n if a.dims != all_dims_ordered:\n args_transposed.append(a.transpose(*all_dims_ordered))\n 
else:\n args.transposed.append(a)\n args_data = [a.data for a in args_transposed]\n\n if N_weights:\n weights_data = args_data.pop()\n else:\n weights_data = None\n\n if dim is not None:\n dims_to_keep = [d for d in all_dims_ordered if d not in dim]\n axis = [args_transposed[0].get_axis_num(d) for d in dim]\n else:\n dims_to_keep = []\n axis = None\n\n h_data = _histogram(*args_data, weights=weights_data, bins=bins, axis=axis,\n block_size=block_size)\n\n # create output dims\n new_dims = [a.name + bin_dim_suffix for a in args[:N_args]]\n output_dims = dims_to_keep + new_dims\n\n # create new coords\n bin_centers = [0.5*(bin[:-1] + bin[1:]) for bin in bins]\n new_coords = {name: ((name,), bin_center, a.attrs)\n for name, bin_center, a in zip(new_dims, bin_centers, args)}\n\n old_coords = {name: a0[name]\n for name in dims_to_keep if name in a0.coords}\n all_coords = {}\n all_coords.update(old_coords)\n all_coords.update(new_coords)\n\n # CF conventions tell us how to specify cell boundaries\n # http://cfconventions.org/Data/cf-conventions/cf-conventions-1.7/cf-conventions.html#cell-boundaries\n # However, they require introduction of an additional dimension.\n # I don't like that.\n edge_dims = [a.name + bin_edge_suffix for a in args[:N_args]]\n edge_coords = {name: ((name,), bin_edge, a.attrs)\n for name, bin_edge, a in zip(edge_dims, bins, args)}\n\n output_name = '_'.join(['histogram'] + [a.name for a in args[:N_args]])\n\n da_out = xr.DataArray(h_data, dims=output_dims, coords=all_coords,\n name=output_name)\n return da_out\n\n # we need weights to be passed through apply_func's alignment algorithm,\n # so we include it as an arg, so we create a wrapper function to do so\n # this feels like a hack\n # def _histogram_wrapped(*args, **kwargs):\n # alist = list(args)\n # weights = [alist.pop() for n in range(N_weights)]\n # if N_weights == 0:\n # weights = None\n # elif N_weights == 1:\n # weights = weights[0] # squeeze\n # return _histogram(*alist, weights=weights, **kwargs)", "def super_hist(self, data_list, alpha=0.5, log_scale=True, bins=45):\r\n\r\n fig, _ = mp.subplots(1, 1, figsize=(15, 10), constrained_layout=True)\r\n\r\n names = []\r\n for data in data_list:\r\n plot_data = data[data.Day_First_N_Infections != \"None\"]\r\n column_data = plot_data[\"Day_First_N_Infections\"].values\r\n sns.distplot(column_data,\r\n kde=False,\r\n bins=bins,\r\n hist_kws={\r\n \"linewidth\": 1,\r\n \"alpha\": alpha,\r\n \"edgecolor\": 'black',\r\n \"log\": log_scale\r\n })\r\n\r\n mp.legend(loc='upper left', fontsize=20)\r\n mp.xlabel(\"Days from outbreak to case number \" + str(data_list[0].N) +\r\n \" in county\",\r\n fontsize=18)\r\n mp.ylabel(\"Frequency\", fontsize=18)\r\n\r\n fig.savefig(\"hist_N\" + str(data_list[0].N) + \"_\" + \"_\".join(names) +\r\n \".png\")", "def makeHist(data, bins, wgt=None, factor=1.0, pdf=False):\n n_arr, bins = np.histogram(data, bins, weights=wgt)\n ctr_bins = centerOfBins(bins)\n \n if pdf == True:\n n_arr = asFloat(n_arr) / (float(sum(n_arr)) * (bins[1:] - bins[:-1]))\n else:\n n_arr = asFloat(n_arr) * factor\n \n return n_arr, ctr_bins", "def get_proto_hist(var, name, nbins=None):\n ## Basic default settings, which will be used\n ## NOTE: In the future it is planned to make it possible to override these\n ## via a JSON file\n logging.debug('Getting prototype histogram for var: {}'.format(var))\n\n # there were root versions where this lead to automatic binning -> default\n hist = r.TH1D('', '', 100, 1, -1)\n\n hist_var = get_key_from_var(var)\n if hist_var:\n 
histset = default_hist_set[hist_var]\n if nbins is None:\n nbins = histset['n_bins']\n logging.debug('Using histogram settings {}'.format(histset))\n hist = r.TH1D(name, '',\n histset['n_bins'], histset['min'], histset['max'])\n set_hist_opts(hist)\n else:\n logging.warning('Could not get histogram settings for var: {}'\n .format(var))\n\n return hist", "def histogram(input, num_bins, filename, title = None, xlabel = None, ylabel = None, **bar_kwargs): \n # Checking for errors in the input\n if not isinstance(num_bins, int):\n raise TypeError('Please enter an integer number of bins.')\n if 200 % num_bins != 0:\n raise ValueError('The original number of intervals must be divisible by the number of bins inputted.')\n \n x_bins = np.linspace(0, 10, num = num_bins, endpoint = False) # Generates list of x-values (in this case energies) at the start of each bin interval\n bin_size = x_bins[1] - x_bins[0] # Calculating size of bins\n midpoints = [i + (bin_size/2) for i in x_bins] # List of midpoints generated for the centre of each bar\n\n down_fact = len(input) / num_bins # Calculating the downsampling factor\n if down_fact != 1: \n down_arr = hist_downsample(input, down_fact) # Downsampling the input data\n else:\n down_arr = input # If the number of bins needed is equal to the length of the input data, no need to downsample\n\n plot_settings(grid = True) # Defining plot settings\n plt.bar(midpoints, height = down_arr, width = bin_size, **bar_kwargs) # Width of bar is set to the size of the histogram bin\n\n # Adds a title or axis labels if specified by the user\n if title != None:\n plt.title(title)\n if xlabel != None:\n plt.xlabel(xlabel)\n if ylabel != None:\n plt.ylabel(ylabel)\n \n # f-string allows save filepath to be set inside the plt.savefig() function\n plt.savefig(f'{os.path.join(plot_path,filename)}.pdf', dpi = 200) # Saving the plot in the 'plots' folder (filepath set using the 'os' package)", "def getHistByName(self, name, ij, **kwargs):\n try:\n if \"dir\" in kwargs:\n hist = self._f.Get(\n \"{0[dir]}/{0[histname]}\".format(\n {\"dir\": kwargs[\"dir\"], \"histname\": name}\n )\n ).Clone() # Get a single jT histogram from file\n else:\n hist = self._f.Get(\n \"{0[dir]}/{0[histname]}\".format(\n {\"dir\": self._directory, \"histname\": name}\n )\n ).Clone() # Get a single jT histogram from file\n except rootpy.io.file.DoesNotExist:\n return None\n hist.Sumw2()\n hist.Rebin(self._rebin)\n if kwargs.get(\"isBg\", False):\n N = self._measBgN[ij]\n else:\n N = self._measN[ij]\n if self.properties.get(\"isWeight\", False) or kwargs.get(\"isWeight\", False):\n pass\n else:\n hist.Scale(1.0 / N, \"width\")\n return hist", "def histogram_and_curves( array, mean = 0.0, std_dev = 1.0, bins = None, x_lims = None, y_lims = None, x_axis = 'X', y_axis = 'Y', title = 'Title', show = True, filename = None, curve_list = None, labels = None, **kwargs ):\n\n color = 'k'\n bgcolor = 'w'\n style = 'stepfilled'\n\n # Set up figure and axes\n fig = plt.figure( figsize = ( 6, 6 ) )\n ax = fig.add_subplot( 111, facecolor = bgcolor )\n xText = ax.set_xlabel( x_axis )\n yText = ax.set_ylabel( y_axis )\n title = ax.set_title( title )\n\n # Convert any lists or dicts to numpy arrays\n if not isinstance( array, np.ndarray ):\n array = np.array( array )\n\n if array.ndim is 1:\n\n # Number of histogram bins to use (if not supplied by user)\n if bins is None:\n step = ( math.ceil( np.amax( array ) ) - math.floor( np.amin( array ) ) ) / ( 20 * abs( math.ceil( np.amax( array ) ) ) )\n bins = np.arange( math.floor( 
np.amin( array ) ), math.ceil( np.amax( array ) ), step )\n\n if not x_lims:\n x_min = mean - ( 4 * std_dev )\n x_max = mean + ( 4 * std_dev )\n else:\n x_min, x_max = x_lims\n\n # Linespace for curve plotting\n t = np.arange( x_min , x_max, 0.01)\n\n # Plot the 1D histogram\n n, bins, patches = ax.hist( array, bins = bins, density = True, color = color, histtype = style, linewidth = 2, **kwargs )\n\n xlim = ax.set_xlim( x_min, x_max )\n ylim = ax.set_ylim( 0, 1.2 * np.amax( n ) )\n\n # Plot distribution curves\n if curve_list:\n for i, curve in enumerate( curve_list ):\n\n # Selects color from list\n color_index = i % len( color_list )\n color = color_list[ color_index ]\n\n # Find the number of fitting arguments in the desired curve\n p0_len = u.get_unique_fitting_parameter_length( curve )\n\n if not p0_len:\n p0 = [ mean, std_dev ]\n else:\n p0 = np.ones( p0_len )\n\n\n # Try fitting and plotting. If fit doesn't work, just plot the histogram\n try:\n try:\n params = opt.curve_fit( curve, bins[1:], n, p0 = p0 )\n except RuntimeError:\n continue\n\n line, = ax.plot( t, curve( t, *params[0] ), color = color, linewidth = 2 )\n except TypeError:\n line, = ax.plot( t, curve( t ), color = color, linewidth = 2 )\n\n if labels:\n leg_labs = np.zeros( len( labels ) )\n\n if len( labels ) == len( curve_list ):\n leg_labs[i] = line.set_label( labels[i] )\n ax.legend()\n\n\n plt.grid( True )\n\n # Save file\n if filename is not None:\n plt.savefig( filename )\n if show:\n plt.show()\n\n elif array.ndim is 2 and array.shape[0] is 2:\n\n # Basically determines whether mean given was [float, float] or float\n if not x_lims:\n try:\n x_min = mean[0] - ( 4 * std_dev[0] )\n x_max = mean[0] + ( 4 * std_dev[0] )\n except TypeError:\n x_min = mean - ( 4 * std_dev )\n x_max = mean + ( 4 * std_dev )\n else:\n x_min, x_max = x_lims\n\n if not y_lims:\n try:\n y_min = mean[1] - ( 4 * std_dev[1] )\n y_max = mean[1] + ( 4 * std_dev[1] )\n except TypeError:\n y_min = x_min\n y_max = x_max\n else:\n y_min, y_max = y_lims\n\n # Initialize bin size if not parsed in\n if bins is None:\n step = ( math.ceil( np.amax( array ) ) - math.floor( np.amin( array ) ) ) / ( 100 * abs( math.ceil( np.amax( array ) ) ) )\n bins = [ np.arange( math.floor( np.amin( array[0] ) ), math.ceil( np.amax( array[0] ) ), step ), np.arange( math.floor( np.amin( array[1] ) ), math.ceil( np.amax( array[1] ) ), step ) ]\n\n # Plot the 2D histogram\n h, x_edge, y_edge, quad_mesh = ax.hist2d( array[0], array[1], bins = bins, **kwargs )\n\n xlim = ax.set_xlim( x_min, x_max )\n ylim = ax.set_ylim( y_min, y_max )\n\n if filename is not None:\n plt.savefig( filename )\n if show:\n plt.show()\n else:\n plt.close()\n\n elif array.ndim is 2:\n raise DimensionError( \"Invalid array shape. Number of rows required: 2. (Actual: {})\".format( array.shape[0] ) )\n else:\n raise DimensionError( \"Invalid dimensions. Required: 2. 
(Actual: {})\".format( array.ndim ) )\n\n return ax", "def get2DHist(self, name, **kwargs):\n format_string = \"{0[dir]}/{0[histname]}/{0[histname]}NFin{0[NFin]:02d}JetPt{0[pT]:02d}\"\n\n if \"dir\" in kwargs:\n hist = [\n self._f.Get(format_string.format(\n {\n \"dir\": kwargs[\"dir\"],\n \"histname\": name,\n \"NFin\": self._NFIN,\n \"pT\": i,\n }\n )\n ).Clone()\n for i in range(self._range[0], self._range[1])\n ] # Get jT histograms from file an array\n else:\n hist = [\n self._f.Get(format_string.format(\n {\n \"dir\": self._directory,\n \"histname\": name,\n \"NFin\": self._NFIN,\n \"pT\": i,\n }\n )\n ).Clone()\n for i in range(self._range[0], self._range[1])\n ] # Get jT histograms from file an array\n jetPt = parse_jet_pt_bins(hist)\n\n if kwargs.get(\"jetpt\", False):\n return hist, jetPt\n else:\n return hist", "def getHistPta(self, name, **kwargs):\n hists = []\n format_string = \"{0[dir]}/{0[histname]}/{0[histname]}NFin{0[NFin]:02d}JetPt{0[jetpT]:02d}TrkPt{0[trkpT]:02d}\"\n\n for i in range(self._range[0], self._range[1]):\n if \"dir\" in kwargs:\n hist = [\n self._f.Get(\n format_string.format(\n {\n \"dir\": kwargs[\"dir\"],\n \"histname\": name,\n \"NFin\": self._NFIN,\n \"jetpT\": i,\n \"trkpT\": j,\n }\n )\n ).Clone()\n for j in range(0, 11)\n ] # Get jT histograms from file an array\n else:\n hist = [\n self._f.Get(\n format_string.format(\n {\n \"dir\": self._directory,\n \"histname\": name,\n \"NFin\": self._NFIN,\n \"jetpT\": i,\n \"trkpT\": j,\n }\n )\n ).Clone()\n for j in range(0, 11)\n ] # Get jT histograms from file an array\n hists.append(hist)\n # print('{0[dir]}/{0[histname]}/{0[histname]}NFin{0[NFin]:02d}JetPt{0[pT]:02d}'.format({'dir':self._directory, 'histname':name,'NFin':self._NFIN,'pT':1}))\n\n # Get Jet Pt bins\n jetPt = parse_jet_pt_bins(hist)\n\n # Get Track pt Bins\n trkPt = parse_jet_pt_bins(search=\"constituent\")\n\n # print(len(hist))\n # print(hist)\n # print(jetPt)\n for hist, N, bgN in zip(hists, self._measN, self._measBgN):\n for h in hist:\n h.Sumw2()\n # print(\"Rebinning {} by {} in set {} that has {} bins\".format(h.GetTitle(), self._rebin, self._name, h.GetNbinsX()))\n h.Rebin(self._rebin)\n print(kwargs)\n if self.properties.get(\"isWeight\", False):\n h.SetLineColor(self.properties.get(\"color\", 1))\n h.SetMarkerColor(self.properties.get(\"color\", 1))\n else:\n if kwargs.get(\"isBg\", False):\n h.SetLineColor(self.properties.get(\"color\", 1) + 1)\n h.SetMarkerColor(self.properties.get(\"color\", 1) + 1)\n h.Scale(1.0 / bgN, \"width\")\n print(\"{} is bg\".format(name))\n else:\n h.SetLineColor(self.properties.get(\"color\", 1))\n h.SetMarkerColor(self.properties.get(\"color\", 1))\n h.Scale(1.0 / N, \"width\")\n\n h.SetMarkerStyle(self.properties.get(\"style\", 24))\n h.SetMarkerSize(0.5)\n h.SetLineColor(1)\n\n if kwargs.get(\"jetpt\", False):\n return hist, jetPt, trkPt\n else:\n return hist", "def makeHistogram(values, numBins, xLabel, yLabel, title=None):\n pylab.hist(values, bins = numBins)\n pylab.xlabel(xLabel)\n pylab.ylabel(yLabel)\n if not title == None:\n pylab.title(title)\n pylab.show()", "def histogram1d(data: Union[Array, ArrayLike], bins: Any = None, *, compute: bool = True, **kwargs):\n if not isinstance(data, Array):\n data_np = np.asarray(data)\n data = dask.array.from_array(data_np, chunks=int(data_np.shape[0] / options[\"chunk_split\"]))\n\n if not kwargs.get(\"adaptive\", True):\n raise ValueError(\"Only adaptive histograms supported for dask (currently).\")\n kwargs[\"adaptive\"] = True\n\n def 
block_hist(array):\n return original_h1(array, bins, **kwargs)\n\n return _run_dask(\n name=\"dask_adaptive1d\",\n data=cast(Array, data),\n compute=compute,\n method=kwargs.pop(\"dask_method\", \"threaded\"),\n func=block_hist,\n )", "def interactive_histograms(adata, keys=['n_counts', 'n_genes'],\n bins=100, min_bins=1, max_bins=1000,\n tools='pan, reset, wheel_zoom, save',\n groups=None, fill_alpha=0.4,\n palette=Set1[9] + Set2[8] + Set3[12],\n legend_loc='top_right', display_all=True,\n *args, **kwargs):\n\n from itertools import product\n from functools import reduce\n from bokeh.plotting import figure, show, ColumnDataSource\n from bokeh.models.widgets import CheckboxGroup\n from bokeh.models.widgets.buttons import Button\n from bokeh.models import Slider\n from bokeh.models.callbacks import CustomJS\n from bokeh.io import output_notebook\n from bokeh.layouts import layout, column, row\n\n from copy import copy\n from numpy import array_split, ceil\n output_notebook()\n\n if min_bins < 1:\n raise ValueError(f'Expected min_bins >= 1, got min_bins={min_bins}.')\n if max_bins < min_bins:\n raise ValueError(f'Expected min_bins <= max_bins, got min_bins={min_bins}, max_bins={max_bins}.')\n if not (bins >= min_bins and bins <= max_bins):\n raise ValueError(f'Expected min_bins <= bins <= max_bins, got min_bins={min_bins}, bins={bins}, max_bins={max_bins}.')\n\n # check the input\n for key in keys:\n if key not in adata.obs.keys() and \\\n key not in adata.var.keys() and \\\n key not in adata.var_names:\n raise ValueError(f'The key `{key}` does not exist in adata.obs, adata.var or adata.var_names.')\n\n def _create_adata_groups():\n if groups is None:\n return [('all',)], [adata]\n\n combs = list(product(*[set(adata.obs[g]) for g in groups]))\n adatas= [adata[reduce(lambda l, r: l & r,\n (adata.obs[k] == v for k, v in zip(groups, vals)), True)]\n for vals in combs] + [adata]\n\n if display_all:\n combs += [('all',)]\n adatas += [adata]\n\n return combs, adatas\n\n # group_v_combs contains the value combinations\n # used for grupping\n group_v_combs, adatas = _create_adata_groups()\n n_plots = len(group_v_combs)\n checkbox_group = CheckboxGroup(active=list(range(n_plots)), width=200)\n\n for key in keys:\n # create histogram\n cols, legends, callbacks = [], [], []\n plot_map = dict()\n slider = Slider(start=min_bins, end=max_bins, value=bins, step=1,\n title='Bins')\n\n fig = figure(*args, tools=tools, **kwargs)\n\n plot_ids = []\n for j, (ad, group_vs) in enumerate(zip(adatas, group_v_combs)):\n\n if ad.n_obs == 0:\n continue\n\n plot_ids.append(j)\n color = palette[len(plot_ids) - 1]\n\n if key in ad.obs.keys():\n orig = ad.obs[key]\n hist, edges = np.histogram(orig, density=True, bins=bins)\n elif key in ad.var.keys():\n orig = ad.var[key]\n hist, edges = np.histogram(orig, density=True, bins=bins)\n else:\n orig = ad[:, key].X\n hist, edges = np.histogram(orig, density=True, bins=bins)\n\n # original data, used for recalculation of histogram in JS code\n orig = ColumnDataSource(data=dict(values=orig))\n # data that we update in JS code\n source = ColumnDataSource(data=dict(hist=hist, l_edges=edges[:-1], r_edges=edges[1:]))\n\n legend = ', '.join(': '.join(map(str, gv)) for gv in zip(groups, group_vs)) \\\n if groups is not None else 'all'\n legends.append(legend)\n # create figure\n p = fig.quad(source=source, top='hist', bottom=0,\n left='l_edges', right='r_edges',\n fill_color=color, legend=legend,\n line_color=\"#555555\", fill_alpha=fill_alpha)\n\n # create callback and slider\n 
callback = CustomJS(args=dict(source=source, orig=orig), code=_inter_hist_js_code)\n callback.args['bins'] = slider\n callbacks.append(callback)\n\n # add the current plot so that we can set it\n # visible/invisible in JS code\n plot_map[f'p_{j}'] = p\n\n # slider now updates all values\n slider.js_on_change('value', *callbacks)\n plot_map['cb'] = checkbox_group\n\n button = Button(label='Toggle All', button_type='primary')\n code_t='\\n'.join(f'p_{p_id}.visible = false;' for i, p_id in enumerate(plot_ids))\n code_f ='\\n'.join(f'p_{p_id}.visible = true;' for i, p_id in enumerate(plot_ids))\n button.callback = CustomJS(\n args=plot_map,\n code=f'''if (cb.active.length == {len(plot_map) - 1}) {{\n console.log(cb.active);\n cb.active = Array();\n {code_t};\n }} else {{\n console.log(cb.active);\n cb.active = Array.from(Array({len(plot_map) - 1}).keys());\n {code_f};\n }}'''\n )\n\n checkbox_group.callback = CustomJS(\n args=plot_map,\n code='\\n'.join(f'p_{p_id}.visible = cb.active.includes({i});' for i, p_id in enumerate(plot_ids))\n )\n checkbox_group.labels = legends\n\n fig.legend.location = legend_loc\n fig.xaxis.axis_label = key\n fig.yaxis.axis_label = 'normalized frequency'\n fig.plot_width = kwargs.get('plot_width', 400)\n fig.plot_height = kwargs.get('plot_height', 400)\n\n cols.append(column(slider, button, row(fig, checkbox_group)))\n\n\n # transform list of pairs of figures and sliders into list of lists, where\n # each sublist has length <= 2\n # note that bokeh does not like np.arrays\n grid = list(map(list, array_split(cols, ceil(len(cols) / 2))))\n\n show(layout(children=grid, sizing_mode='fixed', ncols=2))", "def makeHistogram(values, numBins, xLabel, yLabel, title=None):\r\n pylab.hist(values, bins = numBins)\r\n pylab.xlabel(xLabel)\r\n pylab.ylabel(yLabel)\r\n if title != None:\r\n pylab.title(title)\r\n pylab.show()", "def push_histogram(self, data):\n # Loop through bands of this tile\n for i, dat in enumerate(data):\n # Create histogram for new data with the same bins\n new_hist = numpy.histogram(dat['data'], bins=self.hist_bins[i])\n # Add counts of this tile to band metadata histogram\n self.hist_values[i] += new_hist[0]", "def import_histo(w,hz,xmin,xmax):\n print 'Loading histogram with',hz.GetEntries(),'entries'\n data = RooDataHist('data','Zmumu MC',RooArgList(w.var('x')),hz)\n return data", "def test_hist_from_arrays():\n\n x_vals = np.random.random(200)\n y_vals = np.random.random(200)\n\n for w in range(1, 4):\n for h in range(1, 4):\n grid = pixhist.from_arrays(\n x_vals, y_vals, w, h, make_xy_proportional=False, log=False\n )\n grid_np = hist_from_arrays_np(x_vals, y_vals, w, h)\n assert np.all(grid == grid_np), f\"values not equal, w={w}, w={h}\"\n\n grid = pixhist.from_arrays(\n x_vals, y_vals, w, h, make_xy_proportional=False, log=True\n )\n assert np.all(\n grid == np.log(grid_np + 1)\n ), f\"Log values not equal, w={w}, h={h}\"", "def fill(self, *args, **kwargs): # noqa: C901\n\n with KWArgs(kwargs) as kw:\n weight = kw.optional(\"weight\")\n sample = kw.optional(\"sample\")\n threads = kw.optional(\"threads\")\n\n # Convert to NumPy arrays\n args = _fill_cast(args)\n weight = _fill_cast(weight)\n sample = _fill_cast(sample)\n\n if threads is None or threads == 1:\n self._hist.fill(*args, weight=weight, sample=sample)\n return self\n\n if threads == 0:\n threads = os.cpu_count()\n\n if (\n self._hist._storage_type is _core.storage.mean\n or self._hist._storage_type is _core.storage.weighted_mean\n ):\n raise RuntimeError(\"Mean histograms do not 
support threaded filling\")\n\n data = [np.array_split(a, threads) for a in args]\n\n if weight is None or np.isscalar(weight):\n weights = [weight] * threads\n else:\n weights = np.array_split(weight, threads)\n\n if sample is None or np.isscalar(sample):\n samples = [sample] * threads\n else:\n samples = np.array_split(sample, threads)\n\n if self._hist._storage_type is _core.storage.atomic_int64:\n\n def fun(weight, sample, *args):\n self._hist.fill(*args, weight=weight, sample=sample)\n\n else:\n sum_lock = threading.Lock()\n\n def fun(weight, sample, *args):\n local_hist = self._hist.__copy__()\n local_hist.reset()\n local_hist.fill(*args, weight=weight, sample=sample)\n with sum_lock:\n self._hist += local_hist\n\n thread_list = [\n threading.Thread(target=fun, args=arrays)\n for arrays in zip(weights, samples, *data)\n ]\n\n for thread in thread_list:\n thread.start()\n\n for thread in thread_list:\n thread.join()\n\n return self", "def get_hists(args):\n from ROOT import TFile\n files, hnames = expand_targets(args.targets)\n if not args.labels:\n labels = [None for x in files]\n else:\n labels = args.labels\n if len(labels) != len(hnames):\n print \"The number of labels doesn't match the number of hists. Use _None to not specify a label for a given histogram.\"\n raise PlotError\n if not args.colors:\n colors = [style.def_grey for x in files]\n else:\n colors = args.colors\n if len(labels) != len(hnames):\n print \"The number of colors doesn't match the number of hists.\"\n raise PlotError\n\n gopts = {}\n if args.rebin: gopts[\"rebin\"] = args.rebin\n if args.norm: gopts[\"norm\"] = args.norm\n if args.error: gopts[\"error\"] = args.error\n if args.scale: gopts[\"scale\"] = args.scale\n if args.exclude: gopts[\"exclude\"] = args.exclude\n if args.projectionx: gopts[\"projectionx\"] = args.projectionx\n if args.projectiony: gopts[\"projectiony\"] = args.projectiony \n if args.averagex: gopts[\"averagex\"] = args.averagex\n if args.averagey: gopts[\"averagey\"] = args.averagey\n if args.emph: gopts[\"emph\"] = args.emph\n\n hists = []\n if args.sub in ['color2D']:\n for i in range(len(hnames)):\n root_file = TFile.Open(args.inDir + files[i])\n rhist = hist_arithmetic(hnames[i], args.inDir + files[i], root_file)\n hists.append(Hist2D.from_hist(rhist, label=labels[i], **opts)) \n root_file.Close()\n else:\n for i in range(len(hnames)):\n opts = gopts.copy()\n for opt in args.options:\n if opt[\"pos\"] == i:\n opts = dict(gopts.items() + opt.items())\n del opts[\"pos\"]\n break\n root_file = TFile.Open(args.inDir + files[i])\n rhist = hist_arithmetic(hnames[i], args.inDir + files[i], root_file)\n hists.append(Hist.from_hist(rhist, label=labels[i], color=colors[i], **opts))\n root_file.Close()\n return hists", "def interactive_hist(adata, keys=['n_counts', 'n_genes'],\n bins='auto', max_bins=100,\n groups=None, fill_alpha=0.4,\n palette=None, display_all=True,\n tools='pan, reset, wheel_zoom, save',\n legend_loc='top_right',\n plot_width=None, plot_height=None,\n *args, **kwargs):\n\n if max_bins < 1:\n raise ValueError(f'`max_bins` must >= 1')\n\n palette = Set1[9] + Set2[8] + Set3[12] if palette is None else palette\n\n # check the input\n for key in keys:\n if key not in adata.obs.keys() and \\\n key not in adata.var.keys() and \\\n key not in adata.var_names:\n raise ValueError(f'The key `{key}` does not exist in `adata.obs`, `adata.var` or `adata.var_names`.')\n\n def _create_adata_groups():\n if groups is None:\n return [adata], [('all',)]\n\n combs = 
list(product(*[set(adata.obs[g]) for g in groups]))\n adatas= [adata[reduce(lambda l, r: l & r,\n (adata.obs[k] == v for k, v in zip(groups, vals)), True)]\n for vals in combs] + [adata]\n\n if display_all:\n combs += [('all',)]\n adatas += [adata]\n\n return adatas, combs\n\n # group_v_combs contains the value combinations\n ad_gs = _create_adata_groups()\n \n cols = []\n for key in keys:\n callbacks = []\n fig = figure(*args, tools=tools, **kwargs)\n slider = Slider(start=1, end=max_bins, value=0, step=1,\n title='Bins')\n\n plots = []\n for j, (ad, group_vs) in enumerate(filter(lambda ad_g: ad_g[0].n_obs > 0, zip(*ad_gs))):\n\n if key in ad.obs.keys():\n orig = ad.obs[key]\n hist, edges = np.histogram(orig, density=True, bins=bins)\n elif key in ad.var.keys():\n orig = ad.var[key]\n hist, edges = np.histogram(orig, density=True, bins=bins)\n else:\n orig = ad[:, key].X\n hist, edges = np.histogram(orig, density=True, bins=bins)\n\n slider.value = len(hist)\n\n # original data, used for recalculation of histogram in JS code\n orig = ColumnDataSource(data=dict(values=orig))\n # data that we update in JS code\n source = ColumnDataSource(data=dict(hist=hist, l_edges=edges[:-1], r_edges=edges[1:]))\n\n legend = ', '.join(': '.join(map(str, gv)) for gv in zip(groups, group_vs)) \\\n if groups is not None else 'all'\n # create figure\n p = fig.quad(source=source, top='hist', bottom=0,\n left='l_edges', right='r_edges',\n fill_color=palette[j], legend=legend if legend_loc is not None else None,\n muted_alpha=0,\n line_color=\"#555555\", fill_alpha=fill_alpha)\n\n # create callback and slider\n callback = CustomJS(args=dict(source=source, orig=orig), code=_inter_hist_js_code)\n callback.args['bins'] = slider\n callbacks.append(callback)\n\n # add the current plot so that we can set it\n # visible/invisible in JS code\n plots.append(p)\n\n # slider now updates all values\n slider.js_on_change('value', *callbacks)\n\n button = Button(label='Toggle', button_type='primary')\n button.callback = CustomJS(\n args={'plots': plots},\n code='''\n for (var i = 0; i < plots.length; i++) {\n plots[i].muted = !plots[i].muted;\n }\n '''\n )\n\n if legend_loc is not None:\n fig.legend.location = legend_loc\n fig.legend.click_policy = 'mute'\n\n fig.xaxis.axis_label = key\n fig.yaxis.axis_label = 'normalized frequency'\n _set_plot_wh(fig, plot_width, plot_height)\n\n cols.append(column(slider, button, fig))\n\n # transform list of pairs of figures and sliders into list of lists, where\n # each sublist has length <= 2\n # note that bokeh does not like np.arrays\n grid = list(map(list, np.array_split(cols, np.ceil(len(cols) / 2))))\n\n show(layout(children=grid, sizing_mode='fixed', ncols=2))", "def makeHistogram(values, numBins, xLabel, yLabel, title=None):\r\n # TODO\r\n pylab.hist(values, bins = numBins)\r\n pylab.xlabel(xLabel)\r\n pylab.ylabel(yLabel)\r\n if title != None:\r\n pylab.title(title)\r\n pylab.show()", "def h3(data, bins=None, **kwargs):\n return histogramdd(data, bins, **kwargs)", "def bdev_get_histogram(client, name):\n params = {'name': name}\n return client.call('bdev_get_histogram', params)", "def histo ( self ,\n xbins = 20 , xmin = None , xmax = None ,\n ybins = 20 , ymin = None , ymax = None ,\n hpars = () , \n histo = None ,\n integral = False ,\n errors = False , \n density = False ) :\n \n \n histos = self.make_histo ( xbins = xbins , xmin = xmin , xmax = xmax ,\n ybins = ybins , ymin = ymin , ymax = ymax ,\n hpars = hpars ,\n histo = histo )\n\n # loop over the historgam bins \n for 
ix,iy,x,y,z in histo.items() :\n\n xv , xe = x.value() , x.error()\n yv , ye = y.value() , y.error()\n \n # value at the bin center \n c = self ( xv , yv , error = errors ) \n\n if not integral : \n histo[ix,iy] = c\n continue\n\n # integral over the bin \n v = self.integral( xv - xe , xv + xe , yv - ye , yv + ye )\n \n if errors :\n if 0 == c.cov2 () : pass\n elif 0 != c.value() and 0 != v : \n v = c * ( v / c.value() )\n \n histo[ix,iy] = v \n\n ## coovert to density historgam, if requested \n if density : histo = histo.density()\n \n return histo", "def makeTH1(name,mean,sigma):\n\n hist = TH1D(name,name,100,-10,10)\n for i in range(1000):\n hist.Fill(gRandom.Gaus(mean,sigma))\n return hist", "def yieldhist(self):\n labels = [\"initial\"] + [f\"N - {i}\" for i in self._names] + [\"N\"]\n if not self._delayed_mode:\n h = hist.Hist(hist.axis.Integer(0, len(labels), name=\"N-1\"))\n h.fill(numpy.arange(len(labels)), weight=self._nev)\n\n else:\n h = hist.dask.Hist(hist.axis.Integer(0, len(labels), name=\"N-1\"))\n for i, weight in enumerate(self._masks, 1):\n h.fill(dask_awkward.full_like(weight, i, dtype=int), weight=weight)\n h.fill(dask_awkward.zeros_like(weight))\n\n return h, labels", "def _single_histogram(self, plot_name, histogram_data):\n hist, left_edges, right_edges = histogram_data\n\n # figure\n kwargs = {\n \"plot_height\": 250,\n \"height_policy\": \"fit\",\n \"plot_width\": 250,\n \"title\": plot_name,\n }\n p = default_figure(kwargs)\n\n # histogram\n fcolor = self.plot_design.base_color_tints[-3]\n p.quad(top=hist, bottom=0, left=left_edges, right=right_edges, fill_color=fcolor, line_color=fcolor)\n\n # plot specific styling\n p.y_range.start = 0\n p.xaxis.ticker = BasicTicker(desired_num_ticks=5)\n p.xaxis.formatter = NumeralTickFormatter(format=\"0.[0]\")\n\n return p", "def simple_hist(vals, samplename, bins=100, min=None, max=None):\n if not min:\n min = vals.min()\n if not max:\n max = vals.max()\n fig = plt.figure()\n plt.hist(vals.dropna(), bins=bins, range=[min, max])\n calc_vals = vals.dropna()\n plt.xlabel('Bins')\n plt.ylabel('Frequency')\n plt.title(samplename)\n plt.axvline(np.median(calc_vals),\n color='r', linestyle='dashed', linewidth=1)\n plt.grid(True)\n\n return fig", "def gethist(self, *args, **kwargs):\n variables, selection, issingle = unwrap_gethist_args(*args)\n verbosity = LOG.getverbosity(kwargs)\n name = kwargs.get('name', self.name )\n name += kwargs.get('tag', \"\" )\n title = kwargs.get('title', self.title )\n parallel = kwargs.get('parallel', False )\n kwargs['cuts'] = joincuts(kwargs.get('cuts'), self.cuts )\n kwargs['weight'] = joinweights(kwargs.get('weight', \"\"), self.weight ) # pass weight down\n kwargs['scale'] = kwargs.get('scale', 1.0) * self.scale * self.norm # pass scale down\n \n # HISTOGRAMS\n allhists = [ ]\n garbage = [ ]\n hargs = (variables, selection)\n hkwargs = kwargs.copy()\n if parallel and len(self.samples)>1:\n hkwargs['parallel'] = False\n processor = MultiProcessor()\n for sample in self.samples:\n processor.start(sample.gethist,hargs,hkwargs,name=sample.title) \n for process in processor:\n allhists.append(process.join())\n else:\n for sample in self.samples:\n if 'name' in kwargs: # prevent memory leaks\n hkwargs['name'] = makehistname(kwargs.get('name',\"\"),sample.name)\n allhists.append(sample.gethist(*hargs,**hkwargs))\n \n # SUM\n sumhists = [ ]\n if any(len(subhists)<len(variables) for subhists in allhists):\n LOG.error(\"MergedSample.gethist: len(subhists) = %s < %s = 
len(variables)\"%(len(subhists),len(variables)))\n for ivar, variable in enumerate(variables):\n subhists = [subhists[ivar] for subhists in allhists]\n sumhist = None\n for subhist in subhists:\n if sumhist==None:\n sumhist = subhist.Clone(\"%s_%s\"%(variable.filename,name))\n sumhist.SetTitle(title)\n sumhist.SetDirectory(0)\n sumhist.SetLineColor(self.linecolor)\n sumhist.SetFillColor(self.fillcolor)\n sumhist.SetMarkerColor(self.fillcolor)\n sumhists.append(sumhist)\n else:\n sumhist.Add(subhist) \n if verbosity>=4:\n printhist(sumhist,pre=\">>> \")\n deletehist(subhists)\n \n # PRINT\n if verbosity>=2:\n nentries, integral = -1, -1\n for sumhist in sumhists:\n if sumhist.GetEntries()>nentries:\n nentries = sumhist.GetEntries()\n integral = sumhist.Integral()\n print \">>>\\n>>> MergedSample.gethist - %s\"%(color(name,color=\"grey\"))\n print \">>> entries: %d (%.2f integral)\"%(nentries,integral)\n \n if issingle:\n return sumhists[0]\n return sumhists", "def proj2hist( sample , tname, bins = 100, low = 163, high = 173):\n hist = ROOT.TH1F(tname,tname+\";E, a.u.;N\",bins,low,high)\n for ev in sample:\n hist.Fill( ev )\n return hist", "def gethistofromarray(self, ID, data, **kwargs):\n\n try:\n self.H[ID]=Histo( ID)\n self.H[ID].gethistofromarray(data, **kwargs)\n except:\n print \"could not create histo with kwargs:\"\n for name, value in kwargs.items():\n print name, value\n raise", "def hist(self):\r\n plt.hist(self.data_array, bins='auto', density=False, facecolor='b')\r\n plt.title(self.column_name)\r\n plt.savefig(self.column_name + \".svg\")\r\n plt.close()", "def make_histo ( self , \n xbins = 20 , xmin = None , xmax = None ,\n ybins = 20 , ymin = None , ymax = None ,\n hpars = () , \n histo = None ) :\n \n import ostap.histos.histos\n\n # histogram is provided \n if histo :\n \n assert isinstance ( histo , ROOT.TH2 ) and not isinstance ( histo , ROOT.TH3 ) , \\\n \"Illegal type of ``histo''-argument %s\" % type( histo )\n \n histo = histo.clone()\n histo.Reset()\n\n # arguments for the histogram constructor \n elif hpars :\n \n histo = ROOT.TH2F ( hID () , 'PDF%s' % self.name , *hpars )\n if not histo.GetSumw2() : histo.Sumw2()\n\n # explicit construction from (#bins,min,max)-triplet \n else :\n \n assert isinstance ( xbins , integer_types ) and 0 < xbins, \\\n \"Wrong ``xbins''-argument %s\" % xbins \n assert isinstance ( ybins , integer_types ) and 0 < ybins, \\\n \"Wrong ``ybins''-argument %s\" % ybins \n if xmin == None and self.xminmax() : xmin = self.xminmax()[0]\n if xmax == None and self.xminmax() : xmax = self.xminmax()[1]\n if ymin == None and self.yminmax() : ymin = self.yminmax()[0]\n if ymax == None and self.yminmax() : ymax = self.yminmax()[1]\n \n histo = ROOT.TH2F ( hID() , 'PDF%s' % self.name ,\n xbins , xmin , xmax ,\n ybins , ymin , ymax )\n if not histo.GetSumw2() : histo.Sumw2()\n\n return histo", "def from_hist(cls, hist, **options):\n # For 1D hists derived from 2D hists\n if 'rebin' in options:\n hist.Rebin(options.pop('rebin'))\n if 'projectionx' in options:\n arg = options.pop('projectionx')\n bin1 = hist.GetYaxis().FindBin(arg[0])\n bin2 = hist.GetYaxis().FindBin(arg[1])\n hist = hist.ProjectionX(hist.GetName()+\"_px\",bin1,bin2,\"e\")\n if 'projectiony' in options:\n arg = options.pop('projectiony')\n bin1 = hist.GetXaxis().FindBin(arg[0])\n bin2 = hist.GetXaxis().FindBin(arg[1])\n hist = hist.ProjectionY(hist.GetName()+\"_py\",bin1,bin2,\"e\")\n if 'averagex' in options:\n arg = options.pop('averagex')\n bin1 = hist.GetYaxis().FindBin(arg[0])\n bin2 
= hist.GetYaxis().FindBin(arg[1])\n avg_hist = hist.ProjectionX(hist.GetName()+\"_px\",bin1,bin1,\"e\")\n for i in range(1,avg_hist.GetNbinsX() + 1):\n avg_hist.SetBinContent(i,0)\n avg_hist.SetBinError(i,0)\n tmp_weights = avg_hist.Clone(hist.GetName()+\"_weights\")\n for i in range(bin1,bin2):\n tmp_hist = hist.ProjectionX(hist.GetName()+\"_px\",i,i+1,\"e\")\n for j in range(1,avg_hist.GetNbinsX() + 1):\n tmp_weights.SetBinContent(j,tmp_weights.GetBinContent(j) + (tmp_hist.GetBinError(j) ** -2))\n tmp_hist.SetBinContent(j,tmp_hist.GetBinContent(j) * (tmp_hist.GetBinError(j) ** -2))\n tmp_hist.SetBinError(j,tmp_hist.GetBinError(j) * (tmp_hist.GetBinError(j) ** -2))\n avg_hist.Add(tmp_hist)\n avg_hist.Divide(tmp_weights)\n hist = avg_hist\n if 'averagey' in options:\n arg = options.pop('averagey')\n bin1 = hist.GetXaxis().FindBin(arg[0])\n bin2 = hist.GetXaxis().FindBin(arg[1])\n avg_hist = hist.ProjectionY(hist.GetName()+\"_py\",bin1,bin1,\"e\")\n for i in range(1,avg_hist.GetNbinsX() + 1):\n avg_hist.SetBinContent(i,0)\n avg_hist.SetBinError(i,0)\n tmp_weights = avg_hist.Clone(hist.GetName()+\"_weights\")\n for i in range(bin1,bin2):\n tmp_hist = hist.ProjectionY(hist.GetName()+\"_py\",i,i+1,\"e\")\n for j in range(1,avg_hist.GetNbinsX() + 1):\n tmp_weights.SetBinContent(j,tmp_weights.GetBinContent(j) + (tmp_hist.GetBinError(j) ** -2))\n tmp_hist.SetBinContent(j,tmp_hist.GetBinContent(j) * (tmp_hist.GetBinError(j) ** -2))\n tmp_hist.SetBinError(j,tmp_hist.GetBinError(j) * (tmp_hist.GetBinError(j) ** -2))\n avg_hist.Add(tmp_hist)\n avg_hist.Divide(tmp_weights)\n hist = avg_hist\n if 'error' in options and options.pop('error'):\n for i in range(1, hist.GetNbinsX()+1):\n hist.SetBinContent(i,hist.GetBinError(i)/hist.GetBinContent(i))\n hist.SetBinError(i,0.0)\n nbins = hist.GetNbinsX()\n bin_edges = np.fromiter((hist.GetBinLowEdge(i) for i in xrange(1,nbins+2)), np.float, nbins+1)\n values = np.fromiter((hist.GetBinContent(i) for i in xrange(1,nbins+1)), np.float, nbins)\n yerr = np.fromiter((hist.GetBinError(i) for i in xrange(1,nbins+1)), np.float, nbins)\n return cls.from_bin_edges(bin_edges,values,yerr,**options)", "def get_test_histograms1():\n # dummy dataset with mixed types\n # convert timestamp (col D) to nanosec since 1970-1-1\n import pandas as pd\n import histogrammar as hg\n\n df = pd.util.testing.makeMixedDataFrame()\n df['date'] = df['D'].apply(to_ns)\n df['boolT'] = True\n df['boolF'] = False\n\n # building 1d-, 2d-, and 3d-histogram (iteratively)\n hist1 = hg.Categorize(unit('C'))\n hist2 = hg.Bin(5, 0, 5, unit('A'), value=hist1)\n hist3 = hg.SparselyBin(origin=pd.Timestamp('2009-01-01').value, binWidth=pd.Timedelta(days=1).value,\n quantity=unit('date'), value=hist2)\n # fill them\n hist1.fill.numpy(df)\n hist2.fill.numpy(df)\n hist3.fill.numpy(df)\n\n return df, hist1, hist2, hist3", "def __init__(self,name, histogram):\n\n\n assert isinstance(histogram,Histogram), \"input must be a 3ML histogram\"\n\n self._histogram = histogram #type: Histogram\n\n\n super(HistLike, self).__init__(name=name,\n x=self._histogram.mid_points,\n y=self._histogram.contents,\n yerr=self._histogram.errors,\n poisson_data=self._histogram.is_poisson)", "def hog_histograms(*args, **kwargs): # real signature unknown\n pass", "def histogramdd(data: Union[Array, ArrayLike], bins: Any = None, **kwargs):\n from dask.array.rechunk import rechunk\n\n if isinstance(data, (list, tuple)):\n data = dask.array.stack(data, axis=1)\n\n if not isinstance(data, Array):\n data = np.asarray(data)\n data = 
dask.array.from_array(\n data, chunks=(int(data.shape[0] / options[\"chunk_split\"]), data.shape[1])\n )\n else:\n data = rechunk(data, {1: data.shape[1]})\n\n if isinstance(data, dask.array.Array):\n if data.ndim != 2:\n raise ValueError(\n f\"Only (n, dim) data allowed for histogramdd, {data.shape} encountered.\"\n )\n\n if not kwargs.get(\"adaptive\", True):\n raise ValueError(\"Only adaptive histograms supported for dask (currently).\")\n kwargs[\"adaptive\"] = True\n\n def block_hist(array):\n return original_hdd(array, bins, **kwargs)\n\n return _run_dask(\n name=\"dask_adaptive_dd\",\n data=cast(Array, data),\n compute=kwargs.pop(\"compute\", True),\n method=kwargs.pop(\"dask_method\", \"threaded\"),\n func=block_hist,\n expand_arg=True,\n )", "def new_track_density(track_key,hist_dims,conn):\n\n # extract all of the md needed to look up the data\n\n (fname,iden_key,track_key,dset_key) = conn.execute(\"select fout,iden_key,comp_key,dset_key from tracking where comp_key = ?\",\n track_key).fetchone()\n print fname\n F = h5py.File(fname,'r')\n print F.keys()[:5]\n try:\n start_plane = F[fd('tracking',track_key)]['start_plane'][:]\n start_part = F[fd('tracking',track_key)]['start_particle'][:]\n\n print len(start_plane)\n \n # figure out the right size to make the array\n dims = F.attrs['dims']\n print dims\n # make data collection object\n hist2D_ac = Hist2D_accumlator(dims,hist_dims)\n # loop over the heads of track index and hash result\n cur_plane = None\n cur_x = None\n cur_y = None\n temp = 0\n fr_count = 0\n for plane,part in zip(start_plane,start_part):\n if not plane == cur_plane:\n cur_plane = plane\n cp = F[ff(cur_plane)]\n cur_x = cp[fd('x',iden_key)]\n cur_y = cp[fd('y',iden_key)]\n temp += cp.attrs['temperature']\n fr_count += 1\n\n hist2D_ac.add_point(\n (cur_x[part],\n cur_y[part])\n )\n pass\n except ValueError,er:\n print ff(cur_plane)\n \n \n finally:\n F.close()\n del F\n\n f = plt.figure()\n ax = f.add_axes([.1,.1,.8,.8])\n c = ax.imshow(np.flipud(hist2D_ac.data.T),interpolation='nearest')\n plt.colorbar(c)\n ax.set_title('%.2f C '%(temp/fr_count) + str(dset_key))\n return hist2D_ac.data", "def __init__(self, name, instance, revision=None, histograms_url=None,\n additional_histograms=None):\n\n if revision and histograms_url:\n raise ValueError(\"Invalid use of both revision and histograms_url\")\n\n # For backwards compatibility.\n if not histograms_url:\n revision = \\\n (revision or HISTOGRAMS_JSON_REVISION).replace(\"/rev/\", \"/raw-file/\")\n histograms_url = revision + HISTOGRAMS_JSON_PATH\n\n self.histograms_url = histograms_url\n histograms_definition = _fetch_histograms_definition(histograms_url)\n\n if additional_histograms:\n histograms_definition.update(additional_histograms)\n\n # TODO: implement centralized revision service which handles all the quirks...\n if name.startswith(\"USE_COUNTER_\") or name.startswith(\"USE_COUNTER2_\"):\n self.definition = histogram_tools.Histogram(name, {\"kind\": \"boolean\", \"description\": \"\", \"expires_in_version\": \"never\"})\n else:\n proper_name = name\n if \"/\" in name: # key in a keyed histogram, like BLOCKED_ON_PLUGIN_INSTANCE_INIT_MS/'Shockwave Flash14.0.0.145'\n proper_name = name.split(\"/\")[0] # just keep the name of the parent histogram\n\n try:\n self.definition = histogram_tools.Histogram(name, histograms_definition[proper_name])\n\n except KeyError:\n # Some histograms are collected twice: during startup and during normal execution.\n # In the former case the STARTUP_ prefix prepends the histogram 
name, even though\n # the prefixed histogram name is not part of the histogram definition file.\n # Other histograms, like STARTUP_CRASH_DETECTED, are instead collected only once\n # and are defined the histogram definition file.\n self.definition = histogram_tools.Histogram(name, histograms_definition[re.sub(\"^STARTUP_\", \"\", proper_name)])\n\n self.kind = self.definition.kind()\n self.name = name\n\n if isinstance(instance, list) or isinstance(instance, np.ndarray) or isinstance(instance, pd.Series):\n if len(instance) == self.definition.n_buckets():\n values = instance\n else:\n values = instance[:-5]\n self.buckets = pd.Series(values, index=self.definition.ranges(), dtype='int64')\n else:\n entries = {int(k): v for k, v in instance[\"values\"].items()}\n self.buckets = pd.Series(entries, index=self.definition.ranges(), dtype='int64').fillna(0)", "def _generate_histograms(self):\n\n def get_xbins(xcolname):\n \"\"\"Returns the 'xbins' dictinary for plotly's 'Histrogram()' method.\"\"\"\n\n xmin, xmax = (float(\"inf\"), -float(\"inf\"))\n for res in self.rsts:\n xdata = self._base_unit(res.df, xcolname)\n xmin = min(xmin, xdata.min())\n xmax = max(xmax, xdata.max())\n\n return {\"size\" : (xmax - xmin) / 1000}\n\n xcolnames = Trivial.list_dedup(self.hist + self.chist)\n hist_set = set(self.hist)\n chist_set = set(self.chist)\n\n for xcolname in xcolnames:\n if xcolname in hist_set:\n ycolname = \"Count\"\n pinfo = self._add_pinfo(xcolname, ycolname, is_hist=True)\n _LOG.info(\"Generating histogram: %s vs %s.\", xcolname, ycolname)\n gobjs = []\n xbins = get_xbins(xcolname)\n for res in self.rsts:\n xdata = self._base_unit(res.df, xcolname)\n try:\n gobj = plotly.graph_objs.Histogram(x=xdata, name=res.reportid, xbins=xbins,\n opacity=self._opacity)\n except Exception as err:\n raise Error(f\"failed to create histogram \"\n f\"'{ycolname}-vs-{xcolname}':\\n{err}\")\n gobjs.append(gobj)\n\n self._create_diagram(gobjs, pinfo)\n\n if xcolname in chist_set:\n ycolname = \"Percentile\"\n _LOG.info(\"Generating cumulative histogram: %s vs %s.\", xcolname, ycolname)\n pinfo = self._add_pinfo(xcolname, ycolname, is_hist=True)\n gobjs = []\n if xcolname not in hist_set:\n xbins = get_xbins(xcolname)\n for res in self.rsts:\n xdata = self._base_unit(res.df, xcolname)\n try:\n gobj = plotly.graph_objs.Histogram(x=xdata, name=res.reportid, xbins=xbins,\n cumulative=dict(enabled=True),\n histnorm=\"percent\",\n opacity=self._opacity)\n except Exception as err:\n raise Error(f\"failed to create cumulative histogram \"\n f\"'{ycolname}-vs-{xcolname}':\\n{err}\")\n gobjs.append(gobj)\n\n self._create_diagram(gobjs, pinfo)", "def getHistogram(self, bins='auto', step='auto', perChannel=False, targetImageSize=200,\n targetHistogramSize=500, **kwds):\n # This method is also used when automatically computing levels.\n if self.image is None or self.image.size == 0:\n return None, None\n if step == 'auto':\n step = (max(1, int(self._xp.ceil(self.image.shape[0] / targetImageSize))),\n max(1, int(self._xp.ceil(self.image.shape[1] / targetImageSize))))\n if self._xp.isscalar(step):\n step = (step, step)\n stepData = self.image[::step[0], ::step[1]]\n\n if isinstance(bins, str) and bins == 'auto':\n mn = self._xp.nanmin(stepData).item()\n mx = self._xp.nanmax(stepData).item()\n if mx == mn:\n # degenerate image, arange will fail\n mx += 1\n if self._xp.isnan(mn) or self._xp.isnan(mx):\n # the data are all-nan\n return None, None\n if stepData.dtype.kind in \"ui\":\n # For integer data, we select the bins 
carefully to avoid aliasing\n step = int(self._xp.ceil((mx - mn) / 500.))\n bins = []\n if step > 0.0:\n bins = self._xp.arange(mn, mx + 1.01 * step, step, dtype=int)\n else:\n # for float data, let numpy select the bins.\n bins = self._xp.linspace(mn, mx, 500)\n\n if len(bins) == 0:\n bins = self._xp.asarray((mn, mx))\n\n kwds['bins'] = bins\n\n cp = getCupy()\n if perChannel:\n hist = []\n for i in range(stepData.shape[-1]):\n stepChan = stepData[..., i]\n stepChan = stepChan[self._xp.isfinite(stepChan)]\n h = self._xp.histogram(stepChan, **kwds)\n if cp:\n hist.append((cp.asnumpy(h[1][:-1]), cp.asnumpy(h[0])))\n else:\n hist.append((h[1][:-1], h[0]))\n return hist\n else:\n stepData = stepData[self._xp.isfinite(stepData)]\n hist = self._xp.histogram(stepData, **kwds)\n if cp:\n return cp.asnumpy(hist[1][:-1]), cp.asnumpy(hist[0])\n else:\n return hist[1][:-1], hist[0]", "def hist(self, nBins, vmin1=None, vmax1=None, vmin2=None, vmax2=None):\n\n if vmin1 == None: vmin1 = self.min1\n if vmax1 == None: vmax1 = self.max1\n if vmin2 == None: vmin2 = self.min2\n if vmax2 == None: vmax2 = self.max2\n histogram = Histogram3D(nBins, (vmin1, vmin2), (vmax1, vmax2),\n log=False)\n histogram.values = np.array(list(\n zip(self.valuesArray1, self.valuesArray2)))\n\n return histogram.get_histogram()", "def __init__(self, param, lower, upper, binCount = 50,\n xscale = None, yweight = None, autoFollow = True):\n logging.debug('Hist init: {} [{}, {}]'\n .format(param.name(), lower, upper))\n super(Histogram, self).__init__(title = \"({0}, {1})\".format(lower, upper))\n # add it to the parameter here\n if isinstance(param, ParameterBase):\n self.param = param # parameter we belong to is mandatory\n self.binCount = int(binCount) # bin count is mandatory\n self.xrange = (float(lower), float(upper))\n # setter chose the first option available for invalid options\n self.xscale = xscale\n self.yweight = yweight\n if not isinstance(autoFollow, bool):\n autoFollow = (autoFollow.title() == \"True\")\n self.autoFollow = autoFollow", "def histogram_single_values(self, x_axis:str, title:str, max_bins:int=.1, density: bool=False, show:bool=True, dest_path:str=None):\n d = {}\n for pop in self.populations:\n df = pd.read_csv(self.path + '\\\\{}.csv'.format(pop))\n trials = df['label'].tolist()\n values = df['Component 1'].tolist()\n response = df['response'].tolist()\n \n for i in range(len(response)):\n if response[i] in d:\n d[response[i]].add(values[i])\n else:\n # Removing day 4 trials\n if eval(trials[i])[0] != 4:\n d[response[i]] = set()\n d[response[i]].add(values[i])\n\n data = []\n labels = d.keys()\n if len(labels)==4:\n labels = ['0->0', '0->1', '1->1', '1->0']\n for key in labels:\n data.append(list(d[key]))\n \n bins = np.linspace(0, max_bins, 500)\n colors = self.__multiclass_to_color(deepcopy(labels))\n for i in range(len(labels)):\n plt.hist(d[labels[i]], bins, alpha=0.5, label=labels[i], color=colors[i], histtype='step', density=density)\n plt.axvline(np.array(list(d[labels[i]])).mean(), ls='--', color=colors[i], linewidth=1, label=\"{} mean\".format(labels[i]))\n plt.title(title)\n plt.xlabel(x_axis)\n if density:\n plt.ylabel(\"density\")\n else:\n plt.ylabel(\"Occurences\")\n plt.legend(loc='upper right')\n\n if show:\n plt.show()\n\n if dest_path !=None:\n plt.savefig(dest_path + '\\\\{}.png'.format(title))\n\n plt.clf()\n plt.cla()\n plt.close()", "def hist(x, bins='auto', range=None, weights=None, errorbars=False, normed=False, scale=None,\n stacked=False, histtype='stepfilled', **kwargs):\n\n # 
Generate a histogram object\n\n hist_con = HistContainer(x, bins, range, weights, errorbars, normed, scale, stacked,\n histtype, **kwargs)\n\n if hist_con.err_return:\n return hist_con.bin_content, hist_con.bin_edges, hist_con.bin_err, hist_con.vis_object\n else:\n return hist_con.bin_content, hist_con.bin_edges, hist_con.vis_object", "def get_hist_data(self, start_datetime, end_datetime, bar_size, fields):\n pass", "def hist_save(self, d, bin1, name, no):\n\t\tfor i in range(0,no):\n\t\t\ts = d[:,i]\n\t\t\tplt.hist(s, bin1, normed=True, color='c')\t# Extracting the parameters from the histogram\n\t\t\tplt.title('Probability Distribution Fnction of %s' %name, fontsize=20)\n\t\t\tplt.xlabel(\"Filter tap values\", fontsize=20)\n\t\t\tplt.ylabel(\"Probability Distribution\", fontsize=20)\n#\t\t\tplt.xlim(0,0.10)\n\t\t\tplt.ylim(0,100)\n#\t\t\tplt.legend(fontsize = 'xx-large')\n\t\t\tplt.savefig('/home/abhishek/Results/comparison_all_sets/Curve fitting/test/set_1/hist_%s_index_%d' %(name,i))\n\t\t\tplt.close()", "def HistData(i, sim_dict, data_folder=\".\"):\n file_name = \"{}/data{}.txt\".format(data_folder, i)\n temp_dict = copy.deepcopy(sim_dict)\n ReMapSim(temp_dict)\n sim = temp_dict['Simulation']\n\n sim.runHist(\n temp_dict['horizon'],\n temp_dict['cycles'],\n file_name)\n\n return None", "def load_hist(self, file, directory, name) :\n hist = file.Get(directory+'/'+name)\n if type(hist) == ROOT.TObject :\n print \"hist not found: \", file.GetName(), \":\", directory+'/'+name\n return hist", "def efficient_Make_Binned_ROC_histograms(title, data, bins, PU_range='full'):\n diff_ran = (-25,25)\n diff_bins = diff_ran[1]-diff_ran[0]\n ratio_ran = (0,10)\n ratio_bins = 60\n\n Diff_hist_list = []\n Ratio_hist_list = []\n CSV_hist_list = []\n ZeroDiv_list = []\n for bin_ in range(len(bins)-1):\n Diff_hist_list.append(rt.TH1D(\"L4-L1_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1]),\"L4-L1_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1]),diff_bins,diff_ran[0],diff_ran[1]))\n Ratio_hist_list.append(rt.TH1D(\"L4_L1_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1]),\"L4_L1_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1]),ratio_bins,ratio_ran[0],ratio_ran[1]))\n CSV_hist_list.append(rt.TH1D(\"CSV_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1]),\"CSV_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1]),ratio_bins,0,1))\n ZeroDiv_list.append(0)\n\n for particle in data:\n if PU_range != 'full':\n if particle[-1]<PU_range[0] or particle[-1]>PU_range[1]: continue\n bin_number = FCM.bin_selection(particle,bins)\n if bin_number == -100: continue\n\n Diff_hist_list[bin_number].Fill(particle[8]-particle[5])\n CSV_hist_list[bin_number].Fill(particle[1])\n if particle[17] != 0:\n L4_L1 = particle[20]/particle[17]\n Ratio_hist_list[bin_number].Fill(L4_L1)\n else:\n ZeroDiv_list[bin_number] += 1\n\n tfile = rt.TFile(\"Thesis_Plots/root_files/{}_histograms.root\".format(title),\"recreate\")\n for hist in Diff_hist_list:\n hist.Write()\n for hist in Ratio_hist_list:\n hist.Write()\n for hist in CSV_hist_list:\n hist.Write()\n print \"saved histograms in Thesis_Plots/root_files/{}_histograms.root\".format(title)\n\n csv_file = open(\"Thesis_Plots/root_files/{}_ZeroDiv.csv\".format(title),\"wb\")\n writer = csv.writer(csv_file)\n writer.writerow(ZeroDiv_list)\n csv_file.close()\n print \"saved zero division occurences in Thesis_Plots/root_files/{}_ZeroDiv.csv\".format(title)", "def curve_fitting(self, d, bin1, name, no):\n\t\tfor i in range(0,no):\n\t\t\ts = d[:,i]\n\t\t\tdist_names = ['rayleigh', 'norm', 'lognorm', 'gamma']# , 'rice']\n\t\t\tcolors = 
['b', 'g', 'r', 'y', 'm']\n\t\t\tfor dist_name,col in zip(dist_names,colors):\n\t\t\t\tdist = getattr(sp, dist_name)\n\t\t\t\tparam = dist.fit(s)\n\t\t\t\tcount, bins, ignored = plt.hist(s, bin1, normed=True)\t# Extracting the parameters from the histogram\n\t\t\t\tpdf_fitted = dist.pdf(bins, *param[:-2], loc=param[-2], scale=param[-1])\n\t\t\t\tplt.plot(bins, pdf_fitted, linewidth=2, color=col , label=dist_name)\n\t\t\t\tplt.title('Distribution Fitting %s' %name, fontsize=25)\n\t\t\t\tplt.xlabel(\"Filter tap values\", fontsize=20)\n\t\t\t\tplt.ylabel(\"Probability Distribution\", fontsize=20)\n\t\t\t\tplt.legend(fontsize = 'xx-large')\n\t\t\t\tplt.savefig('/home/abhishek/Results/comparison_all_sets/Curve fitting/plots/hist_%s_index_%d' %(name,i))\n\t#\t\tplt.show()\n\t\t\tplt.close()", "def fitHisto ( self ,\n histo ,\n draw = False ,\n silent = False ,\n density = False ,\n chi2 = False ,\n args = () , **kwargs ) :\n\n xminmax = histo.xminmax()\n yminmax = histo.yminmax() \n with RangeVar( self.xvar , *xminmax ) , RangeVar ( self.yvar , *yminmax ):\n \n hdata = getattr ( self , 'histo_data' , None )\n if hdata and isinstance ( hdata , H2D_dset ) and \\\n hdata.histo is histo and \\\n hdata.density == density and \\\n hdata.histo_hash == hash ( histo ) :\n ## reuse the existing dataset\n self.debug ('Reuse the existing H2D_dset') \n data = hdata.dset\n else : \n ## convert it!\n self.debug ('Create new H2D_dset' ) \n self.histo_data = H2D_dset ( histo , self.xvar , self.yvar , density , silent )\n data = self.histo_data.dset \n \n ## fit it!!\n if chi2 : return self.chi2fitTo ( data ,\n draw = draw ,\n silent = False ,\n density = density ,\n args = args , **kwargs )\n else : return self.fitTo ( data ,\n draw = draw ,\n nbins = histo.nbinsx() ,\n ybins = histo.nbinsy() ,\n silent = silent ,\n args = args , **kwargs )", "def setup_hist(self):\n self.x_min = {}\n self.x_max = {}\n self.x_max_minus_min = {}\n self.dx = {}\n self.n_bins = {}\n\n self.histogram_edges = {}\n self.histogram_values = {}\n self.histogram_cdf = {}", "def get_histogram_function(file_name, branch):\n\n def fill_bound(x):\n x[0] = x[1] - (x[2] - x[1])\n x[-1] = x[-2] + (x[-2] - x[-3])\n return x\n\n # \"data/EffMap_B0toD0Dspi_Run2.root\"\n with uproot.open(file_name) as f:\n bg = f.get(branch) # \"RegDalitzEfficiency\")\n\n counts, edges = bg.allnumpy()\n x, y = edges[0] # D0barpi, Dspi\n x = fill_bound(x)\n y = fill_bound(y)\n x = (x[:-1] + x[1:]) / 2\n y = (y[:-1] + y[1:]) / 2\n f = interpolate.RectBivariateSpline(x, y, counts)\n return f, x, y", "def make_hist(filename, density_bounds, temperature_bounds, bins):\n\n density_bins = np.logspace(\n np.log10(density_bounds[0]), np.log10(density_bounds[1]), bins\n )\n temperature_bins = np.logspace(\n np.log10(temperature_bounds[0]), np.log10(temperature_bounds[1]), bins\n )\n\n dens, temps, metals = get_data(filename)\n\n H, density_edges, temperature_edges = np.histogram2d(\n dens, temps, bins=[density_bins, temperature_bins], weights=metals\n )\n\n H_norm, _, _ = np.histogram2d(dens, temps, bins=[density_bins, temperature_bins])\n\n # Avoid div/0\n mask = H_norm == 0.0\n H[mask] = -25\n H_norm[mask] = 1.0\n\n return np.ma.array((H / H_norm).T, mask=mask.T), density_edges, temperature_edges", "def get_test_histograms2():\n # dummy dataset with mixed types\n # convert timestamp (col D) to nanosec since 1970-1-1\n import pandas as pd\n import histogrammar as hg\n\n df = pd.util.testing.makeMixedDataFrame()\n\n # building 1d-, 2d-histogram (iteratively)\n hist1 = 
hg.Categorize(unit('C'))\n hist2 = hg.Bin(5, 0, 5, unit('A'), value=hist1)\n hist3 = hg.Bin(5, 0, 5, unit('A'))\n hist4 = hg.Categorize(unit('C'), value=hist3)\n\n # fill them\n hist1.fill.numpy(df)\n hist2.fill.numpy(df)\n hist3.fill.numpy(df)\n hist4.fill.numpy(df)\n\n return df, hist1, hist2, hist3, hist4", "def fitted_data(self, d, bin1, name, no):\n\t\tf = open('/home/abhishek/Results/comparison_all_sets/Curve fitting/data/%s' %(name), 'w')\t\n\t\tfor i in range(0,no):\n\t\t\ts = d[:,i]\n\t\t\tdist_names = ['rayleigh', 'norm', 'lognorm', 'gamma']\n\t\t\tcolors = ['b', 'g', 'r', 'y', 'm']\n\t\t\tf.write('INDEX %d \\n' %i)\n\t\t\tfor dist_name,col in zip(dist_names,colors):\n\t\t\t\tdist = getattr(sp, dist_name)\n\t\t\t\tparam = dist.fit(s)\n\t\t\t\tcount, bins, ignored = plt.hist(s, bin1, normed=True)\t# Extracting the parameters from the histogram\n\t\t\t\tf.write(\" Params %s \\nIgnored %s \\n\" %(param, ignored))\n\t\t\tf.write('\\n \\n \\n')\n\t\n\t\tf.close()", "def generate_histogram(data, x, y, data_format=\"wide\", orientation='v', **kwargs):\n if data_format == \"wide\":\n fig = go.Figure()\n\n # Iterate over attributes provided in the X Axis dropdown and add traces with histograms\n for attribute in x:\n # Flip axes depending on orientation\n if orientation == \"v\":\n x_axis = data[\"value\"][data[\"variable\"] == attribute]\n y_axis = data[\"variable\"][data[\"variable\"] == attribute]\n else:\n y_axis = data[\"value\"][data[\"variable\"] == attribute]\n x_axis = data[\"variable\"][data[\"variable\"] == attribute]\n \n fig.add_trace(go.Histogram(\n x=x_axis,\n y=y_axis,\n name=attribute,\n orientation=orientation\n )\n )\n # Make the histograms visible if they overlap\n fig.update_layout(barmode='overlay', xaxis_title=\"Value\", yaxis_title=\"Frequency\")\n if orientation == \"h\":\n fig.update_layout(xaxis_title=\"Frequency\", yaxis_title=\"Value\")\n fig.update_traces(opacity=0.8)\n return fig\n \n elif data_format == \"long\":\n if orientation == \"h\":\n fig = px.histogram(data, y=x, **kwargs)\n fig.update_layout(barmode='overlay', xaxis_title=\"Frequency\", yaxis_title=\"Value\")\n\n else:\n fig = px.histogram(data, x=x, **kwargs)\n fig.update_layout(barmode='overlay', xaxis_title=\"Value\", yaxis_title=\"Frequency\")\n \n return fig", "def hist(\n self, by: IndexLabel | None = None, bins: int = 10, **kwargs\n ) -> PlotAccessor:\n return self(kind=\"hist\", by=by, bins=bins, **kwargs)", "def getLatticeHistogram(self, **kwargs):\n return self._sim.getLatticeHistogram(species=self, **kwargs)", "def set_ndarray(name, arr):\n set_to_db(key=name, str_value=arr.ravel().tostring())", "def fill(self, value, indices=None):\n # TODO deal with underflow and overflow and do the doc + optimize the function\n\n # change the value array to an array of Histogram index to be modified\n hist_indices = ((value - self.bin_edges[0]) // self.bin_width).astype(int)\n\n # treat overflow and underflow\n hist_indices[hist_indices > self.data.shape[-1] - 1] = self.data.shape[-1] - 1\n hist_indices[hist_indices < 0] = 0\n\n # get the corresponding indices multiplet\n dim_indices = tuple([np.indices(value.shape)[i].reshape(np.prod(value.shape)) for i in\n range(np.indices(value.shape).shape[0])], )\n dim_indices += (hist_indices.reshape(np.prod(value.shape)),)\n\n if value[..., 0].shape == self.data[..., 0].shape or not indices:\n self.data[dim_indices] += 1\n else:\n self.data[indices][dim_indices] += 1", "def ahistogram (inarray,numbins=10,defaultlimits=None,printextras=1):\r\n inarray = 
N.ravel(inarray) # flatten any >1D arrays\r\n if (defaultlimits <> None):\r\n lowerreallimit = defaultlimits[0]\r\n upperreallimit = defaultlimits[1]\r\n binsize = (upperreallimit-lowerreallimit) / float(numbins)\r\n else:\r\n Min = N.minimum.reduce(inarray)\r\n Max = N.maximum.reduce(inarray)\r\n estbinwidth = float(Max - Min)/float(numbins) + 1e-6\r\n binsize = (Max-Min+estbinwidth)/float(numbins)\r\n lowerreallimit = Min - binsize/2.0 #lower real limit,1st bin\r\n bins = N.zeros(numbins)\r\n extrapoints = 0\r\n for num in inarray:\r\n try:\r\n if (num-lowerreallimit) < 0:\r\n extrapoints = extrapoints + 1\r\n else:\r\n bintoincrement = int((num-lowerreallimit) / float(binsize))\r\n bins[bintoincrement] = bins[bintoincrement] + 1\r\n except: # point outside lower/upper limits\r\n extrapoints = extrapoints + 1\r\n if (extrapoints > 0 and printextras == 1):\r\n print '\\nPoints outside given histogram range =',extrapoints\r\n return (bins, lowerreallimit, binsize, extrapoints)", "def test_grad_hist1d(problem, independent_runs, q_kwargs):\n compare_fn = get_compare_fn(independent_runs)\n compare_fn(problem, (GradHist1d, AutogradGradHist1d), q_kwargs)", "def add_histograms(self, doc=None):\n if doc is None:\n doc = self.doc\n\n doc.add_root(age_gender_histograms(\n self.data,\n self.palette['color'],\n self.palette['hover']\n ))\n LOG.info('histograms added')\n return doc", "def plot_histogram(bins, data, title, saving_path, hist_name):\n\n x = np.asarray(data)\n plt.figure()\n plt.hist(x[np.isfinite(x)], bins)\n plt.title(title)\n if not os.path.exists(saving_path):\n os.mkdir(saving_path)\n plt.savefig(saving_path + hist_name)", "def make_1d_hist_plot(self, data, xlabel, title, ylabel, bins=10,\n histtype='bar', color='darkblue', alpha=0.9,\n xlabelsize='18', ylabelsize='18',\n titlesize=16, label=None, subplots_adjust=True,\n subplotnum=None, lw=1, subplotcols=4, normed=False):\n import matplotlib.pyplot as plt\n plt.rcParams['text.usetex'] = True\n plt.grid(axis='y', zorder=0)\n plt.hist(\n data,\n bins=bins,\n histtype=histtype,\n color=color,\n alpha=alpha,\n zorder=3,\n label=label,\n lw=lw,\n normed=normed\n )\n plt.xlabel(xlabel, size=xlabelsize)\n if subplotnum is not None:\n if (subplotnum-1)%subplotcols == 0:\n plt.ylabel(ylabel, size=ylabelsize)\n else:\n plt.ylabel(ylabel, size=ylabelsize)\n plt.title(title, fontsize=titlesize)\n if subplots_adjust:\n plt.subplots_adjust(left=0.10, right=0.90, top=0.85, bottom=0.11)", "def get_histograms(self, folder_name):\n histograms_folder_name = folder_name + '_histograms'\n\n try:\n print(\"Making dir \" + str(histograms_folder_name) + \" for histograms\")\n os.mkdir(histograms_folder_name)\n except OSError:\n print(\"Folder exists, have you already created these/this??\")\n return\n\n print(\"Writing to folder: \" + str(histograms_folder_name))\n photo_list = self.get_photo_list(folder_name, '*.png')\n for name in photo_list:\n image = cv2.imread(folder_name + '/' + name, cv2.IMREAD_ANYDEPTH)\n plt.hist(image.ravel(), 256, [0, 65535])\n plt.savefig(histograms_folder_name + '/' + name + 'histogram.eps', format='eps')\n plt.clf()\n # plt.show()", "def anticum_hist(data, bins=None, toadd=0, zero=False, zerox=False):\n\n from numpy import histogram, unique, append, cumsum, zeros, array\n data = array(data)\n\n if bins is None:\n temp, bins = histogram(data, unique(data))\n hist = append(cumsum(temp[::-1])[::-1], 1)\n else:\n if len(data) == 0:\n return zeros(len(bins)), bins\n # if max(data) > max(bins):\n # print \"Must have the largest bin 
be bigger than the largest data point.\"\n # print max(bins)\n # print max(data)\n # import sys\n # sys.exit(1337)\n temp, bins = histogram(data, bins=bins)\n numbig = data[data > bins.max()].shape[0]\n # add on the objects that are above the last bin to the last count, so that they're included in the cumulative sum\n temp[-1] += numbig\n hist = append(cumsum(temp[::-1])[::-1], numbig)\n\n hist = hist + toadd # add some number to all the values\n\n if zero:\n hist = append(hist, 1e-10)\n bins = append(bins, bins[-1])\n if zerox:\n hist = append(hist[0], hist)\n bins = append(1e-10, bins)\n\n return hist, bins", "def ANN_Make_Binned_ROC_histograms(title,model, x_data, pT, CSV, bins, PU_range='full',addFeature=False):\n nbins = 60\n\n ANN_hist_list = []\n CSV_hist_list = []\n for bin_ in range(len(bins)-1):\n ANN_hist_list.append(rt.TH1D(\"ANN_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1]),\"ANN_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1]),nbins,0,1))\n CSV_hist_list.append(rt.TH1D(\"CSV_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1]),\"CSV_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1]),nbins,0,1))\n\n\tif addFeature == False:\n\t\tpred_y = model.predict(ANN_functional_shape(x_data))\n\telif addFeature == \"pT\":\n\t\tpred_y = model.predict(ANN_functional_shape(x_data)+[pT/200])\n\telif addFeature == \"PV\":\n\t\tassert x_data.shape[1] == 21, \"wrong x_data shape: PV cannot be found\"\n\t\tpred_y = model.predict(ANN_functional_shape(x_data)+[x_data[:,-1]/10.])\n\telse:\n\t\tprint \"invalid feature selection\"\n\t\treturn None\n\tbin_numbers = ANN_bin_selection(pT,bins)\n\n for n,particle in enumerate(x_data):\n if PU_range != 'full':\n if particle[-1]<PU_range[0] or particle[-1]>PU_range[1]: continue\n if bin_numbers[n] == -100: continue\n ANN_hist_list[int(bin_numbers[n])].Fill(pred_y[n])\n CSV_hist_list[int(bin_numbers[n])].Fill(CSV[n])\n\n tfile = rt.TFile(\"Thesis_Plots/root_files/{}_histograms.root\".format(title),\"recreate\")\n for hist in ANN_hist_list:\n hist.Write()\n for hist in CSV_hist_list:\n hist.Write()\n print \"saved histograms in Thesis_Plots/root_files/{}_histograms.root\".format(title)", "def test_hist_from_gen():\n\n @njit\n def gen():\n x = y = count = 0\n while True:\n yield x, y\n count += 1\n x = np.sin(count)\n y = np.cos(count)\n\n N_ITER = 100\n W, H = 5, 6\n\n # put the vals from the generator into arrays, so can use np.histogram2d\n x_vals = np.zeros(N_ITER)\n y_vals = np.zeros(N_ITER)\n iterator = gen()\n for i in range(N_ITER):\n x, y = next(iterator)\n x_vals[i] = x\n y_vals[i] = y\n\n RANGE = [[x_vals.min(), x_vals.max()], [y_vals.min(), y_vals.max()]]\n\n # Test without log:\n hist_g = pixhist.from_gen(\n gen, N_ITER, W, H, RANGE, make_xy_proportional=False, log=False\n )\n hist_np = hist_from_arrays_np(x_vals, y_vals, W, H)\n\n assert np.all(hist_g == hist_np), \"hist_g != hist_np\"\n\n # Test with log:\n hist_g = pixhist.from_gen(\n gen, N_ITER, W, H, RANGE, make_xy_proportional=False, log=True\n )\n\n assert np.all(hist_g == np.log(hist_np + 1)), \"Logs not equal.\"", "def get1d(infile, histname, subdir='',verbose=False):\n \n ### 1d Histogram\n Hist = getter(infile,histname,subdir,verbose)\n \n nbinsHist = Hist.GetSize()-2\n axesX = np.zeros(nbinsHist)\n edgesX = np.zeros(nbinsHist+1)\n Arr = np.zeros(nbinsHist)\n dArr = np.zeros(nbinsHist)\n for j in xrange(0,nbinsHist):\n axesX[j] = Hist.GetBinCenter(j+1)\n edgesX[j] = Hist.GetBinLowEdge(j+1)\n Arr[j] = Hist.GetBinContent(j+1)\n dArr[j] = Hist.GetBinError(j+1)\n edgesX[nbinsHist] = 
Hist.GetBinLowEdge(nbinsHist+1)\n\n return axesX, edgesX, Arr, dArr", "def CreateHistFactoryFromMeasurement(measurement_dict, options=None):\n \n channel_list = measurement_dict[\"channel_list\"]\n measurement_info = measurement_dict[\"measurement_info\"]\n\n # Get the name of the sample\n # that is interpreted as signal\n signal_sample = str(measurement_info[\"signal_name\"])\n SigmaVarName = \"Sigma_\" + signal_sample + \"_OverSM\"; \n\n meas = ROOT.RooStats.HistFactory.Measurement(\"meas\", \"meas\")\n meas.SetPOI( SigmaVarName )\n meas.SetLumi( 1.0 )\n meas.SetLumiRelErr( float(measurement_info[\"lumi_uncertainty\"]) )\n meas.SetExportOnly( False )\n \n for chan_dict in channel_list:\n chan = ROOT.RooStats.HistFactory.Channel( str(chan_dict[\"name\"]) )\n chan.SetData( float(chan_dict['data']) )\n # chan.SetStatErrorConfig( 0.05, \"Poisson\" )\n \n for sample_dict in chan_dict[\"samples\"]:\n sample_name = sample_dict[\"name\"]\n sample = ROOT.RooStats.HistFactory.Sample( str(sample_name) )\n for syst in sample_dict[\"systematics\"]: \n sample.AddOverallSys( str(syst[\"name\"]), float(syst[\"FracDown\"]), float(syst[\"FracUp\"]) )\n sample.SetValue( float(sample_dict['value']) )\n if sample_name == signal_sample:\n sample.AddNormFactor( SigmaVarName, 1, 0, 3 )\n chan.AddSample( sample )\n \n meas.AddChannel( chan )\n \n # Now, print and do the fit\n meas.PrintTree();\n\n # Fit the workspace\n wspace = ROOT.RooStats.HistFactory.HistoToWorkspaceFactoryFast.MakeCombinedModel( meas );\n combined_config = wspace.obj(\"ModelConfig\");\n simData = wspace.data(\"obsData\");\n constrainedParams = combined_config.GetNuisanceParameters();\n POIs = combined_config.GetParametersOfInterest();\n \n # RooCmdArg(\"Minos\",kTRUE,0,0,0,0,0,&minosArgs,0)\n\n model = combined_config.GetPdf();\n fit_result = model.fitTo(simData, ROOT.RooCmdArg(\"Minos\",True,0,0,0,\"\",\"\",ROOT.RooArgSet(wspace.var(SigmaVarName)),0), \n ROOT.RooCmdArg(\"PrintLevel\",1), \n ROOT.RooCmdArg(\"Save\",True));\n\n # Get the Likelihood curve\n POI = wspace.var(SigmaVarName)\n png_string = CreateProfileLikelihoodPlot(model, simData, POI)\n\n # Get the Fitted Bins\n fitted_bins = getFittedBinHeights(combined_config, simData)\n\n # Delete the model\n wspace.IsA().Destructor( wspace )\n meas.IsA().Destructor( meas )\n\n return (fit_result, fitted_bins, png_string)", "def draw_histogram(data, # type: thelper.typedefs.ArrayType\n bins=50, # type: Optional[int]\n xlabel=\"\", # type: Optional[thelper.typedefs.LabelType]\n ylabel=\"Proportion\", # type: Optional[thelper.typedefs.LabelType]\n show=False, # type: Optional[bool]\n block=False, # type: Optional[bool]\n ): # type: (...) -> thelper.typedefs.DrawingType\n fig, ax = plt.subplots()\n ax.hist(data, density=True, bins=bins)\n if len(ylabel) > 0:\n ax.set_ylabel(ylabel)\n if len(xlabel) > 0:\n ax.set_xlabel(xlabel)\n ax.set_xlim(xmin=0)\n if show:\n fig.show()\n if block:\n plt.show(block=block)\n return fig\n plt.pause(0.5)\n return fig, ax" ]
[ "0.6048329", "0.59464675", "0.5933661", "0.5887746", "0.58725816", "0.57364476", "0.5714648", "0.5633713", "0.56145376", "0.5604098", "0.5588111", "0.5546958", "0.554628", "0.55081207", "0.549454", "0.54831433", "0.5475002", "0.5430655", "0.54043436", "0.53916496", "0.53677046", "0.53662616", "0.5340251", "0.530631", "0.52899975", "0.52799666", "0.52710634", "0.5262917", "0.5177245", "0.5161277", "0.5158786", "0.51579523", "0.51259464", "0.51081395", "0.50861466", "0.50850576", "0.50828534", "0.5076624", "0.50751805", "0.5055748", "0.5048084", "0.5018874", "0.50164396", "0.5007862", "0.50078577", "0.5007467", "0.5002948", "0.50015944", "0.49879774", "0.49836743", "0.49757552", "0.49678805", "0.4966966", "0.49560696", "0.4953377", "0.4947596", "0.49453348", "0.49388728", "0.49320498", "0.49320164", "0.49227", "0.49073064", "0.48991972", "0.4898994", "0.48479572", "0.48421872", "0.48407423", "0.48348823", "0.48311722", "0.48262742", "0.48252127", "0.4801855", "0.47978818", "0.47830975", "0.4782936", "0.47750658", "0.47749978", "0.47723868", "0.4764821", "0.47623208", "0.47529918", "0.47463325", "0.47455555", "0.4743161", "0.4738789", "0.47332", "0.47286192", "0.472179", "0.4714049", "0.46995088", "0.4693055", "0.46812043", "0.46791294", "0.4674278", "0.46659788", "0.46636474", "0.46626243", "0.46613902", "0.46603078", "0.4657775" ]
0.73073107
0
Decorator for posttrigger cuts
def apply_trigger_first(cut_fn):
    def wrapped(arrays, cut):
        arrays = svjflatanalysis.arrayutils.apply_trigger_and_jetpt550(arrays, 2018)
        return cut_fn(arrays, cut)
    return wrapped
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _post_hooks(self):", "def onCut(self):\n pass", "def triggered(self, *args, **kwargs): # real signature unknown\n pass", "def __call__(self, trigger, type, event):", "def post_run_func_checked(driver: HammerDriver) -> None:\n if post_run_func is not None:\n post_run_func(driver)", "def at_post_cmd(self):\n pass", "def _hook(self):", "def on_fire(self, watermark, window, context):\n pass", "def fire(self):", "def light_post():\n return 'do some magic!'", "def post_event(self, func, *args, **kwargs):\n if not callable(func):\n assert(len(func) == 5)\n self._events.append(func + (log.get_tb(1), time.time()))\n else:\n self._events.append((func, args, kwargs, None, 0, log.get_tb(), time.time()))", "def on_hook(self) -> None:", "def with_post_function(self, post_fcn):\n old_post = self._post\n self._post = lambda loss: post_fcn(old_post(loss))\n return self", "def self_decorator(self, func):\n # TODO: Any other ways to pass variables to handlers?\n def command_func(update, context, *args, **kwargs):\n return func(self, update, context, *args, **kwargs)\n return command_func", "def _postprocess(self):", "def post_processor(self):", "def decorator(func):\n\t\treturn push_aspect(name or func.__name__, func)", "def _timestep_after_hook(self, *args, **kwargs):\n pass", "def hook_adapter_script(self) -> HookAdapterScript:", "def EpicsFunction(f):\n def wrapper(*args, **kwargs):\n wx.CallAfter(f, *args, **kwargs)\n return wrapper", "def post_execute(self):", "def fire(self):\n pass", "def off_hook(self) -> None:", "def after_request(self, func: typing.Callable):\n return self.add_hook(type_=\"post\", hook=func)", "def post_process(cls, *args, **kwargs):\n pass", "def post_process(cls, *args, **kwargs):\n pass", "def post_process(cls, *args, **kwargs):\n pass", "def post_process(cls, *args, **kwargs):\n pass", "def trigger(self, type, event):", "def post():\n raise NotImplementedError", "def do_post(self, *args):\n raise NotImplementedError()", "def _post(self, *args, **kwargs):\n return _TestA_swig.my_qpsk_demod_cb_sptr__post(self, *args, **kwargs)", "def Postcall(function_to_call_later): \n def postcall_inside(fun): \n @functools.wraps(fun)\n def relay(*args, **kwargs):\n return function_to_call_later(fun(*args, **kwargs))\n return relay\n return postcall_inside", "def toolDropped(*args, **kwargs)->None:\n pass", "def after_test(self, func, *args, **kwargs):\n pass", "def apply(self):", "def post_exec(self):\n raise NotImplementedError(\"Must implement in frontend subclass.\")", "def after_update(self, *args):\n raise NotImplementedError", "def closure(self, t):\n raise NotImplementedError", "def __subclasshook__(*args):", "def __subclasshook__(*args):", "def __subclasshook__(*args):", "def __subclasshook__(*args):", "def __subclasshook__(*args):", "def __subclasshook__(*args):", "def __subclasshook__(*args):", "def __subclasshook__(*args):", "def __subclasshook__(*args):", "def __subclasshook__(*args):", "def __subclasshook__(*args):", "def __subclasshook__(*args):", "def __subclasshook__(*args):", "def __subclasshook__(*args):", "def __subclasshook__(*args):", "def __subclasshook__(*args):", "def __subclasshook__(*args):", "def __subclasshook__(*args):", "def __subclasshook__(*args):", "def take_action(self, *args, **kwargs):\r\n pass", "def post_hook(config, final=False):\n if config.post_hook:\n if final or config.verb != \"renew\":\n logger.info(\"Running post-hook command: %s\", config.post_hook)\n _run_hook(config.post_hook)", "def post_processing(self) -> Optional[Callable]:\n if (\n 
\"transforms\" not in self._spec\n or \"post\" not in self._spec[\"transforms\"]\n ):\n # Passthrough\n return lambda x: x\n f = find_class(self._spec[\"transforms\"][\"post\"])\n return f(self.options)", "def wrapper(self, *args, **kwargs):\n if self.afterid:\n self.master.after_cancel(self.afterid)\n function(self, *args, **kwargs)\n self.afterid = self.master.after(5000, self.cycle)", "def post_process(self, relevant_targets):\r\n pass", "def on_shutdown(self):\n\n def decorator(coro):\n self._hooks.append((\"shutdown\", coro))\n return coro\n\n return decorator", "async def post_behavior_run(self) -> None:", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass" ]
[ "0.58980626", "0.5877055", "0.5827994", "0.5693356", "0.5606729", "0.55755043", "0.55027366", "0.5471056", "0.5466376", "0.5457004", "0.5404508", "0.5376659", "0.5318747", "0.53063715", "0.5298729", "0.5287767", "0.52239275", "0.5207728", "0.5186563", "0.51724184", "0.5144093", "0.51379627", "0.5122194", "0.51188064", "0.51039726", "0.51039726", "0.51039726", "0.51039726", "0.5079243", "0.5067651", "0.50170463", "0.5016877", "0.50066555", "0.5005766", "0.4978725", "0.49738798", "0.4969882", "0.4957267", "0.49510425", "0.49365", "0.49365", "0.49365", "0.49365", "0.49365", "0.49365", "0.49365", "0.49365", "0.49365", "0.49365", "0.49365", "0.49365", "0.49365", "0.49365", "0.49365", "0.49365", "0.49365", "0.49365", "0.49365", "0.49216264", "0.49203137", "0.49198705", "0.49185246", "0.49112496", "0.49097204", "0.49090904", "0.49089384", "0.49089384", "0.49089384", "0.49089384", "0.49089384", "0.49089384", "0.49089384", "0.49089384", "0.49089384", "0.49089384", "0.49089384", "0.49089384", "0.49089384", "0.49089384", "0.49089384", "0.49089384", "0.49089384", "0.49089384", "0.49089384", "0.49089384", "0.49089384", "0.49089384", "0.49089384", "0.49089384", "0.49089384", "0.49089384", "0.49089384", "0.49089384", "0.49089384", "0.49089384", "0.49089384", "0.49089384", "0.49089384", "0.49089384", "0.49089384" ]
0.5477666
7
Takes a cut function and tries to return a title for it
def get_title(fn): title = fn.name if hasattr(fn, 'name') else fn.__name__ title = title.replace('_cut_function','') suffix = [] # if 'JetsAK15_subleading_' in title: # suffix.append(r'$j^{\mathrm{AK15}}_{\mathrm{subl}}$') title = title.replace('JetsAK15_subleading_', '').replace('subleading_', '') if hasattr(fn, 'left'): suffix.append('({:.0f} < {} < {:.0f})'.format(fn.left, svjflatanalysis.utils.get_title('mt'), fn.right)) # Transform variable name to title stirng title = svjflatanalysis.utils.get_title(title) if hasattr(fn, 'operator'): title += ' ' + fn.operator + ' cut' # Add the suffix title += ' ' + ' '.join(suffix) return title
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_title():", "def make_title(words):", "def getTitle(test:str) -> str:\n return test[5:].strip()", "def PROPER(text):\n return text.title()", "def test_title(names):", "def title(value):\r\n title_word = lambda w: w if RE_UPPERCASE.search(w) else old_title(w)\r\n return re.sub('(\\S+)', lambda m: title_word(m.group(0)), value)", "def _title(hit: DD) -> str:\n return hit[\"_source\"][\"title\"]", "def get_title(line):\n title = line.split(' (')[0]\n return title", "def cut(value,arg):\n return cut.replace(arg,\"\")", "def get_title(f):\n return os.path.basename(f)", "def _title(profile):\n if profile['operation'] == 'differential':\n p1, p2 = profile['profiles']\n return 'differential ({}, {})'.format(_title(p1), _title(p2))\n elif profile['operation'] == 'local feature':\n p = profile['profile']\n return 'local feature {} ({})'.format(profile['function'], _title(p))\n else:\n return ' '.join([str(x) for x in profile.values()])", "def print_title( title, decorators ):\n decorators = \"*\" * decorators\n print \"\\n%s %s: %s\\n\" % ( decorators, title, decorators )", "def test_getTitle(self):\n def checkNameAndTitle(name, titlesolution):\n title = self._nameClassifierBuilder._getTitle(name)\n self.assertEquals(titlesolution, title)\n\n checkNameAndTitle(\"Mrs. ldajfhgp\", \"Mrs\")\n checkNameAndTitle(\"dlsfajkMrdlkjaf\", \"Mr\")\n checkNameAndTitle(\"dagddgwdasJonkheer\", \"Jonkheer\")", "def get_title(self):\n return self.run_command('get_title')[0]", "def test_get_title(double_title, single_title, empty_title):\n assert get_title(double_title) == \"Parton distributions with LHC data\"\n assert get_title(single_title) == \"The Large Hadron Collider\"\n assert get_title(empty_title) == \"\"\n\n no_title_key = {\n \"not_titles\": []\n }\n assert get_title(no_title_key) == \"\"", "def title(self):\n return self.run_command('title')[0]", "def testCapTitleAgain(self):\n val = capTitles(\"victor\") \n self.assertEqual(val, \"Victor\")", "def title(self) -> String:\n pass", "def truncate_title(title):\n return title if len(title) <= 70 else title[:70]+\"...\"", "def parse_title(title, various):\n if various and \" - \" in title:\n title = title.split(\" - \", 1)[1]\n return RE_FEAT.sub(\"\", title).rstrip()", "def get_title(self) -> str:\n pass", "def titleForSelection(self, selection):\n if selection is None or selection.filename is None:\n return None\n else:\n directory, filename = os.path.split(selection.filename)\n try:\n slicing = self.__formatSlices(selection.slice)\n except Exception:\n _logger.debug(\"Error while formatting slices\", exc_info=True)\n slicing = '[sliced]'\n\n permuted = '(permuted)' if selection.permutation is not None else ''\n\n try:\n title = self.TITLE_PATTERN.format(\n directory=directory,\n filename=filename,\n datapath=selection.datapath,\n slicing=slicing,\n permuted=permuted)\n except Exception:\n _logger.debug(\"Error while formatting title\", exc_info=True)\n title = selection.datapath + slicing\n\n return title", "def correct_cap(title):\n try:\n fl = fln[title]\n return title\n except:\n #capitalize first letter only\n try:\n fl = fln[title[0].upper() + title[1:]]\n return title[0].upper() + title[1:]\n except:\n #try title case\n try:\n fl = fln[title.title()]\n return title.title()\n except KeyError:\n return \"\"", "def title(text, level=0):\n return '\\n' + text + '\\n' + '=-~_#%^' [level] * len(text) + '\\n\\n'", "def pretty_title(title):\n output = '-' * 5 + ' ' + title.lower() + ' ' + '-' * 5\n return output", "def x_group_label(\n x_gr: int, 
cut: int = 20, name_dict: Dict[AnyStr, AnyStr] = names_dict\n) -> AnyStr:\n name = name_dict[str(x_gr)]\n if len(name) > cut:\n return f\"{name[:cut-3]}...\"\n else:\n return name", "def pathtitle(path):\n return thing_from_path(path).title", "def title_n(self):\n self.run_command('title_n')", "def favorite_book(title):\n print(\"You should really read \" + title.title() + \", it's my favorite!\")", "def proper_title_case(s):\n nocaps = [\"the\"] # This needs to be extended.", "def print_title(title):\n print \"\\n\"+\"#\"*32+\"\\n# \"+title+\"\\n\"+\"#\"*32+\"\\n\"", "def make_heading(title):\n title_length = len(title)\n if title_length < 1:\n return \"\"\n elif title_length > 60:\n raise ValueError(\"The title is too long\")\n upper_title = title.upper()\n border_length = len(title) + 2\n border = \"=\" * border_length\n return \" \" + upper_title + \" \\n\" + border", "def getTitle(self): #$NON-NLS-1$\r", "def getTitle(self): #$NON-NLS-1$\r", "def getatitle(allcontent, corpus):\n for i in range(0, len(allcontent)):\n words = re.split(r'\\s+', allcontent[i])\n if words[0] == \"Title\":\n for j in range(2, len(words)):\n if len(processword(words[j])) > 0:\n corpus.append(processword(words[j]))", "def get_topicname ( base_name, object_type, condition ) :\n return base_name + '-' + object_type.upper( ) + '-' + condition.upper( )", "def clean_title(\r\n title: str,\r\n mode: Literal[\"soft\", \"hard\", \"safe\"],\r\n allow_dot: bool = False,\r\n n: Optional[int] = None,\r\n) -> str:\r\n ...", "def istitle(a):\n return _vec_string(a, bool_, 'istitle')", "def pretty_title(title):\n output = '-' * 5 + ' ' + title + ' ' + '-' * 5\n return output", "def title(string: str, level=H1) -> str:\n appended = []\n for lvl in range(0, level):\n appended.append(\"#\")\n return f\"{''.join(appended)} {string}\"", "def remove_article(str_):\n return str_.replace('the ', '').title()", "def name_title(self, val: str) -> None:\n\n # Make sure they don't pass underscores; title versions are just\n # words and spaces.\n if '_' in val:\n raise CleanError(\n f\"Custom FeatureSet name_title '{val}' contains\"\n ' underscores; it must contain only spaces.'\n )\n\n # Make sure the value they're providing still matches their base\n # name. It could be easy to let this fall out of sync\n # accidentally.\n if val.lower().replace(' ', '_') != self._name:\n raise CleanError(\n f\"Custom FeatureSet name_title '{val}' letters/spacing\"\n f\" does not match base name '{self._name}'.\"\n )\n\n # Ok val; we will accept you.\n self._name_title = val", "def cut(value,arg):\n return value.replace(arg, '')", "def _visit_title(self, elem):\n pass", "def summary_title(tile_summary):\n return f\"Slide tile_summary.slide_name Tile Summary:\"", "def cut(value,arg):\n return value.replace(arg,'')", "def cut(value,arg):\n return value.replace(arg,'')", "def cut(value,arg):\n return value.replace(arg,'')", "def cut(value,arg):\n return value.replace(arg,'')", "def cut(value,arg):\n return value.replace(arg,'')", "def cut(value, arg):\n return value.replace(arg, '') # we can replace arg with ''. 
We also need to register it", "def _subconstituent_name(h):\n if h == 1:\n o = \"1st\"\n elif h == 2:\n o = \"2nd\"\n elif h == 3:\n o = \"3rd\"\n else:\n o = \"%dth\" % h\n return \"%s subconstituent\" % o", "def __str__(self):\n len_title=75\n if len(self.description)>len_title:\n titlestring=self.description[:len_title] + '...'\n else:\n titlestring=self.description\n return titlestring", "def format_title(input_str):\n title_mapping = {'PD_whole_tree': 'Phylogenetic Diversity'}\n\n if input_str in title_mapping:\n return title_mapping[input_str]\n else:\n return ' '.join(map(lambda e: e[0].upper() + e[1:],\n input_str.split('_')))", "def title(self) -> str:\n\t\t# pylint: disable=unsubscriptable-object\n\t\treturn self.value[1]", "def get_title(self, obj):\n title = obj.habit.title\n return title", "def get_sub_title(self, article: BeautifulSoup):\n return self.get_text(article, self.parsing_template.sub_title)", "def TitlePrint(title):\n titleLength = len(title)\n barLength = titleLength + 12\n fmtdTitle = '----- {0} -----'.format(title)\n bar = '-' * barLength\n print(bar, fmtdTitle, bar,\n sep='\\n', end='\\n\\n')", "def test_basic(self):\n result = pdfmerger.strip_title(\"Interesting title (123) [321]\")\n self.assertEqual(\"Interesting title\", result)", "def get_title(filename):\n _,long_name = filename.split(\"\\\\\")\n name,_ = long_name.split(\"_gold_\")\n f = os.path.join(FIXED, name + \".fix\")\n #with open(f, 'r') as in_f:\n with codecs.open(f, 'r', encoding='utf-8') as in_f:\n lines = in_f.readlines()\n i = 0\n line = lines[i]\n while len(line) < 5:\n i += 1\n line = lines[i]\n return lines[i]", "def title_case(sentence):\n ignore = ['the', 'of', 'a']\n titled = [x[0].upper()+x[1:].lower() for x in sentence.split()]\n caps = []\n caps.append(titled[0])\n for tit in titled[1:]:\n print(tit)\n if tit.lower() in ignore:\n caps.append(tit.lower())\n else:\n caps.append(tit)\n titled = ' '.join(caps)\n return titled", "def cut(value, arg):\n return value.replace(arg, '')", "def cut(value, arg):\n return value.replace(arg, '')", "def title(self) -> str:\n raise NotImplementedError", "def mycut(value, arg):\r\n return value.replace(arg, '')", "def cutit(value,arg):\n return value.replace(arg, ' replaced text ')", "def _prettyfilename(self):\n return self.title", "def title_p(self):\n self.run_command('title_p')", "def getStoryTitle(self, source):\n titleStart = source.find('>', source.find('>')+1) + 1\n titleEnd = source.find('</a>')\n title = source[titleStart:titleEnd]\n title = title.lstrip() # Strip trailing whitespace characters.\n return title", "def create_title(title, year=None, time_step=None, base=0, interval=None,\n gage=None, m=None, h=None):\n if type(gage) is list or type(gage) is tuple:\n title = title + ' at listed gages'\n elif gage is not None:\n title = title + ' at '+ gage\n \n if m is not None:\n title = title + ' for Month {mo} of'.format(mo=m)\n elif h is not None:\n title = title + ' for Hour {ho} of'.format(ho=h) \n elif interval is 'seasonal':\n title = title + ' for Months of'\n elif interval is 'diurnal':\n title = title + ' for Hours of'\n if time_step is not None:\n ts = time_step.replace('min', ' minute').replace('T', ' minute').replace('H', ' hour').replace('D', ' day')\n title = title.format(ts=ts)\n if year is not None:\n title = title +' '+ year\n return title", "def get_title(self):\n return self._get_title_()", "def get_window_title(self): # real signature unknown; restored from __doc__\n return \"\"", "def istitle(self) -> bool:\n pass", "def 
_make_title(self):\n ret = self.properties['reason'].capitalize()\n ret += ' has been reported near ' + self.properties['address'].split(',')[0]\n time = datetime.strptime(self.properties['when'], '%Y-%m-%dT%H:%M:%S')\n times = [time.strftime(i).lstrip('0') for i in ('%m', '%d', '%I:%M%p')]\n ret += ' on {}/{} at {}'.format(times[0], times[1], times[2])\n return ret", "def get_title(rating):\n title = \"\"\n if rating < 1200:\n title = [\"Newbie\", \"grey-text\"]\n elif rating < 1400:\n title = [\"Pupil\", \"light-green-text\"]\n elif rating < 1600:\n title = [\"Specialist\", \"cyan-text\"]\n elif rating < 1900:\n title = [\"Expert\", \"indigo-text\"]\n elif rating < 2100:\n title = [\"Candidate Master\", \"purple-text\"]\n elif rating < 2300:\n title = [\"Master\", \"amber-text\"]\n elif rating < 2400:\n title = [\"International Master\", \"orange-text\"]\n elif rating < 2600:\n title = [\"Grandmaster\", \"red-text\"]\n elif rating < 3000:\n title = [\"International Grandmaster\", \"red-text\"]\n else:\n title = [\"Legendary Grandmaster\", \"red-text\"]\n return title", "def ftitle(self, text):\n return \"{} - {}\".format(self._app_name, text)", "def nice_name():\n\n pass", "def showSelectionInTitle(*args, **kwargs)->None:\n pass", "def book_title(title):\n # this will capitalize the first letter of every word\n title = title.title()\n pre_title = []\n pre_title = title.split(\" \")\n new_title = \"\"\n for word in pre_title:\n # If the word is the first word of the title it has to be capitalize\n if word != pre_title[0]:\n # If the word is in the small word list make it lower case\n if word.lower() in small_words:\n word = word.lower()\n new_title = new_title + word + ' '\n# Remove the lagging space \n return new_title.strip()", "def ccut(value,arg):\n return value.replace(arg, '')", "def get_title(line):\n\n assert line is not None\n # the format of line should be like this:\n # ' TITLE \"Some Title\"'\n # and we simply can just ignore the first 11 chars, but to be safe lets\n # make this assertion\n assert line[:11] == ' TITLE \"'\n # the last char should be a quote, this assertion helps validate this\n # assumption\n assert line[-1] == '\"'\n return line[11:-1].strip()", "def printable(title, subtitle=None, resp=None):\n title = getfirst(title)\n subtitle = getfirst(subtitle)\n resp = getfirst(resp)\n if subtitle:\n title += \" : \" + subtitle\n if resp:\n title += \" / \" + resp\n return title", "def get_title(rating):\n\ttitle = \"\"\n\tif rating < 1200:\n\t\ttitle = [\"Newbie\", \"grey-text\"]\n\telif rating < 1400:\n\t\ttitle = [\"Pupil\", \"light-green-text\"]\n\telif rating < 1600:\n\t\ttitle = [\"Specialist\", \"cyan-text\"]\n\telif rating < 1900:\n\t\ttitle = [\"Expert\", \"indigo-text\"]\n\telif rating < 2100:\n\t\ttitle = [\"Candidate Master\", \"purple-text\"]\n\telif rating < 2300:\n\t\ttitle = [\"Master\", \"amber-text\"]\n\telif rating < 2400:\n\t\ttitle = [\"International Master\", \"orange-text\"]\n\telif rating < 2600:\n\t\ttitle = [\"Grandmaster\", \"red-text\"]\n\telif rating < 3000:\n\t\ttitle = [\"International Grandmaster\", \"red-text\"]\n\telse:\n\t\ttitle = [\"Legendary Grandmaster\", \"red-text\"]\n\treturn title", "def _title(self, path):\n title = os.path.basename(os.path.splitext(path)[0])\n return title", "def favorite_book(title):\n\tprint(title + \" is one of my favorite book.\")", "def get_title(self, entry):\n title = _('%(title)s (%(word_count)i words)') % \\\n {'title': entry.title, 'word_count': entry.word_count}\n return title", "def 
better_title(value):\n t = APOSTROPHE_LETTER_REGEX.sub(\n lambda m: m.group(0).lower(), value.title())\n return DIGIT_LETTER_REGEX.sub(lambda m: m.group(0).lower(), t)", "def cli_get_process_title():\n raise NotImplementedError()", "def generate_horror_title():\n d666 = random.randint(1, 666)\n if d666 <= 111:\n #the adj noun\n return \"The \" + horror_adj[random.randint(0, len(horror_adj) - 1)] + \" \" + horror_noun[random.randint(0, len(horror_noun) - 1)]\n elif d666 > 111 and d666 <= 222: \n #noun of noun\n return horror_noun[random.randint(0, len(horror_noun) - 1)] + \" of \" + horror_noun[random.randint(0, len(horror_noun) - 1)]\n elif d666 > 222 and d666 < 444: \n #the adj noun of verb \n return \"The \" + horror_adj[random.randint(0, len(horror_adj) - 1)] + \" \" + horror_noun[random.randint(0, len(horror_noun) - 1)] + \" of \" + horror_verb[random.randint(0, len(horror_verb) - 1)]\n elif d666 >= 444 and d666 < 555: \n #noun of noun\n return horror_noun[random.randint(0, len(horror_noun) - 1)] + \" of \" + horror_noun[random.randint(0, len(horror_noun) - 1)]\n elif d666 >= 555:\n #verb of the adj noun\n return horror_verb[random.randint(0, len(horror_verb) - 1)] + \" of the \" + horror_adj[random.randint(0, len(horror_adj) - 1)] + \" \" + horror_noun[random.randint(0, len(horror_noun) - 1)]", "def title_contains(title_substring):\n title_substring = title_substring.encode('ascii')\n def f(win):\n t = conv(win.title)\n return title_substring in t\n return f", "def favoriteBook(title):\n\t\n\tprint(\"\\nOne of my favorite books is \" + title.title() + \"!\")", "def get_detail_title(soort, edit, obj):\n naam_ev = get_names_for_type(soort)[0]\n if edit == 'new':\n return _('Nieuw(e) ') + str(naam_ev)\n try:\n title = \" \".join((naam_ev.capitalize(), obj.naam))\n except AttributeError:\n title = \" \".join((naam_ev.capitalize(), obj.nummer))\n return title", "def html_title(title):\n return '<center><h1>%s</h1></center>' % (title)", "def extract_info(config, cut, label):\n cfg = filter(lambda c: c['name'] == cut, config['physics']['cuts'])[0]\n text = \"\"\n if 'max' not in cfg:\n text += \"#geq \"\n text += str(cfg['min'])\n if 'max' in cfg and cfg['max'] != cfg['min']:\n text += '-' + str(cfg['max']) + ' ' + label + 's'\n elif cfg['min'] != 1:\n text += ' ' + label + 's'\n else:\n text += ' ' + label\n return text", "def __prepare_title(self, input) -> str:\n return re.sub(r\"[^A-Za-z| ]+\", '', input)[:130]", "def get_name():", "def prep_titles(self, cost_title: str=\"\") -> (str, str):\n img_title = self.function_name + \\\n '_batch' + str(self.batch_size)\n\n if cost_title == \"\":\n img_title = str(self.experiment_count) + '_accuracy_plot_' + img_title\n title = self.title + \\\n '\\n' + self.function_name + \", \" + \\\n 'mini-batch size: ' + str(self.batch_size) + \\\n '\\nAvg Last 10 Epochs: Training ' + self.tr_mean_str + '%, Testing ' + self.test_mean_str + '%'\n else:\n img_title = str(self.experiment_count) + '_cost_plot_' + img_title\n title = cost_title\n\n print(f'\\nexperiment: {img_title}')\n return title, img_title", "def GetModernizedTestName(self, arg):\n return arg", "def _split_title(self, title):\n if not title:\n return '', ''\n if re.search(r'\\(.*\\)', title):\n return re.match(r'(\\w+)\\((.*)\\)', title).groups()\n else:\n return title, ''", "def tagify(text):\n if \"(\" in text:\n text = text.split(\"(\")[0]\n return text.title().replace(\" \", \"\")" ]
[ "0.70298654", "0.6522897", "0.6284885", "0.61326414", "0.60484356", "0.6029634", "0.6029412", "0.6012764", "0.59930116", "0.598101", "0.5968265", "0.59671205", "0.5940044", "0.59055644", "0.58885586", "0.5842299", "0.5840335", "0.58212703", "0.58197415", "0.5815073", "0.5795473", "0.5779788", "0.57524157", "0.5747572", "0.5714877", "0.5682866", "0.5673082", "0.56689775", "0.56633717", "0.56559885", "0.56530744", "0.5645975", "0.5636256", "0.5636256", "0.56222093", "0.56141335", "0.5612129", "0.5603441", "0.5584029", "0.5576218", "0.55727834", "0.55712384", "0.55611414", "0.55591047", "0.55514306", "0.55409294", "0.55409294", "0.55409294", "0.55409294", "0.55409294", "0.5540082", "0.55381125", "0.5514244", "0.5513036", "0.5510206", "0.55058867", "0.5501743", "0.54911745", "0.548556", "0.5469143", "0.546321", "0.5461895", "0.5461895", "0.54593265", "0.54553616", "0.54469943", "0.543903", "0.54376906", "0.54354143", "0.5429684", "0.5426965", "0.5424831", "0.54152524", "0.5409137", "0.54077023", "0.54042464", "0.54041123", "0.53961897", "0.5387371", "0.53868616", "0.53791815", "0.53784037", "0.5368277", "0.536601", "0.5351542", "0.53508025", "0.53489786", "0.5348492", "0.53401375", "0.5338101", "0.53377557", "0.5336507", "0.53326577", "0.53274125", "0.5326914", "0.5319616", "0.5312594", "0.5307338", "0.53072244", "0.5306952" ]
0.78655964
0
The Windows version of base.processInterrupt. Note! This doesn't work terribly well with a lot of processes.
def processInterrupt(uPid): try: # pylint: disable=no-member win32console.GenerateConsoleCtrlEvent(win32con.CTRL_BREAK_EVENT, uPid); #GenerateConsoleCtrlEvent = ctypes.windll.kernel32.GenerateConsoleCtrlEvent #rc = GenerateConsoleCtrlEvent(1, uPid); #reporter.log('GenerateConsoleCtrlEvent -> %s' % (rc,)); fRc = True; except: reporter.logXcpt('uPid=%s' % (uPid,)); fRc = False; return fRc;
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_process_interrupted(exc: \"KeyboardInterrupt\"):\n _print(f\"\\nInterrupted. {exc}\")", "def stop(self):\n # trying this instead of SIGTERM\n # http://stackoverflow.com/a/6659191/3380530\n # self._process.send_signal(SIGINT)\n # Or not. SIGINT doesn't exist on Windows\n self._process.terminate()", "def interrupt_kernel(self, kernel_id):", "def test_control_c_is_possible(self):\n if platform.type != \"posix\":\n raise SkipTest(\"I don't have the energy to fight Windows semantics.\")\n program = \"\"\"\\\nimport os, threading, signal, time, sys\nimport crochet\ncrochet.setup()\nfrom twisted.internet.defer import Deferred\n\nif sys.platform.startswith('win'):\n signal.signal(signal.SIGBREAK, signal.default_int_handler)\n sig_int=signal.CTRL_BREAK_EVENT\n sig_kill=signal.SIGTERM\nelse:\n sig_int=signal.SIGINT\n sig_kill=signal.SIGKILL\n\n\ndef interrupt():\n time.sleep(0.1) # Make sure we've hit wait()\n os.kill(os.getpid(), sig_int)\n time.sleep(1)\n # Still running, test shall fail...\n os.kill(os.getpid(), sig_kill)\n\nt = threading.Thread(target=interrupt, daemon=True)\nt.start()\n\nd = Deferred()\ne = crochet.EventualResult(d, None)\n\ntry:\n e.wait(10000)\nexcept KeyboardInterrupt:\n sys.exit(23)\n\"\"\"\n kw = {'cwd': crochet_directory}\n # on Windows the only way to interrupt a subprocess reliably is to\n # create a new process group:\n # http://docs.python.org/2/library/subprocess.html#subprocess.CREATE_NEW_PROCESS_GROUP\n if platform.type.startswith('win'):\n kw['creationflags'] = subprocess.CREATE_NEW_PROCESS_GROUP\n process = subprocess.Popen([sys.executable, \"-c\", program], **kw)\n self.assertEqual(process.wait(), 23)", "def interrupt_kernel(self):", "def interrupt(self):\n raise NotImplementedError", "def interrupt_handler(signum, frame): #pylint: disable=W0613\n cleanup()\n sys.exit(-2) # Terminate process here as catching the signal\n # removes the close process behaviour of Ctrl-C", "def interrupt(self):\n # Access 'interrupted' with mutual exclusion\n with self.ilock:\n self.interrupted = True", "def siginterrupt(sig, flag): # real signature unknown; restored from __doc__\n pass", "def interrupt_hanged_processes(profile=\"bluefog\"):\n engine_pids = _get_ipengine_pid_from_file(profile)\n if engine_pids is None:\n raise FileNotFoundError(\"Cannot find pids to interrupt the engines. Note this\"\n \"function is supported under localhost mode only\")\n timeout = 0.2\n\n def send_request_to_rc(i):\n rc = ipp.Client(profile=profile)\n rc[i].apply_sync(lambda: 0)\n\n # Send an empty function to the workers. If it cannot be finished within the\n # {timeout} second, we assume the worker is hanged then send the interrupt\n # signal to it. 
If finished, do nothing.\n p_list = []\n for i in range(len(engine_pids)):\n p = multiprocessing.Process(target=send_request_to_rc, args=(i,))\n p.start()\n p_list.append(p)\n for i, p in enumerate(p_list):\n p.join(timeout)\n if p.exitcode is None:\n try:\n os.kill(engine_pids[str(i)], signal.SIGINT)\n print(f\"send signal to {engine_pids[i]}\")\n except:\n pass", "def checkInterrupt():\n if wasInterrupted():\n raise KeyboardInterrupt()", "def processKill(uPid):\n return processTerminate(uPid);", "def interrupt(self):\n self.interrupt_tick_tocking = True", "def interrupt(self):\r\n self.interrupting = True", "def interrupt(func):\n def do_stuff(*args, **kwargs):\n App.get_running_app().controller.interrupt(restart=True)\n return func(*args, **kwargs)\n return do_stuff", "def test_control_c_is_possible(self):\n if platform.type != \"posix\":\n raise SkipTest(\"I don't have the energy to fight Windows semantics.\")\n program = \"\"\"\\\nimport os, threading, signal, time, sys\nimport crochet\ncrochet.setup()\nfrom twisted.internet.defer import Deferred\n\nif sys.platform.startswith('win'):\n signal.signal(signal.SIGBREAK, signal.default_int_handler)\n sig_int=signal.CTRL_BREAK_EVENT\n sig_kill=signal.SIGTERM\nelse:\n sig_int=signal.SIGINT\n sig_kill=signal.SIGKILL\n\n\ndef interrupt():\n time.sleep(0.1) # Make sure we've hit wait()\n os.kill(os.getpid(), sig_int)\n time.sleep(1)\n # Still running, test shall fail...\n os.kill(os.getpid(), sig_kill)\n\nt = threading.Thread(target=interrupt, daemon=True)\nt.start()\n\n@crochet.%s\ndef wait():\n return Deferred()\n\ntry:\n wait()\nexcept KeyboardInterrupt:\n sys.exit(23)\n\"\"\" % (self.DECORATOR_CALL, )\n kw = {'cwd': crochet_directory}\n if platform.type.startswith('win'):\n kw['creationflags'] = subprocess.CREATE_NEW_PROCESS_GROUP\n process = subprocess.Popen([sys.executable, \"-c\", program], **kw)\n self.assertEqual(process.wait(), 23)", "def suppress_keyboard_interrupt_message():\n old_excepthook = sys.excepthook\n\n def new_hook(type, value, traceback):\n if type != KeyboardInterrupt:\n old_excepthook(type, value, traceback)\n else:\n pass\n\n sys.excepthook = new_hook", "def setinterrupt(self, chr: int, /) -> None:", "def kill(self):\r\n try:\r\n if self.process:\r\n self.process.kill()\r\n self.process.wait()\r\n except WindowsError:\r\n # kill may not be available under windows environment\r\n pass", "def _handle_interrupts(signal_number, current_stack_frame):\n print(\" Interrupted!\\n\", file=sys.stderr)\n _display_help()\n sys.exit(0)", "def interrupt_script(self, kind=\"default\"):\n pass", "async def keyboard_interrupt(self) -> None:\n self.logger.debug(\"Keyboard interrupt start\")\n print(\"Create task\")\n self.task = asyncio.create_task(self.target_coroutine)\n process_id = await self.get_process_id\n try:\n # Reason: only for Windows. 
pylint: disable=no-member\n os.kill(process_id, signal.CTRL_C_EVENT) # type: ignore\n print(\"Await task\")\n await self.task\n except KeyboardInterrupt:\n print(\"Await task in except\")\n # await self.task\n print(\"Assert\")\n assert not self.task.done()\n print(\"Task not done\")\n assert not self.task.cancelled()\n print(\"Task cancelled\")\n raise", "def stop_subprocesses():\n global message_interface\n global c_library_interface\n if message_interface:\n message_interface.stop()\n if c_library_interface:\n c_library_interface.stop()", "def runKeyboardInterruptable(target, *args, **kwargs ):\n kit = KeyboardInterruptable.KeyboardInterruptable(targetcmd=target, \n cancelException=carma.util.CancelException, args=args, \n kwargs=kwargs )\n try:\n kit.start()\n except Exception, ex:\n print 'Caught exception on dispatch!!'\n print ex\n done = True\n keyboardInterrupted = False\n done = False\n try:\n while not done:\n try:\n done = kit.doneWaiting()\n except KeyboardInterrupt:\n print \"\\nCancelling %s...\"%target.__name__\n cancel()\n keyboardInterrupted = True\n except Exception, ex:\n print ex\n done = True\n if not kit.cleanFinish:\n print kit.getExceptionInfo()\n return kit.ret\n except Exception, ex:\n print ex\n done = True\n finally:\n kit.join()\n if kit.successfulCancel: \n print \"%s successfully cancelled\" %target.__name__\n raise SuccessfulCancel\n if keyboardInterrupted and done:\n # The calling function didn't terminate from cancel\n # so reraise the interrupt.\n raise KeyboardInterrupt", "def ctrl_c(signum, frame):\n global shutdown_event\n raise SystemExit('\\nCancelling...')", "def terminate(process):\n\n def terminate_win(process):\n import win32process\n return win32process.TerminateProcess(process._handle, -1)\n\n def terminate_nix(process):\n import os\n import signal\n return os.kill(process.pid, signal.SIGTERM)\n\n terminate_default = terminate_nix\n\n handlers = {\n \"win32\": terminate_win, \n \"linux2\": terminate_nix\n }\n\n return handlers.get(sys.platform, terminate_default)(process)", "def keyboard_interrupt_handler(sig: int, _: object) -> None:\n logger.warning(f'KeyboardInterrupt (id: {sig}) has been caught...')\n logger.info('Terminating the session gracefully...')\n ray.shutdown()\n minio_leftovers = glob('*.part.minio')\n for leftover in minio_leftovers:\n Path(leftover).unlink()\n sys.exit(1)", "def _handle_interrupts(signal_number, current_stack_frame):\n print(\" Interrupted!\\n\", file=sys.stderr)\n _display_help()\n sys.exit(1) # no match", "def handleKeyboardInterupt():\n System.stopExecution(TERMINATED_BY_USER)", "def _interrupt(self, signum: int, frame: Optional[Any]) -> None:\n if self._in_task(frame):\n raise KeyboardInterrupt\n else:\n self._interrupted = True\n self._ready_tasks.interrupt()", "def _KillProcess(self, pid):\n if sys.platform.startswith('win'):\n process_terminate = 1\n handle = ctypes.windll.kernel32.OpenProcess(\n process_terminate, False, pid)\n ctypes.windll.kernel32.TerminateProcess(handle, -1)\n ctypes.windll.kernel32.CloseHandle(handle)\n\n else:\n try:\n os.kill(pid, signal.SIGKILL)\n except OSError as exception:\n logger.error('Unable to kill process {0:d} with error: {1!s}'.format(\n pid, exception))", "def _run(proc: Popen, timeout):\n try:\n return proc.wait(timeout=timeout)\n except TimeoutExpired:\n pass\n if sys.platform != 'win32':\n proc.send_signal(signal.SIGINT)\n try:\n return proc.wait(timeout=5)\n except TimeoutExpired:\n pass\n\n proc.terminate() # SIGTERM\n try:\n return proc.wait(timeout=5)\n except 
TimeoutExpired:\n pass\n\n proc.kill() # SIGKILL\n return proc.wait(timeout=5)", "def InterfaceClientStop(self, exitCode=200): \n pass", "def pkill(process_name):\n try:\n killed = os.system('taskkill /im ' + process_name)\n except Exception:\n killed = 0\n return killed", "def interrupt(self):\n ident = self.ident()\n print('{} for \"{}\" saw interrupt. Finishing in-progress task.'.format(\n ident,\n self.to_consume\n ))", "def _windows_power_control(self):\n\n os_power_command = 'shutdown /r /t 3' if self._power_event_type == 'restart' \\\n else 'shutdown /h /t 3'\n\n exit_code, out = self._staf_start_proc(os_power_command,\n self._sut.bespoke_root,\n self._command_timeout,\n location = self._sut.network_address)\n\n if exit_code != 0:\n raise CoreError('Power control event \"{0}\" failed: {1}'.format(self._name, out))", "def ctrlc_catcher(*excargs, **exckwargs):\n\n # Depending on the number of input arguments, we're either in Jupyter/iPython\n # or \"regular\" Python - this matters for actually handling the raised exception\n if len(excargs) == 3:\n isipy = False\n etype, evalue, etb = excargs\n else:\n shell, = excargs\n etype, evalue, etb = sys.exc_info()\n try: # careful: if iPython is used to launch a script, ``get_ipython`` is not defined\n get_ipython()\n isipy = True\n sys.last_traceback = etb # smartify ``sys``\n except NameError:\n isipy = False\n\n # Prepare to log any uncaught exceptions\n log = logging.getLogger(\"ACME\")\n\n # The only exception we really care about is a `KeyboardInterrupt`: if CTRL + C\n # is pressed, ensure graceful shutdown of any parallel processing clients\n if issubclass(etype, KeyboardInterrupt):\n try:\n client = dd.get_client()\n except ValueError:\n client = None\n if client is not None:\n for st in client.futures.values():\n st.cancel()\n client.futures.clear()\n dh.cluster_cleanup(client)\n log.debug(\"CTRL + C acknowledged, client and workers successfully killed\")\n\n # Relay exception handling back to appropriate system tools\n if isipy:\n shell.ipyTBshower(shell, exc_tuple=(etype, evalue, etb), **exckwargs)\n else:\n sys.__excepthook__(etype, evalue, etb)\n\n # Write to all logging locations, manually print traceback to file (stdout\n # printing was handled above)\n log.error(\"Exception received.\")\n memHandler = [h for h in log.handlers if isinstance(h, handlers.MemoryHandler)][0]\n if memHandler.target is not None:\n memHandler.acquire()\n with open(memHandler.target.baseFilename, \"a\", encoding=\"utf-8\") as logfile:\n logfile.write(\"\".join(traceback.format_exception_only(etype, evalue)))\n logfile.write(\"\".join(traceback.format_tb(etb)))\n memHandler.release()\n\n return", "def interruptable(func, *args, **opts):\r\n while True:\r\n try:\r\n result = func(*args, **opts)\r\n except IOError, e:\r\n if e.errno == errno.EINTR:\r\n continue\r\n raise e\r\n except OSError, e:\r\n if e.errno == errno.EINTR:\r\n continue\r\n raise e\r\n else:\r\n break\r\n return result", "def stop():\n\n crate = get_crate()\n # Tell the thread to stop\n crate.mch_comms.stop = True\n # Stop the ipmitool shell process\n try:\n if crate.mch_comms.ipmitool_shell:\n crate.mch_comms.ipmitool_shell.terminate()\n crate.mch_comms.ipmitool_shell.kill()\n except:\n pass", "def on_interrupt(self, *args) -> None: #pylint: disable=unused-argument\r\n if not self.stop_requested:\r\n self.stop_requested = True\r\n self.logger.critical('SIGINT detected - will stop at the end of the current evolution')\r\n else:\r\n stop_from = time.time() - 5000\r\n if 
self.last_stop_request > stop_from:\r\n raise KeyboardInterrupt\r\n else:\r\n self.last_stop_request = time.time()\r\n self.logger.critical('SIGINT suppressed - repeat within 5 seconds to sigterm')", "def interrupt(v):\n print(\" \" + bcolors.OKBLUE + \"[!] Detected CTRL+C ! restoring setting, please wait...\" + bcolors.ENDC)\n bash = \"ip link delete dummy type dummy\"\n os.system(bash)\n if v.spoof:\n restoreSpoof(v)\n if v.ntpStatus:\n ntpToggle(v)\n print(\" \" + bcolors.OKGREEN + \"Done\")\n print(\" --------------------------------------------------------\" + bcolors.ENDC)\n exit()", "def _restartProcessNormal(self) -> None:\n\n if IS_WIN_SVC in sys.argv:\n reactor.callFromThread(reactor.stop)\n return\n\n python = sys.executable\n argv = list(sys.argv)\n\n def addExe(val):\n if not \"run_peek_\" in val:\n return val\n if isWindows and not val.lower().endswith(\".exe\"):\n return val + \".exe\"\n return val\n\n argv = map(addExe, argv)\n os.execl(python, python, *argv)", "def interrupt(self):\n return True", "def wrapper(*args, **kwargs):\n try:\n method(*args, **kwargs)\n except KeyboardInterrupt:\n dummy.UselessStdout.write(\"ASDASDASD\")", "def interrumpe_espera_transbordo(self, proceso):\r\n proceso.interrupt(self)", "def delayed_keyboard_interrupt(self):\n return DelayedKeyboardInterrupt(self)", "def trap_ctrl_c_ctrl_break() -> None: # noqa\n\n signal.signal(signal.SIGINT, ctrl_c_trapper)\n signal.signal(signal.SIGTERM, sigterm_trapper)\n if platform.system() == \"Windows\":\n # SIGBREAK isn't in the Linux signal module\n # noinspection PyUnresolvedReferences\n signal.signal(signal.SIGBREAK, ctrl_break_trapper)", "def _on_parent_process_kill(self):", "def _stop_process(self):\n self.stdin_queue.put_nowait(\"quit\")\n ExternalProcess._stop_process(self)", "def cli(ctx):\n with process_manager.process_manager(**ctx.parent.cm_kwargs) as pm:\n pm.shutdown()", "def catch_keyboard_interrupt() -> Callable:\n return signal.signal(signal.SIGINT, keyboard_interrupt_handler)", "def interr(self, *args):\n return _ida_hexrays.Hexrays_Hooks_interr(self, *args)", "def test_signal_interruption(self):\n process = Popen(\n [STRATIS_CLI, \"pool\", \"create\",\n p_n(), DISKS[0]],\n stdout=PIPE,\n stderr=PIPE,\n close_fds=True,\n env=os.environ)\n time.sleep(0.05)\n process.send_signal(2)\n result = process.communicate()\n stdout_text = \"\"\n stderr_text = \"\"\n if result[0]:\n stdout_text = bytes(result[0]).decode(\"utf-8\")\n if result[1]:\n stderr_text = bytes(result[1]).decode(\"utf-8\")\n\n self.assertTrue(\"Traceback\" not in stdout_text)\n self.assertTrue(\"Traceback\" not in stderr_text)\n self.assertNotEqual(process.returncode, 0)", "async def keyboard_interrupt(self) -> None:\n print(\"Create task\")\n coroutine = self.run_example_use_case_and_raise(self.queue_log_record, set_log_level_as_debug)\n self.task = asyncio.create_task(coroutine)\n process_id = await self.get_process_id\n # Reason: only for Windows. 
pylint: disable=no-member\n os.kill(process_id, signal.CTRL_C_EVENT) # type: ignore\n print(\"Await task\")\n await self.task", "def OnCancelScripts(self, event):\n # self.shutdown()\n print(\"Cancel multiprocessor\")\n event.Skip()", "def interrupt_handler(signum, frame):\n if DOCKER_MONITOR:\n util.log.warning(\"Signal %d received - Tearing down monitoring\"\n % (signum))\n DOCKER_MONITOR.tear_down_all()\n sys.exit(0)", "def cancel_inner():\n kernel32.SetEvent(cancel_event)", "def interrupt_handler(self, signo, frame):\n log.debug(\"interrupting run\")\n self._keep_running = False", "def test2(stopEvent: Event):\n cmd = '\"{}\" \"automation_notepad.py\"'.format(sys.executable)\n p = subprocess.Popen(cmd)\n auto.Logger.WriteLine(cmd, auto.ConsoleColor.DarkGreen)\n while True:\n if None != p.poll():\n break\n if stopEvent.is_set():\n childProcesses = [pro for pro in psutil.process_iter() if pro.ppid == p.pid or pro.pid == p.pid]\n for pro in childProcesses:\n auto.Logger.WriteLine('kill process: {}, {}'.format(pro.pid, pro.cmdline()), auto.ConsoleColor.Yellow)\n p.kill()\n break\n stopEvent.wait(0.05)\n auto.Logger.WriteLine('test2 exits', auto.ConsoleColor.DarkGreen)", "def processTerminate(uPid):\n # pylint: disable=no-member\n fRc = False;\n try:\n hProcess = win32api.OpenProcess(win32con.PROCESS_TERMINATE, False, uPid);\n except:\n reporter.logXcpt('uPid=%s' % (uPid,));\n else:\n try:\n win32process.TerminateProcess(hProcess, 0x40010004); # DBG_TERMINATE_PROCESS\n fRc = True;\n except:\n reporter.logXcpt('uPid=%s' % (uPid,));\n hProcess.Close(); #win32api.CloseHandle(hProcess)\n return fRc;", "def cli():\n signal.signal(signal.SIGINT, signal_handler)\n pass", "def wait():\r\n win32event.WaitForSingleObject(hProcess,\r\n win32event.INFINITE)\r\n returncode = win32process.GetExitCodeProcess(hProcess)\r\n return returncode", "def _kill_kernel(self):", "def interrupted(self):\n return self.__interrupted", "def test_udp_keyboard_interrupt():\n with pytest.raises(KeyboardInterrupt):\n cmd = [\"python\", \"dnsck/dnsck.py\", \"-s\", \"8.8.8.8\", \"google.com\"]\n process = subprocess.Popen(cmd, shell=False)\n sleep(3)\n os.kill(process.pid, SIGINT)\n raise KeyboardInterrupt", "def infinite_loop(func):\n @wraps(func) # Preserve target's metadata\n def wrapper(*args, **kwargs):\n while True:\n try:\n func(*args, **kwargs)\n except KeyboardInterrupt:\n break\n return wrapper", "def on_KeyboardInterrupt(player):\n print(\"paused by KeyboardInterrupt\")\n player.edit()", "def wrap_keyboard_except(method):\n def wrapper(*args, **kwargs):\n \"\"\"\n Try function, absorb KeyboardInterrupt and leave gracefully.\n \"\"\"\n try:\n method(*args, **kwargs)\n except KeyboardInterrupt:\n dummy.UselessStdout.write(\"ASDASDASD\")\n return wrapper", "def _AbortTerminate(self):\n for pid, process in self._processes_per_pid.items():\n if not process.is_alive():\n continue\n\n logger.warning('Terminating process: {0:s} (PID: {1:d}).'.format(\n process.name, pid))\n process.terminate()", "def abortEvent(self,event):\n # TODO: make interactorObserverTags a map to we can\n # explicitly abort just the event we handled - it will\n # be slightly more efficient\n for tag in self.interactorObserverTags:\n cmd = self.interactor.GetCommand(tag)\n if cmd is not None:\n cmd.SetAbortFlag(1)", "def processStop(name):\n imrclient.update_server_info()\n imrclient.process_stop(name)", "def SIGINT(self, signum, frame):\n for t in [t for t in threading.enumerate() if t.name != 'MainThread']:\n if hasattr(t, 'stop') and 
callable(t.stop):\n t.stop()\n\n for t in [t for t in threading.enumerate() if t.name != 'MainThread']:\n t.join()\n\n self.exit(1)", "def stopProcesses(*args):\n _stopProcessSet(_running)", "def halt_cmd(cmd, cnt, args):\n log(\"halt\") # need an interrupt handler to do this\n cpu.halt()", "def test_call_upload__interrupt(self, syn):\n self._test_call_upload__part_exception(\n syn,\n KeyboardInterrupt,\n SynapseUploadAbortedException,\n )", "def kill_excel_bg():\n excel_process = [\n process for process in psutil.process_iter() if process.name() == \"EXCEL.EXE\"\n ]\n for process in excel_process:\n xl_files = [f.path for f in process.open_files() if \".xl\" in f.path]\n print(xl_files)\n if len(xl_files) == 0:\n process.kill()", "def abort(self, kill=False):\n log.warning(\"Received abort request\")\n self.current_exposure = None\n if self.getstate() == 'running':\n if kill:\n self.process.kill()\n else:\n self.process.terminate()\n with open(self.logfilename, 'a') as f:\n print(\"!!!!!! process killed by user !!!!!!!\", file=f)", "def catch_kb_interrupt(output_directory):\n # Rename output dir\n src = output_directory\n if src.endswith(\"/\"):\n src = src[:-1]\n dst = src + \"_interrupted\"\n\n logger.error(f\"Keyboard interruption catched.\")\n logger.error(f\"Moving output directory from\")\n logger.error(src)\n logger.error(\"to\")\n logger.error(dst)\n\n shutil.move(src, dst)", "def terminate_proc(proc_name=None, proc_id=None):\n assert proc_name or proc_id, \"Neither 'proc_name' nor 'proc_id' are passed.\"\n if sys.platform == \"win32\":\n if proc_name:\n query = ['/fi', 'IMAGENAME eq %s' % proc_name]\n elif proc_id:\n query = ['/fi', 'PID eq %s' % proc_id]\n output = killableprocess.check_output(\n ['tasklist',\n '/nh', # don't display column headers\n '/fo', 'CSV'] # --> \"MyApp.exe\",\"4380\",\"Console\",\"1\",\"395.604 K\"\n + query)\n output = output.decode(sys.getfilesystemencoding())\n proc_ids = []\n for line in output.decode(sys.getfilesystemencoding()).split(\"\\n\"):\n line = line.replace(\"\\r\", \"\")\n if '\"' in line:\n proc_ids.append(eval('[%s]' % line)[1])\n for id in proc_ids:\n killableprocess.call(['taskkill', '/f', '/t', '/pid', id])\n else:\n pass # necessary ?", "def CloseProcessHandle(process_handle: int) -> int:\n return kernel32.CloseHandle(process_handle)", "def _signal_workaround(shutdown: asyncio.Future) -> None:\n\n async def wakeup() -> None:\n while not shutdown.done():\n await asyncio.sleep(0.1)\n\n def signint_handler(signalnum: int, frame: types.FrameType):\n shutdown.set_result(None)\n\n signal.signal(signal.SIGINT, signint_handler)\n asyncio.get_running_loop().create_task(wakeup())", "def ctrl_break_trapper(signum: int, stackframe) -> None:\n log.critical(\n \"Ignoring CTRL+BREAK (signal {}); use the GUI to quit\", signum\n )", "def _init_worker():\n signal.signal(signal.SIGINT, signal.SIG_IGN)", "def stop(self):\n if self._proc_is_alive():\n\n if os.name == 'nt':\n # os.killpg is not available on Windows\n # See: https://bugs.python.org/issue5115\n self._proc.kill()\n\n # When we kill the child process on Windows the return code\n # gets set to 1, so we want to reset the return code back to 0\n # in order to avoid creating logging output in the download(...)\n # method\n self._proc.returncode = 0\n else:\n os.killpg(self._proc.pid, signal.SIGKILL)\n\n self._set_returncode(self.STOPPED)", "def graceful_exit():\n print(\"KeyboardInterrupt, exit(1)\")\n exit(1)", "def test_interrupt(self):\n with patch('RPi.GPIO.setmode') as mock_setmode:\n gpio 
= GPIODevice()\n with patch('RPi.GPIO.setup') as mock_setup:\n with patch('RPi.GPIO.add_event_detection') as mock_detection:\n with patch('RPi.GPIO.add_event_callback') as mock_callback:\n gpio.interrupt(self._callback, 0)\n with patch('RPi.GPIO.cleanup') as mock_cleanup:\n gpio.close()\n mock_detection.called_once_with(0, GPIO.BOTH)\n mock_callback.called_once_with(0, self._callback)", "def TerminalClientStop(self, exitCode=200):\n pass", "def interrupted(self):\n print(\"Macro interrupted!\")", "def test_debug_wrapper_keyboard_interrupt(mock_env_get, mock_run_command,\n debug_on_fail):\n mock_env_get.return_value = debug_on_fail\n\n # simulate KeyboardInterrupt\n mock_run_command.side_effect = KeyboardInterrupt()\n\n with pytest.raises(KeyboardInterrupt):\n kubernetes_debug_wrapper.setup_debug_wrapper()", "def ctrl_c_trapper(signum: int, stackframe) -> None:\n log.critical(\"Ignoring CTRL+C (signal {}); use the GUI to quit\", signum)", "def unavailable_process(**kwargs):\n return LazySubprocessTester([sys.executable, \"-c\", \"import sys; sys.exit(1)\"], **kwargs)", "def stopProcess(self):\n if self.__hgClient.isExecuting():\n self.__hgClient.cancel()", "def finish(self):\n super(InterruptibleMixin, self).finish()\n signal(SIGINT, self.original_handler)", "def signalProcess(self, processTag=None, event='cancel'):\r\n if processTag is None:\r\n self.mutex.clear()\r\n while not self.asyncQueue.empty():\r\n self.asyncQueue.get()\r\n self.asyncQueue.task_done()\r\n while not self.syncQueue.empty():\r\n self.syncQueue.get()\r\n self.syncQueue.task_done()\r\n toQueue = [PROCESS_MESSAGE, None, None, [], {}, self.processMessage()]\r\n for k in range(self.activeWorkers):\r\n self.asyncQueue.put(toQueue)\r\n return\r\n elif processTag not in self.activeList:\r\n raise Exception(\"%s is not an active process\"%processTag)\r\n cancel_event, wait_event, outProcessor = self.activeList[processTag]\r\n if event == 'cancel':\r\n cancel_event.clear()\r\n elif event == 'wait':\r\n wait_event.clear()\r\n args = (PROCESS_PAUSE, processTag,'Proceso pausado')\r\n kwargs = {}\r\n outProcessor(*args, **kwargs)\r\n\r\n elif event == 'resume':\r\n wait_event.set()\r\n args = (PROCESS_PAUSE, processTag, 'Proceso reanudado')\r\n kwargs = {}\r\n outProcessor(*args, **kwargs)\r\n else:\r\n cancel_event.clear()", "def _kill(self) -> None:\n if not hasattr(self, \"proc\"):\n raise FuzzFrontendError(\"Attempted to kill non-running PID.\")\n\n self.proc.terminate()\n try:\n self.proc.wait(timeout=0.5)\n L.info(\"Fuzzer subprocess exited with `%d`\", self.proc.returncode)\n except subprocess.TimeoutExpired:\n raise FuzzFrontendError(\"Subprocess could not terminate in time\")\n\n self._on = False", "def init_worker():\n signal.signal(signal.SIGINT, signal.SIG_IGN)", "def test_stopProcessForcedKill(self):\r\n self.pm.startService()\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n self.assertIn(\"foo\", self.pm.protocols)\r\n self.reactor.advance(self.pm.threshold)\r\n proc = self.pm.protocols[\"foo\"].transport\r\n # Arrange for the fake process to live longer than the killTime\r\n proc._terminationDelay = self.pm.killTime + 1\r\n self.pm.stopProcess(\"foo\")\r\n # If process doesn't die before the killTime, procmon should\r\n # terminate it\r\n self.reactor.advance(self.pm.killTime - 1)\r\n self.assertEqual(0.0, self.pm.timeStarted[\"foo\"])\r\n\r\n self.reactor.advance(1)\r\n # We expect it to be immediately restarted\r\n self.assertEqual(self.reactor.seconds(), self.pm.timeStarted[\"foo\"])", "def kill_gracefully(process, 
timeout=2):\n try:\n with suppress(ProcessLookupError):\n process.terminate()\n stdout, stderr = process.communicate(timeout=timeout)\n except TimeoutExpired:\n process.kill()\n stdout, stderr = process.communicate()\n\n return process.returncode, stdout, stderr", "def sigterm(signum, frame):\n loop.stop()", "def stop_and_restart():\n U.stop()\n os.execl(sys.executable, sys.executable, *sys.argv)" ]
[ "0.63215697", "0.5775936", "0.57586217", "0.5744429", "0.5740155", "0.5561683", "0.5558111", "0.5497653", "0.5479278", "0.54543006", "0.54173666", "0.541069", "0.5367018", "0.53545386", "0.5337776", "0.53182185", "0.5305628", "0.52493083", "0.522078", "0.5160787", "0.51373094", "0.5127463", "0.51083744", "0.5097883", "0.5093123", "0.5051432", "0.503602", "0.50336325", "0.50290835", "0.5011577", "0.49940863", "0.498145", "0.4974007", "0.49512565", "0.49508438", "0.4948522", "0.49080595", "0.49046063", "0.48881394", "0.48764995", "0.48681796", "0.48662147", "0.483622", "0.4824302", "0.4802598", "0.47952068", "0.47836182", "0.477054", "0.47622493", "0.4751859", "0.47496805", "0.4741908", "0.47316882", "0.4725102", "0.4701992", "0.4687539", "0.46849918", "0.46793047", "0.46728766", "0.46623904", "0.4651873", "0.46492833", "0.46438187", "0.46415505", "0.46037865", "0.4598778", "0.45974123", "0.4594846", "0.45932645", "0.4593261", "0.4585246", "0.45783845", "0.45765087", "0.4572852", "0.4567464", "0.45614046", "0.4557215", "0.45453054", "0.45063716", "0.44979918", "0.4489683", "0.4487784", "0.44767925", "0.44756845", "0.44707084", "0.447068", "0.44641837", "0.44615847", "0.4459978", "0.4458832", "0.44561732", "0.44527078", "0.44497526", "0.44480014", "0.4445708", "0.44432583", "0.44389445", "0.44387215", "0.4436839", "0.4420803" ]
0.67192686
0
Posts a WM_CLOSE message to the specified thread.
def postThreadMesssageClose(uTid): fRc = False; try: win32api.PostThreadMessage(uTid, win32con.WM_CLOSE, 0, 0); # pylint: disable=no-member fRc = True; except: reporter.logXcpt('uTid=%s' % (uTid,)); return fRc;
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def postThreadMesssageQuit(uTid):\n fRc = False;\n try:\n win32api.PostThreadMessage(uTid, win32con.WM_QUIT, 0x40010004, 0); # DBG_TERMINATE_PROCESS # pylint: disable=no-member\n fRc = True;\n except:\n reporter.logXcpt('uTid=%s' % (uTid,));\n return fRc;", "def close(self):\n\n self.queue.put(\"EXITTHREAD\")\n logging.info(\"in close in thread\")\n try:\n # send closing message immediately\n if self.ircSocket:\n self.ircSocket.send(\n (\n f\"PRIVMSG {self.channel} :closing opponent\"\n \" bot\\r\\n\").encode('utf8')\n )\n while self.channelThread.is_alive():\n pass\n self.running = False\n if self.messageBufferTimer:\n self.messageBufferTimer.cancel()\n except Exception as e:\n logging.error(\"In close\")\n logging.error(str(e))\n logging.exception(\"Exception : \")", "def closeEvent(self, _):\n for thread in self.threads:\n thread.stop()\n thread.wait()", "def closeEvent(self, event):\n # Stop the run loop in the mandelbrot thread\n self.pipe_to_mandelbrot_thread.send(\"STOP\")\n # Shutdown queues to allow underlying processes to join\n self.display_queue.close()\n self.mandelbrot_queue.close()\n # Join is blocking - waits for thread to exit nicely\n self.mandelbrot_thread.join()\n # Once the compute thread is done, accept the original close event\n event.accept()", "def closeEvent(self, event):\n self._thread.terminate()\n self._thread.wait()\n event.accept()", "def on_closing(event=None):\r\n my_msg.set(\"{quit}\")\r\n send()", "def terminate(self):\n print('Terminating Revshell thread.')\n self.server.close()", "def on_closing(event=None):\n my_msg.set(\"{quit}\")\n send()", "def on_closing(event=None):\n my_msg.set(\"{quit}\")\n send()", "def on_closing(event=None):\n my_msg.set(\"{quit}\")\n send()", "def on_closing(event=None):\n my_msg.set(\"{quit}\")\n send()", "def close(self):\r\n self._sendLock.acquire()\r\n try:\r\n self._queue.put(\"CLOSE\")\r\n self._eventQueue.put((time.time(), \"CLOSE\"))\r\n self._closed = 1\r\n self._s.close()\r\n self._thread.join()\r\n self._eventThread.join()\r\n finally:\r\n self._sendLock.release()", "def _terminate_thread(thread):\n if not thread.isAlive():\n return\n\n exc = ctypes.py_object(SystemExit)\n res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(thread.ident), exc)\n if res == 0:\n raise ValueError(\"nonexistent thread id\")\n elif res > 1:\n # \"\"\"if it returns a number greater than one, you're in trouble,\n # and you should call it again with exc=NULL to revert the effect\"\"\"\n ctypes.pythonapi.PyThreadState_SetAsyncExc(thread.ident, None)\n raise SystemError(\"PyThreadState_SetAsyncExc failed\")", "def close(self):\n self.loop.call_soon_threadsafe(self.stop_flag.set_result, True)\n self.server_thread.join()", "def bcp_goodbye(self, **kwargs):\n if self.config['mediacontroller']['exit_on_disconnect']:\n self.socket_thread.sending_thread.stop()\n sys.exit()", "def ev_windowclose(self, event: tcod.event.WindowEvent) -> T | None:", "def TTAPI_ShutdownCompleted(self, sender, e):\r\n # Shutdown the Dispatcher\r\n if self.m_disp != None:\r\n self.m_disp.BeginInvokeShutdown()\r\n self.m_disp = None", "def TTAPI_ShutdownCompleted(self, sender, e):\r\n # Shutdown the Dispatcher\r\n if self._m_disp != None:\r\n self._m_disp.BeginInvokeShutdown()\r\n self._m_disp = None", "def close(self):\n if not self.closed:\n log.debug(\"Closing worker thread\")\n\n self.closed = True\n if self._wait:\n self._wait.set()", "def Quit(self):\n t = threading.Thread(target=self.server.shutdown)\n t.start()", "def on_close(self, event):\r\n if 
self.thread is not None:\r\n self.thread.abort = True\r\n if self.tester is not None:\r\n try:\r\n self.tester.Close()\r\n except:\r\n pass\r\n self.close_debug_console()\r\n event.Skip()", "def main_thread_exit(self):\n ...", "def shutdown(self, signum, frame):\n self.log('WARNING', -1, 'Shutting down normally ...')\n main_thread = threading.current_thread()\n\n for t in threading.enumerate():\n if t is main_thread:\n continue\n t.join()\n self.server_socket.close()\n sys.exit(0)", "def shutdown(self):\n self.socket_thread.stop()", "def shutdown(self):\n self.thread.server.shutdown()\n self.thread.join()", "def closeEvent(self, evt):\n self.__shutdown()", "def shutdown(self, signum, frame):\n self.serverSocket.close()\n sys.exit(0)", "def kill(self, threadid):\n self.rpc.call(MsfRpcMethod.CoreThreadKill, [threadid])", "def CloseForum(self, event):\n pass", "def on_close(self):\n print('[INFO] closing...')\n self.stopEvent.set()\n del self.tello\n self.root.quit()", "def OnClose(self, event):\r\n if self.worker: #stop main GPIB thread\r\n self.worker.abort()\r\n time.sleep(0.3)\r\n self.Destroy()", "def stop(self):\n log.debug(\"CNC thread stopping...\")\n if self.running:\n self.running = False\n self.sendCommand(\"?\")\n self.join()\n log.debug(\"CNC thread stopped\")", "def signal_handler(signum, frame):\n main.CLOSE = True", "def _disconnect(self):\n self.socket.send_json({\"worker_id\": self.socket_id, \"message\": \"disconnect\"})\n self.socket.close()\n self.context.term()\n exit()", "def quit_application(self, event):\n self.Close()\n server.closeSocket()", "def signal_shutdown(self, sig, frame):\n _LOGGER.debug('%s received; shutting down...',\n signal.Signals(sig).name) # pylint: disable=no-member\n self._shutdown = True\n if self._get_task:\n self._get_task.cancel()\n\n # Issue #8 - Cancel not processed until next message added to queue.\n # Just put a dummy object on the queue to ensure it is handled immediately.\n self._pending_messages.sync_q.put_nowait(None)", "def quit_worker(self):\n\n if self.isRunning():\n # TODO: Should find a better way of doing this by setting an external flag\n self.terminate()\n self.wait()\n\n self.pb.close()\n self.parent.setEnabled(True)\n self.parent.statusbar.showMessage(\"\")", "def on_closebutton_handle_clicked(self, _widget):\n self._terminate.set()\n self.top_widget.destroy()\n self.top_widget = None", "def test_Connector_close_kills_thread() -> None:\n # open and close Connector object\n connector = Connector()\n # verify background thread exists\n assert connector._thread\n connector.close()\n # check that connector thread is no longer running\n assert connector._thread.is_alive() is False", "def handle_quit( self ):\n print \"bye!\"\n\n # really close the window\n return True", "def on_close(self):\n self.subscrib.unsubscribe(self.channel)\n self.thread.stop()", "def close(self, timeout=5):\n self.run(self.__stop.set)\n\n with self.__stop_lock:\n LOGGER.info(\"Canceling pending tasks.\")\n for task in asyncio.Task.all_tasks(loop=self.loop):\n LOGGER.error(task)\n task.cancel()\n\n LOGGER.info(\"Closing event loop.\")\n self.loop.stop()\n self.loop.close()\n self.loop = None\n\n LOGGER.info(\"Waiting for thread.\")\n self.thread.join(timeout=timeout)\n self.thread = None", "def __stop(self, reason=\"user\"):\n\n print(f\"trying to close because {reason}\")\n try:\n self.__main_window.destroy()\n except:\n print(\"failed to close the main window\")\n\n #self.__message_sender.stop = True TO BE FIXED\n\n self.__telegram_controller.stop()\n 
self.__bus_controller.stop()\n sys.exit(f\"properly closed by {reason}\")", "def closeEvent(self, event):\n\n sys.exit()", "def handle_close(self, msg):\n self.log.debug(\"handle_close[%s](%s)\", self.comm_id, msg)\n if self._close_callback:\n self._close_callback(msg)", "def cb_close(self, *args):\n Gtk.main_quit()", "def UnregisterMessageHandler(self, timeout=None):\n if self.handler_thread:\n self.handler_stop = True\n self.handler_thread.join(timeout)\n if self.handler_thread.is_alive():\n raise RuntimeError(\"Message handler thread did not join in time.\")\n self.handler_thread = None", "def handle_close(event):\n self.fig.canvas.stop_event_loop()\n self.raiseAMessage('Closed Figure')", "def __on_close(self):\n # Release the resource and\n # close the windows\n LOGGER.info(\"closing...\")\n self.__quit.set()\n self.__detect.end()\n self.root.quit()", "def stop_cb(evt):\n print('CLOSING on {}'.format(evt))\n nonlocal done\n done = True", "def close_signal_window(self, closed_window):\n for i, asig in enumerate(self.receive_list):\n (sig, var, window) = asig\n if window is closed_window:\n self.receive_list[i] = (sig, var, None)\n closed_window.destroy()", "def stop_cb(evt):\n print('CLOSING on {}'.format(evt))\n nonlocal done\n done = True", "def _onExit(self, event):\n self.Close(True)", "def cleanThread(self):\n logging.info(\"Clean Thread\")\n self.thread.quit()\n self.thread.wait()", "def onExitButtonClicked(self, widget):\n self.getGtkTopObject().close()", "def stop(self):\n self.thread.join()", "def ev_windowclose(self, event: WindowEvent) -> None:", "def _quit():\r\n\twin.quit()\r\n\twin.destroy()\r\n\tquit()", "def msg_close(version = NATIVE_HEADER_VERSION, order=\"<\"):\n return message_no_reply(CLOSE, \"\", \"\", version, order)", "def closeEvent(self, event):\n self.exit()\n event.accept()", "def close(self):\n self.exit_set = True\n self.sql_queue.put((self.exit_token, \"\", \"\"), timeout=5)\n # Sleep and check that the thread is done before returning.\n while self.thread_running:\n time.sleep(.01) # Don't kill the CPU waiting.", "def OnCloseWindow(self, event):\r\n self.data.close()\r\n sizes[self.data.__class__.__name__] = self.GetSizeTuple()\r\n self.Destroy()", "def closeEvent(self, event):\n self._parent.quit_application(event)", "def close(self):\n self.control_conn.sendall('CLOSE'.encode())", "def close(self):\n win32api.CloseHandle(self._kwargs['Path'])\n win32api.CloseHandle(self._overlapped.hEvent)", "def tearDownZServerThread(self):\n self.zserver.close()\n self.zthread.stop()", "def exit_btn_callback(evt):\n print(\"Inside exit_btn_callback. 
Event object is: \", evt)\n mainwin.destroy()", "def closeWindow(self):\n cmdId = self.executeCommand(Command.CLOSE)\n return cmdId", "def close(self):\n self.stop.set() # Alert threads to stop using stop event\n # Send empty packet to readerThread to break it out of blocking recv\n self.sock.sendto(\"\", self.sock.getsockname())\n self.readerThread.join()\n self.cleanerThread.join()\n self.sock.close()", "def OnExit(self, event):\r\n self.Close(True)", "def __window_close(self):\n pass", "def quit(self):\n\n logging.warning(\"IPC: Quitting.\")\n try:\n self.subprocess.terminate()\n except OSError:\n logging.warning(\"Failed to terminate subprocess.\")\n self.thread_stop.set()\n\n if self.reader_thread:\n try:\n self.reader_thread.join()\n except RuntimeError:\n pass", "def stop(self):\n if not self.done:\n self.log.info(\"Socket thread stopping.\")\n self.sending_queue.put('goodbye')\n time.sleep(1) # give it a chance to send goodbye before quitting\n self.done = True\n self.mc.done = True", "def close(self):\n logging.info(\"Closing controller\")\n self.controller.close()\n\n # Send close event to UDP thread\n logging.info(\"Set event to close UDP_Listener\")\n self.UDP_ListenerEvent.clear()\n\n # Send close event to serial thread\n logging.info(\"Set event to close Serial_Listener\")\n self.Serial_ListenerEvent.clear()\n\n # Send close event to controller event loop\n logging.info(\"Set event to close ControllerEventLoop\")\n self.ControllerEventLoop_ListenerEvent.clear()\n\n self.master.after(0, self.master.destroy)", "def close(self):\n self.state = False\n self.mainwindow.sendMessage('a')\n print(\"closing \" + self.name)", "def _process_quit(self, process_instance):\n process_instance.errcause = \"quitting process\"\n\n # Give the process notice to quit doing stuff.\n process_instance.quit()\n\n # Terminate IonProcessThread (may not have one, i.e. simple process)\n # @TODO: move this into process' on_quit()\n if getattr(process_instance, '_process', None) is not None and process_instance._process:\n process_instance._process.notify_stop()\n process_instance._process.stop()", "def quit(self, message):\n self.write(['QUIT'], message)\n self.hasquit = True\n # Wait for acknowledgement from the server. By RFC 2812 it should be\n # an ERROR msg, but many servers just close the connection. Either way\n # is fine by us.\n # Closing the connection now would mean that stuff in the buffers that\n # has not yet been processed would never be processed. 
It would also\n # release the main thread, which is problematic because whomever called\n # quit might still want to do something before main thread quits.", "def close(self):\n self.worker.close()\n if self._own_loop:\n self.loop.close()", "def finish(self):\n global log_th\n log_th.log_info('{} disconnected from {} thread'.format(self.client_ip, threading.current_thread().name))", "def close(self):\n if not self._closed:\n self._thread_finalizer()\n self._process = None\n self._closed = True", "def closeEvent(self, event):\n log.info(\"Received window close event.\")\n self.main.app_is_exiting()\n super().closeEvent(event)\n return", "def close(self):\n self.log_debug(\"Waiting for processing thread to close...\")\n self.input_processing_running = False\n\n if self.input_thread is not None:\n self.input_thread.join()", "def window_close():\n response = messagebox.askokcancel(title=\"Exit?\",\n message=\"Are you sure you want to close the program?\")\n if response == True:\n root.destroy() # Closes the window\n # Else: The program continues as normal", "def on_close(self):\n if self.httpd:\n self.httpd.shutdown()\n self.httpd.server_close()\n self.httpd_thread.join()\n self.master.destroy()", "def stop_thread(self):\n t, e = self.workers[0]\n e = e.set() # put event to set True for stop thread\n del self.workers[0]", "def shutdown(self, *args, **kwargs):\n # Set shared variable to 0 to signal shutdown\n logger.debug(\"Setting value to cancel\")\n self.cancel_value.value = 0\n\n self.submit_process.join()\n self.collector_thread.join()\n\n return True", "def close(self):\n self.send(ActorExit)", "def exit_event(self, event):\n self.root.quit()", "def finishThread(self):\n logging.info(\"Fin Thread\")\n self.buildCreatedDict()\n self.cleanThread()\n self.accept()", "def quit(self, widget, data=None):\n self.destroy()", "def close(self):\n\n self.shm_command.write({'cmd': 'close', 'data': {}})\n time.sleep(0.2)", "def stop(self):\n logging.info(\"Shutting down thread...\")\n self.disconnect()\n self.running = False", "def DialogClose_clicked_cb(self, data=None):\n try:\n self.timer.cancel()\n self.timer = None\n except AttributeError:\n logging.warning(\"CloseDialog timer was already stopped\")\n self.builder.get_object('InfoDialog').hide()", "def closeEvent(self, event):\n sys.exit(0)", "def shutdown(self):\r\n method = moduleName + '.' + self.className + '.' + 'shutdown'\r\n if self.terminationStarted == False:\r\n #inform the manage that we wish to terminate\r\n # Then wait 50 miliseconds before retrying dQueue\r\n self.terminationStarted = True\r\n self.commQueue.put( [self, terminationSteps.START, self.localCommQueue] )\r\n self._stopevent.wait(self._sleepperiod) \r\n try:\r\n self.awaitVerification()\r\n self.finalizeShutdown()\r\n #raise SystemExit()\r\n sys.exit()\r\n except Exceptions.WorkerThreadTerminationRollback:\r\n raise Exceptions.WorkerThreadTerminationRollback()\r\n except Exceptions.WorkerThreadIndexError:\r\n # AE returned terminationVerificationMsg.ERROR\r\n self.finalizeShutdown(False)\r\n raise SystemExit()\r\n except Exception as e:\r\n errorMsg = \"Abnormal termination start of worker thread %s, for landmark %s. 
Traceback = %s\" %(self._Thread__name, self.queueID, e)\r\n Graph.logQ.put( [logType , logLevel.ERROR , method , errorMsg])\r\n self.finalizeShutdown(False)\r\n raise SystemExit(errorMsg)\r\n else:\r\n #We are truly ready to close\r\n try:\r\n self.finalizeShutdown()\r\n except Exceptions.WorkerThreadTerminationRollback:\r\n raise Exceptions.WorkerThreadTerminationRollback()\r\n except Exception:\r\n #errors already logged in the finalizeShutdown method\r\n pass", "def OnExit(self, event):\n self.Close(True)", "def OnExit(self, event):\n self.Close(True)", "def OnExit(self, event):\n self.Close(True)", "def OnExit(self, event):\n self.Close(True)", "def closeEvent(self, event):\r\n self.isWinOpen=False\r\n time.sleep(0.1)\r\n event.accept()" ]
[ "0.65258104", "0.647808", "0.6194087", "0.6059737", "0.6028249", "0.5981587", "0.59607416", "0.5957837", "0.5957837", "0.5957837", "0.5957837", "0.5846168", "0.58057123", "0.57886297", "0.574851", "0.5722326", "0.5718076", "0.57172066", "0.5709524", "0.5701404", "0.5693792", "0.568644", "0.56556547", "0.5641714", "0.56399125", "0.56164086", "0.5593753", "0.55523497", "0.553725", "0.5534733", "0.55254775", "0.5496856", "0.54956573", "0.5476098", "0.5470201", "0.54646367", "0.5420357", "0.54185295", "0.5397506", "0.5395882", "0.5391597", "0.5384067", "0.5368524", "0.53582877", "0.535608", "0.53448725", "0.5340409", "0.534033", "0.532276", "0.530216", "0.530199", "0.5300289", "0.52833074", "0.5282441", "0.5268673", "0.5261564", "0.5251045", "0.5249244", "0.52470917", "0.5226072", "0.5221498", "0.52139056", "0.52098763", "0.52079695", "0.51932734", "0.5192444", "0.518896", "0.5185659", "0.51817065", "0.51799864", "0.5157845", "0.5156094", "0.51507604", "0.5148234", "0.51480967", "0.51433295", "0.5138177", "0.5131247", "0.5125083", "0.5124008", "0.5118976", "0.51165444", "0.51156974", "0.5113587", "0.5113347", "0.5109172", "0.51032346", "0.5102155", "0.51009315", "0.5098854", "0.509821", "0.509479", "0.5094401", "0.5094189", "0.5089562", "0.50868446", "0.50868446", "0.50868446", "0.50868446", "0.50685894" ]
0.7335398
0
Posts a WM_QUIT message to the specified thread.
def postThreadMesssageQuit(uTid): fRc = False; try: win32api.PostThreadMessage(uTid, win32con.WM_QUIT, 0x40010004, 0); # DBG_TERMINATE_PROCESS # pylint: disable=no-member fRc = True; except: reporter.logXcpt('uTid=%s' % (uTid,)); return fRc;
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def postThreadMesssageClose(uTid):\n fRc = False;\n try:\n win32api.PostThreadMessage(uTid, win32con.WM_CLOSE, 0, 0); # pylint: disable=no-member\n fRc = True;\n except:\n reporter.logXcpt('uTid=%s' % (uTid,));\n return fRc;", "def quit(self, message):\n self.write(['QUIT'], message)\n self.hasquit = True\n # Wait for acknowledgement from the server. By RFC 2812 it should be\n # an ERROR msg, but many servers just close the connection. Either way\n # is fine by us.\n # Closing the connection now would mean that stuff in the buffers that\n # has not yet been processed would never be processed. It would also\n # release the main thread, which is problematic because whomever called\n # quit might still want to do something before main thread quits.", "def Quit(self):\n t = threading.Thread(target=self.server.shutdown)\n t.start()", "def ev_QUIT(self, event):\n raise SystemExit()", "def quit(self):\n self.socket.send(\"QUIT\")", "def _terminate_thread(thread):\n if not thread.isAlive():\n return\n\n exc = ctypes.py_object(SystemExit)\n res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(thread.ident), exc)\n if res == 0:\n raise ValueError(\"nonexistent thread id\")\n elif res > 1:\n # \"\"\"if it returns a number greater than one, you're in trouble,\n # and you should call it again with exc=NULL to revert the effect\"\"\"\n ctypes.pythonapi.PyThreadState_SetAsyncExc(thread.ident, None)\n raise SystemError(\"PyThreadState_SetAsyncExc failed\")", "def main_thread_exit(self):\n ...", "def ev_quit(self, event: tcod.event.Quit) -> T | None:", "def do_quit(self, arg):\n self.do_exit(arg)", "def onQuit(self, eventDict = None):\n self.mainApp.quit()", "def cleanThread(self):\n logging.info(\"Clean Thread\")\n self.thread.quit()\n self.thread.wait()", "def _quit(self):\n self.parent.quit() # stops mainloop\n self.parent.destroy() # this is necessary on Windows to prevent\n # Fatal Python Error: PyEval_RestoreThread: NULL tstate\n reactor.stop()", "def do_quit(self, arg):\n exit()", "def _quit():\r\n\twin.quit()\r\n\twin.destroy()\r\n\tquit()", "def ftp_QUIT(self, line):\n # From RFC-959:\n # This command terminates a USER and if file transfer is not\n # in progress, the server closes the control connection.\n # If file transfer is in progress, the connection will remain\n # open for result response and the server will then close it.\n if self.authenticated:\n msg_quit = self.authorizer.get_msg_quit(self.username)\n else:\n msg_quit = \"Goodbye.\"\n if len(msg_quit) <= 75:\n self.respond(\"221 %s\" %msg_quit)\n else:\n self.push(\"221-%s\\r\\n\" %msg_quit)\n self.respond(\"221 \")\n\n if not self.data_channel:\n self.close_when_done()\n else:\n # tell the cmd channel to stop responding to commands.\n self.quit_pending = True\n\n\n # --- data transferring", "def shutdown(self, signum, frame):\n self.log('WARNING', -1, 'Shutting down normally ...')\n main_thread = threading.current_thread()\n\n for t in threading.enumerate():\n if t is main_thread:\n continue\n t.join()\n self.server_socket.close()\n sys.exit(0)", "def SignalHandler_Quit(signum, frame):\n log('Received signal to quit: %s' % signum)\n \n global RUNNING\n RUNNING = False", "def quit_cmd(self):\n print_debug(\"Executing QUIT\")\n command = \"QUIT\\r\\n\"\n msg_rec = self.send_and_log(self.s, command)\n self.close_socket(self.s) # Close socket since we're done.\n return msg_rec", "def quit(self):\r\n \r\n self.qapp.quit()", "def shutdown():\n\tglobal StoreWorkerThread, 
StoreWorkerThreadLock\n\n\tStoreWorkerThreadLock.acquire()\n\t\n\tif not running():\n\t\t# for convenience, this is not an error\n\t\tStoreWorkerThread = None\n\t\tStoreWorkerThreadLock.release()\n\t\treturn\n\t\t\n\t# send 'quit' command\n\tStoreCmdQueue.put(('quit',))\n\t\n\t# wait for thread to exit\n\tStoreWorkerThread.join()\n\tStoreWorkerThread = None\n\t\n\tStoreWorkerThreadLock.release()", "def do_quit(self, args):\n quit()", "def OnQuit(self, e):\n\t\tself.EndRun()", "async def chat_quit(self, event):\n await self.send_json(\n return_value(\n ACTION_QUIT,\n event['label'],\n event['username'],\n MSG_ALERT,\n NO_MESSAGE\n )\n )", "def shutdown(self):\n self.thread.server.shutdown()\n self.thread.join()", "def quitme(self, evt=None):\n if evt:\n self.dbgprint(\"bye!\")\n sys.exit()", "def request_quit(self):\n self._socketpair[1].send(b'\\x00')", "def request_quit(self):\n self._socketpair[1].send(b'\\x00')", "def command_quit(self, arg):\n self.write('221 Bye', self.finish)", "def delete_thread(self, thread_uid: str):\n pass", "def clickQuit(self, event):\n self.quitFlag = True", "def on_closing(event=None):\r\n my_msg.set(\"{quit}\")\r\n send()", "def quit(phenny, input):\n # Can only be done in privmsg by the owner\n if input.sender.startswith('#'): return\n if input.owner: \n phenny.write(['QUIT'])\n __import__('sys').exit(0)", "def Quit(self):\n loop.quit()", "def Quit(self, event):\n pass", "def on_closing(event=None):\n my_msg.set(\"{quit}\")\n send()", "def on_closing(event=None):\n my_msg.set(\"{quit}\")\n send()", "def on_closing(event=None):\n my_msg.set(\"{quit}\")\n send()", "def on_closing(event=None):\n my_msg.set(\"{quit}\")\n send()", "def sendQuitFlag():\n simuConfig[\"FLAG.QUIT\"] = True", "def call_quit(self, _):\n return MENU_QUIT", "def call_quit(self, _):\n return MENU_QUIT", "def call_quit(self, _):\n return MENU_QUIT", "def quit(self, message: str = 'Disconnected') -> None:\n self.send(\"QUIT\", message)\n self.writer.close()", "def _quit(self, *args):\n self.cleanup()", "def do_quit(self, arg):\n cprint(('Thankyou for Using this todo Application!'), 'yellow')\n exit()", "def signal_shutdown(self, sig, frame):\n _LOGGER.debug('%s received; shutting down...',\n signal.Signals(sig).name) # pylint: disable=no-member\n self._shutdown = True\n if self._get_task:\n self._get_task.cancel()\n\n # Issue #8 - Cancel not processed until next message added to queue.\n # Just put a dummy object on the queue to ensure it is handled immediately.\n self._pending_messages.sync_q.put_nowait(None)", "def do_quit(self,line):\n self.quit()", "def bye(event=None):\r\n s_message.set(\"{quit}\")\r\n send()", "def workerExceptionThrown(self, exc, tb):\n self.worker.deleteLater()\n self.thread.quit()\n self.thread.wait()\n self.thread.deleteLater()\n print str(tb)\n self.iface.messageBar().pushMessage('Error:' + str(tb), level=QgsMessageBar.CRITICAL, duration=3)", "def TTAPI_ShutdownCompleted(self, sender, e):\r\n # Shutdown the Dispatcher\r\n if self._m_disp != None:\r\n self._m_disp.BeginInvokeShutdown()\r\n self._m_disp = None", "def handle_quit( self ):\n print \"bye!\"\n\n # really close the window\n return True", "def TTAPI_ShutdownCompleted(self, sender, e):\r\n # Shutdown the Dispatcher\r\n if self.m_disp != None:\r\n self.m_disp.BeginInvokeShutdown()\r\n self.m_disp = None", "def quit(self):\n self.disconnect()\n mySerialConnection = None\n logging.info(EXIT_STRING)\n self.frame.destroy()\n self.endCommand()\n #sys.exit()", "def quit(self, widget, data=None):\n 
self.destroy()", "def do_quit(self, arg):\n return True", "def do_quit(self, arg):\n return True", "def do_quit(self, arg):\n return True", "def do_quit(self, arg):\n return True", "def quit_worker(self):\n\n if self.isRunning():\n # TODO: Should find a better way of doing this by setting an external flag\n self.terminate()\n self.wait()\n\n self.pb.close()\n self.parent.setEnabled(True)\n self.parent.statusbar.showMessage(\"\")", "def workerFinished(self, ret):\n self.worker.deleteLater()\n self.thread.quit()\n self.thread.wait()\n self.thread.deleteLater()\n # remove widget from message bar\n self.iface.messageBar().popWidget(self.messageBar)\n if ret is not None:\n # report the result\n #layer, total_area = ret\n self.iface.messageBar().pushMessage('Finished!')\n else:\n # notify the user that something went wrong\n self.iface.messageBar().pushMessage('Job cancelled.', level=QgsMessageBar.WARNING, duration=3)", "def cb_quit(event):\n sys.exit()", "def do_quit(self, arg):\n\n print('Good Bye!')\n exit()", "def quit_application(self, event):\n self.Close()\n server.closeSocket()", "def terminate(self):\n print('Terminating Revshell thread.')\n self.server.close()", "def _process_quit(self, process_instance):\n process_instance.errcause = \"quitting process\"\n\n # Give the process notice to quit doing stuff.\n process_instance.quit()\n\n # Terminate IonProcessThread (may not have one, i.e. simple process)\n # @TODO: move this into process' on_quit()\n if getattr(process_instance, '_process', None) is not None and process_instance._process:\n process_instance._process.notify_stop()\n process_instance._process.stop()", "def quit(self):\n\n self.main_window.destroy()", "def menu_quit (self,widget,data):\n\t\tself.window.delete_event()", "def func_quit(self, data):\n check = bytes(data).decode().encode('ascii', 'ignore').decode().lower().rstrip()\n if check == 'quit':\n message = self.conf_th_ic.get_item(q_key='std-messages').get(check)\n self.func_sender(message)\n self.finish()\n return True", "def quit(tcp, userId):\n tcp.sendMessage('QUIT ' + userId)\n return True", "def quit(self, reason: str = None):\n self.logger.debug(\"Stopping event loop\")\n self.eventloop.stop()\n self.logger.info(\"Shutting down ZeroBot\" + f' with reason \"{reason}\"' if reason else \"\")\n self._shutdown_reason = reason", "def _selfTerminatorThread(self):\n if self._callerThread is None:\n return\n #Simple thread run function watches the caller thread.\n def callerWatcher():\n log.debug(\"Waiting for caller thread to die...\\n\\n\")\n #Poll the caller thread and check if it is still alive.\n while(self._callerThread.is_alive()):\n sleep(1)\n log.debug(\"Caller thread is longer alive ... 
terminate self...\\n\\n\")\n self.terminate()\n \n terminator = Thread(target=callerWatcher)\n terminator.start()", "def quit():\n raise EmbeddedConsoleExit", "def kill(self, threadid):\n self.rpc.call(MsfRpcMethod.CoreThreadKill, [threadid])", "def do_quit(self, args):\n raise SystemExit", "def quit(self):\n\n logging.warning(\"IPC: Quitting.\")\n try:\n self.subprocess.terminate()\n except OSError:\n logging.warning(\"Failed to terminate subprocess.\")\n self.thread_stop.set()\n\n if self.reader_thread:\n try:\n self.reader_thread.join()\n except RuntimeError:\n pass", "def irc_QUIT(self, prefix, params):\n user = re.match(self.user_regex, prefix)\n if len(params) == 0:\n reason = \"No Message\"\n else:\n reason = params[0]\n\n self.logger.debug(\n \"%s!%s@%s quit (%s)\" %\n (user.group(1), user.group(2), user.group(3), reason)\n )\n\n self.event_manager.fire(\"irc.quit\", user, reason)", "def ev_quit(self, event: Quit) -> None:", "def quit(exitcode = EXIT_CODE_OK):\n global _running\n global _exitcode\n _exitcode = exitcode\n _running = False", "def do_quit(ftp):\n try:\n ftp.quit_cmd()\n except Exception as e:\n print(\"An error has occurred: \" + str(e) + \"\\nPlease try again.\")\n return main_menu(ftp)", "def quit():\n\tsys.exit()", "def do_QUIT(self):\r\n self.send_response(200)\r\n self.end_headers()\r\n self.server.stop = True", "def shutdown(self, signum, frame):\n self.serverSocket.close()\n sys.exit(0)", "def shutdown(signum, frame): # pragma: no cover\n logging.info(\"Shutting down\")\n sys.exit(0)", "def quitme(self, evt=None):\n if evt:\n self.dbgprint(\"too much for testing: so-long\")\n sys.exit()", "def cmd_quit(args):", "def quit_window(self, value=None):\n exit()", "def quit(self):\n pygame.display.quit()\n os.unlink(os.path.expanduser(os.path.join('~', '.cmus', 'inhibit-osd')))\n if hasattr(self, 'lircsock'):\n pylirc.exit()\n if self.thread and self.queue:\n self.queue.put('quit', False)\n self.queue.join()\n self.activate_screensaver()", "def quit(self):\n return pygame.event.Event(pygame.QUIT)", "def do_quit(self, args):\n print('Good Bye!')\n exit()", "def quit(self):\n self.quit = True", "def quit(self, evt=None):\n self.log2Stdout()\n self.jobManager.stopLogging() \n self.mainwin.destroy()\n print 'closing plugin'\n return", "def quit(self):\n self.window.quit()\n self.window.destroy()", "def thread_finished(self):\n # self.worker.join()\n self.worker = None\n self.want_to_abort = False", "def do_quit(self, args):\n return True", "def do_quit(self, args):\n return True", "def shutdown(self):\n self.socket_thread.stop()", "async def module_command_quit(self, ctx, parsed):\n if parsed.invoker != ctx.owner:\n return\n reason = \" \".join(parsed.args[\"msg\"] or []) or \"Shutting down\"\n self.quit(reason)", "def quit(self: object) -> None:\n self.destroy()", "def quit(self, *args, **kwargs):\n pass", "def bcp_goodbye(self, **kwargs):\n if self.config['mediacontroller']['exit_on_disconnect']:\n self.socket_thread.sending_thread.stop()\n sys.exit()" ]
[ "0.6247474", "0.6094396", "0.58557373", "0.57782835", "0.57772505", "0.5738489", "0.5506181", "0.5447505", "0.5402179", "0.5354124", "0.5341512", "0.5266031", "0.5251934", "0.52516365", "0.52507806", "0.5238659", "0.5238613", "0.5237029", "0.5236302", "0.5228808", "0.522184", "0.52120554", "0.5207501", "0.5206864", "0.51987433", "0.5194935", "0.5194935", "0.5187494", "0.5186202", "0.51601225", "0.5152666", "0.5128962", "0.5128638", "0.51241976", "0.51201373", "0.51201373", "0.51201373", "0.51201373", "0.51130366", "0.5107898", "0.5107898", "0.5107898", "0.50958294", "0.50836575", "0.50463206", "0.5045567", "0.5032959", "0.5030551", "0.5029959", "0.50283366", "0.5010494", "0.5009608", "0.50084233", "0.5000152", "0.4996533", "0.4996533", "0.4996533", "0.4996533", "0.49946102", "0.49910915", "0.49822", "0.49763423", "0.4975718", "0.49686992", "0.49472696", "0.4944431", "0.4934608", "0.4927165", "0.49234307", "0.49225938", "0.49159703", "0.4911449", "0.49086925", "0.48994482", "0.48828503", "0.48764914", "0.48723617", "0.48571053", "0.48497748", "0.4843191", "0.48412412", "0.4836641", "0.48240447", "0.48144802", "0.481114", "0.48091042", "0.4801191", "0.4800572", "0.47993398", "0.47889745", "0.4773018", "0.47724676", "0.47472373", "0.47299424", "0.47299424", "0.47203556", "0.47184306", "0.47159645", "0.47145405", "0.47079897" ]
0.7239463
0
The Windows version of base.processTerminate
def processTerminate(uPid): # pylint: disable=no-member fRc = False; try: hProcess = win32api.OpenProcess(win32con.PROCESS_TERMINATE, False, uPid); except: reporter.logXcpt('uPid=%s' % (uPid,)); else: try: win32process.TerminateProcess(hProcess, 0x40010004); # DBG_TERMINATE_PROCESS fRc = True; except: reporter.logXcpt('uPid=%s' % (uPid,)); hProcess.Close(); #win32api.CloseHandle(hProcess) return fRc;
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def terminate(process):\n\n def terminate_win(process):\n import win32process\n return win32process.TerminateProcess(process._handle, -1)\n\n def terminate_nix(process):\n import os\n import signal\n return os.kill(process.pid, signal.SIGTERM)\n\n terminate_default = terminate_nix\n\n handlers = {\n \"win32\": terminate_win, \n \"linux2\": terminate_nix\n }\n\n return handlers.get(sys.platform, terminate_default)(process)", "def terminate(self):\n try:\n self.process.terminate()\n return \"Process Terminated Successfully\"\n except Exception as e:\n logging.exception(e)\n return \"Failed to Terminate process\"", "def terminate(self):\n self._proc.terminate()", "def processKill(uPid):\n return processTerminate(uPid);", "def kill(self):\r\n try:\r\n if self.process:\r\n self.process.kill()\r\n self.process.wait()\r\n except WindowsError:\r\n # kill may not be available under windows environment\r\n pass", "def terminate(self):\n if self.proc:\n self.proc.kill()\n self.proc = None", "def _TerminateProcessByPid(self, pid):\n self._RaiseIfNotRegistered(pid)\n\n process = self._processes_per_pid[pid]\n\n self._TerminateProcess(process)\n self._StopMonitoringProcess(process)", "def terminate(process):\n r = process.poll()\n if r is not None:\n return r\n if get_os_name() == Os_Windows:\n import win32process, win32api\n # Emulate POSIX behaviour, where the exit code will be the negative\n # value of the signal that terminated the process\n # Open with rights to terminate and synchronize\n handle = win32api.OpenProcess(0x1 | 0x100000, False, process.pid)\n win32process.TerminateProcess(handle, -signal.SIGTERM)\n else:\n try:\n os.kill(process.pid, signal.SIGTERM)\n time.sleep(.05)\n except OSError, err:\n if err.errno == errno.ECHILD:\n # Presumably, the child is dead already?\n pass\n else:\n raise\n if process.poll() is None:\n os.kill(process.pid, signal.SIGKILL)\n\n return process.wait()", "def terminate(self):\n self._update()\n if self.running_mode == \"local\":\n for process in self.processes:\n try:\n process.terminate()\n except psutil.NoSuchProcess:\n # The process has just terminated\n # In multiprocess run this is likely to happen when other processes stops.\n pass\n elif self.running_mode == \"grid\":\n subprocess.check_call(\"qdel %d\" % self.job[\"job_number\"], shell=True)\n pass\n else:\n logger.warning(\"Asked for termination of a Run not known to be running.\")", "def _KillProcess(self, pid):\n if sys.platform.startswith('win'):\n process_terminate = 1\n handle = ctypes.windll.kernel32.OpenProcess(\n process_terminate, False, pid)\n ctypes.windll.kernel32.TerminateProcess(handle, -1)\n ctypes.windll.kernel32.CloseHandle(handle)\n\n else:\n try:\n os.kill(pid, signal.SIGKILL)\n except OSError as exception:\n logger.error('Unable to kill process {0:d} with error: {1!s}'.format(\n pid, exception))", "def stop(self):\n # trying this instead of SIGTERM\n # http://stackoverflow.com/a/6659191/3380530\n # self._process.send_signal(SIGINT)\n # Or not. 
SIGINT doesn't exist on Windows\n self._process.terminate()", "def terminate_proc(proc_name=None, proc_id=None):\n assert proc_name or proc_id, \"Neither 'proc_name' nor 'proc_id' are passed.\"\n if sys.platform == \"win32\":\n if proc_name:\n query = ['/fi', 'IMAGENAME eq %s' % proc_name]\n elif proc_id:\n query = ['/fi', 'PID eq %s' % proc_id]\n output = killableprocess.check_output(\n ['tasklist',\n '/nh', # don't display column headers\n '/fo', 'CSV'] # --> \"MyApp.exe\",\"4380\",\"Console\",\"1\",\"395.604 K\"\n + query)\n output = output.decode(sys.getfilesystemencoding())\n proc_ids = []\n for line in output.decode(sys.getfilesystemencoding()).split(\"\\n\"):\n line = line.replace(\"\\r\", \"\")\n if '\"' in line:\n proc_ids.append(eval('[%s]' % line)[1])\n for id in proc_ids:\n killableprocess.call(['taskkill', '/f', '/t', '/pid', id])\n else:\n pass # necessary ?", "def stop(self):\n try:\n self.process.terminate()\n self.process = None\n except AttributeError:\n return", "def terminate():\n sys.exit()", "def terminate(self):\n self._stop_proc(signal.SIGTERM)", "def close(self):\r\n try:\r\n self.proc.terminate()\r\n except (OSError, AttributeError): # pragma: no cover\r\n pass\r\n self.proc = None", "def terminate(self):\n raise NotImplementedError()", "def terminate(self):\n return terminate(self)", "def _terminate(self):\n\n stopsignal = self.stopsignal\n\n log.info('Stop Process Requested')\n self._terminating = True\n if self._p0:\n log.info('Sending signal %s to process %s' % (stopsignal, self._p0.pid))\n kill_tree(self._p0.pid, stopsignal)\n elif self._p0 is None:\n raise errors.ChalmersError(\"This process did not start this program, can not call _terminate\")", "def _AbortTerminate(self):\n for pid, process in self._processes_per_pid.items():\n if not process.is_alive():\n continue\n\n logger.warning('Terminating process: {0:s} (PID: {1:d}).'.format(\n process.name, pid))\n process.terminate()", "def stop(self):\n if self._process is not None:\n self._process.terminate()", "def terminate(self):\n\t\tself.raise_exc(SystemExit)", "def exit_with_terminate(self):\n if FunctionServer.MAIN_PROCESS_ID == os.getpid():\n self._function_manager.terminate_processes()\n self._exit_event_loop()", "def terminate(self):\n self.sock.close()\n try:\n self.process.terminate()\n self.process.wait(timeout=self.STOP_TIMEOUT)\n except TimeoutExpired:\n self.process.kill()\n shutil.rmtree(self.rundir)", "def cli(ctx):\n with process_manager.process_manager(**ctx.parent.cm_kwargs) as pm:\n pm.shutdown()", "def kill(self, pid, returncode):\r\n kernel32 = ctypes.windll.kernel32\r\n handle = kernel32.OpenProcess(1, 1, pid)\r\n ret = kernel32.TerminateProcess(handle, returncode)\r\n kernel32.CloseHandle(handle)\r\n return (0 != ret)", "def terminate(self):\n if not self.running:\n return\n self._process.stdin.write(b\"-stay_open\\nFalse\\n\")\n self._process.stdin.flush()\n self._process.communicate()\n del self._process\n self.running = False", "def _terminate(self):\n\n # invoke the callback to perform custom cleanup actions.\n try:\n self._on_parent_process_kill()\n except:\n # for extra safety. 
But it is better to make sure\n # that the overridden callback method '_on_parent_process_kill' does not throw anything.\n pass\n\n # send a terminate signal which has to send an exception and break the infinite loop in main thread.\n from scalyr_agent import scalyr_logging\n\n logger = scalyr_logging.getLogger(__name__)\n\n logger.info(\"Sending SIGTERM to worker process with PID %s\" % (os.getpid()))\n os.kill(os.getpid(), signal.SIGTERM)\n\n # To be on the safe side, give other threads some time to handle the SIGTERM gracefully and then send SIGKILL\n time.sleep(self._time_before_kill)\n\n logger.info(\"Sending SIGKILL to worker process with PID %s\" % (os.getpid()))\n os.kill(os.getpid(), signal.SIGKILL)", "def terminate(self):\n if self.proc:\n logging.info(\"Terminating Proxy Server...\")\n self.proc.terminate()\n self.proc = None", "def _kill(self) -> None:\n if not hasattr(self, \"proc\"):\n raise FuzzFrontendError(\"Attempted to kill non-running PID.\")\n\n self.proc.terminate()\n try:\n self.proc.wait(timeout=0.5)\n L.info(\"Fuzzer subprocess exited with `%d`\", self.proc.returncode)\n except subprocess.TimeoutExpired:\n raise FuzzFrontendError(\"Subprocess could not terminate in time\")\n\n self._on = False", "def stop(self):\n if self._proc_is_alive():\n\n if os.name == 'nt':\n # os.killpg is not available on Windows\n # See: https://bugs.python.org/issue5115\n self._proc.kill()\n\n # When we kill the child process on Windows the return code\n # gets set to 1, so we want to reset the return code back to 0\n # in order to avoid creating logging output in the download(...)\n # method\n self._proc.returncode = 0\n else:\n os.killpg(self._proc.pid, signal.SIGKILL)\n\n self._set_returncode(self.STOPPED)", "def terminate(self):\n self.raise_exc(SystemExit)", "def terminate(\n proc: Popen,\n signal_fn: Callable[[Popen], Any] = Popen.terminate,\n wait_timeout: float = DEFAULT_TERMINATION_WAIT_TIMEOUT_SEC,\n) -> None:\n signal_fn(proc)\n try:\n proc.wait(timeout=wait_timeout)\n except TimeoutExpired:\n try:\n os.killpg(proc.pid, signal.SIGKILL)\n except ProcessLookupError:\n pass\n # It is not possible to inherit any potential orphaned grandchildren (unless we use the new\n # Linux-specific PR_SET_CHILD_SUBREAPER) so we can only wait for the main process.\n # We don't expect Kafka/ZK to have child processes anyway. 
Even if, init would clean up\n # orphaned processes - unless we're running in a Docker container without an init.\n proc.wait()", "async def terminate(self, restart=False) -> None:\n pass", "def test_stopProcessNaturalTermination(self):\r\n self.pm.startService()\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n self.assertIn(\"foo\", self.pm.protocols)\r\n\r\n # Configure fake process to die 1 second after receiving term signal\r\n timeToDie = self.pm.protocols[\"foo\"].transport._terminationDelay = 1\r\n\r\n # Advance the reactor to just before the short lived process threshold\r\n # and leave enough time for the process to die\r\n self.reactor.advance(self.pm.threshold)\r\n # Then signal the process to stop\r\n self.pm.stopProcess(\"foo\")\r\n\r\n # Advance the reactor just enough to give the process time to die and\r\n # verify that the process restarts\r\n self.reactor.advance(timeToDie)\r\n\r\n # We expect it to be restarted immediately\r\n self.assertEqual(self.reactor.seconds(),\r\n self.pm.timeStarted[\"foo\"])", "def _process_quit(self, process_instance):\n process_instance.errcause = \"quitting process\"\n\n # Give the process notice to quit doing stuff.\n process_instance.quit()\n\n # Terminate IonProcessThread (may not have one, i.e. simple process)\n # @TODO: move this into process' on_quit()\n if getattr(process_instance, '_process', None) is not None and process_instance._process:\n process_instance._process.notify_stop()\n process_instance._process.stop()", "def terminate(self):", "def shutdown():\n self_pid = os.getpid()\n logging.info('Forcibly terminating program (PID=%s)', self_pid)\n os.kill(self_pid, signal.SIGKILL)", "def shutdown():\n # attempt to call cleanup() for the running service\n try:\n service_class.cleanup()\n except Exception:\n sys.stderr.write(\"Error in %s.cleanup():\\n\" % service_class.__name__)\n traceback.print_exc(file=sys.stderr)\n sys.stderr.flush()\n\n # \"yarn dev\" doesn't pass SIGTERM to its children - to be safe, kill all\n # subprocesses of the child process first\n try:\n # children() returns parent processes first - start with children\n # instead to make killing \"yarn dev\" more reliable\n for subchild in reversed(child.children(recursive=True)):\n try:\n subchild.terminate()\n except psutil.NoSuchProcess:\n # we may have already caused it to exit by killing its parent\n pass\n child.terminate()\n except psutil.NoSuchProcess:\n # child already exited\n pass\n\n child.wait()\n if exit_mode == ExitMode.CHILD and child.returncode != 0:\n sys.stdout.buffer.write(child_stdout.to_bytes())\n sys.stdout.flush()\n sys.stderr.write(\n \"Subprocess %r exited with error %i:\\n\"\n % (command, child.returncode)\n )\n sys.stderr.buffer.write(child_stderr.to_bytes())\n sys.stderr.flush()", "def __exit__(self, exc_type, exc_value, traceback):\n if self.returncode is None and self.proc.poll() is None:\n self.proc.terminate()", "def terminate():\r\n pygame.quit()\r\n os._exit(1)", "def test_terminate_run(self):\n pass", "def terminate(self):\n return", "def terminate(self):\n return", "def kill_process(pid, exit_code=None):\n\n if exit_code is None:\n exit_code = DEFAULT_TERMINATION_EXIT_CODE\n\n try:\n handle = win32api.OpenProcess(win32con.PROCESS_TERMINATE, 0, pid)\n except pywintypes.error:\n return False # \"The parameter is incorrect.\"\n\n if not handle:\n return False\n\n try:\n win32api.TerminateProcess(handle, exit_code)\n return True\n except pywintypes.error:\n return False # \"Access is denied.\"\n finally:\n win32api.CloseHandle(handle)", 
"def shutdown():\n return subprocess.run([\"powershell.exe\", \"-Command\", \"Stop-Computer\", \"-ComputerName\", \"localhost\"], shell=True, universal_newlines=True, check=False).returncode", "def stop(self):\n if not self.process_pid:\n raise Exception('why is this being called? %s' % self.server_name)\n\n if self.stop_kill:\n os.kill(self.process_pid, signal.SIGTERM)\n rc = wait_for_fork(self.process_pid, raise_error=False)\n return (rc, '', '')", "def CloseProcessHandle(process_handle: int) -> int:\n return kernel32.CloseHandle(process_handle)", "def _kill_subprocess(self) -> None:\n assert current_thread() is self._subprocess_thread\n if self._subprocess is None:\n return\n\n print(f'{Clr.CYN}Stopping subprocess...{Clr.RST}')\n\n # First, ask it nicely to die and give it a moment.\n # If that doesn't work, bring down the hammer.\n self._subprocess.terminate()\n try:\n self._subprocess.wait(timeout=10)\n except subprocess.TimeoutExpired:\n self._subprocess.kill()\n self._reset_subprocess_vars()\n print(f'{Clr.CYN}Subprocess stopped.{Clr.RST}')", "def kill(self):\n self._update()\n if self.running_mode == \"local\":\n for process in self.processes:\n try:\n process.kill()\n except psutil.NoSuchProcess:\n # The process has just terminated\n # In multiprocess run this is likely to happen when other processes stops.\n pass\n elif self.running_mode == \"grid\":\n subprocess.check_call(\"qdel %d\" % self.job[\"job_number\"], shell=True)\n pass\n else:\n logger.warning(\"Asked for termination of a Run not known to be running.\")", "def terminate(self):\n self._running = False", "def kill(self):\n processes = ['MicrosoftEdge.exe', 'MicrosoftEdgeCP.exe', 'plugin-container.exe',\n 'browser_broker.exe', 'smartscreen.exe']\n for exe in processes:\n subprocess.call(['taskkill', '/F', '/T', '/IM', exe])", "def terminate(self): # pragma: no cover ; not tested / running over multiprocessing\n\n self.loop = False\n self._terminate()", "def terminate_program(msg=None):\n if msg:\n LOGGER.critical(msg)\n sys.exit(-1 if msg else 0)", "def close(self):\n if self.primary:\n os.close(self.primary)\n self.primary = None\n if self.secondary:\n os.close(self.secondary)\n self.secondary = None\n if hasattr(self, \"_process\") and self._process:\n if self._process.poll() is None:\n self._process.terminate()\n while self._process.poll() is None:\n time.sleep(0.001)\n self._process = None", "def terminate():\n dislin.disfin()", "def quit(self):\n if os.name == 'nt':\n self.hotkey.terminate()\n self.hotkey.wait(10)\n\n super().quit()", "def close(self) -> None:\n if self._process:\n self._process.terminate()\n self._process.wait()\n self._process = None", "def test_stopProcessForcedKill(self):\r\n self.pm.startService()\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n self.assertIn(\"foo\", self.pm.protocols)\r\n self.reactor.advance(self.pm.threshold)\r\n proc = self.pm.protocols[\"foo\"].transport\r\n # Arrange for the fake process to live longer than the killTime\r\n proc._terminationDelay = self.pm.killTime + 1\r\n self.pm.stopProcess(\"foo\")\r\n # If process doesn't die before the killTime, procmon should\r\n # terminate it\r\n self.reactor.advance(self.pm.killTime - 1)\r\n self.assertEqual(0.0, self.pm.timeStarted[\"foo\"])\r\n\r\n self.reactor.advance(1)\r\n # We expect it to be immediately restarted\r\n self.assertEqual(self.reactor.seconds(), self.pm.timeStarted[\"foo\"])", "def _terminate_daemon_process(self):\n pid = self.pidfile.read_pid()\n try:\n os.kill(pid, signal.SIGTERM)\n except OSError, exc:\n 
raise DaemonRunnerStopFailureError(\n \"Failed to terminate %(pid)d: %(exc)s\" % vars())", "def stop(self):\n # print \"process shutdown complete\"", "def __del__(self):\n if self.child_pid:\n self.host.Kill(self.child_pid, IperfClient.KILL_STRING)", "def kill():\n sb.call(\"Taskkill /IM SLDWORKS.exe /F\")", "def terminate(self):\n self.send_signal(signal.SIGTERM)", "def shutdown(self):\r\n method = moduleName + '.' + self.className + '.' + 'shutdown'\r\n if self.terminationStarted == False:\r\n #inform the manage that we wish to terminate\r\n # Then wait 50 miliseconds before retrying dQueue\r\n self.terminationStarted = True\r\n self.commQueue.put( [self, terminationSteps.START, self.localCommQueue] )\r\n self._stopevent.wait(self._sleepperiod) \r\n try:\r\n self.awaitVerification()\r\n self.finalizeShutdown()\r\n #raise SystemExit()\r\n sys.exit()\r\n except Exceptions.WorkerThreadTerminationRollback:\r\n raise Exceptions.WorkerThreadTerminationRollback()\r\n except Exceptions.WorkerThreadIndexError:\r\n # AE returned terminationVerificationMsg.ERROR\r\n self.finalizeShutdown(False)\r\n raise SystemExit()\r\n except Exception as e:\r\n errorMsg = \"Abnormal termination start of worker thread %s, for landmark %s. Traceback = %s\" %(self._Thread__name, self.queueID, e)\r\n Graph.logQ.put( [logType , logLevel.ERROR , method , errorMsg])\r\n self.finalizeShutdown(False)\r\n raise SystemExit(errorMsg)\r\n else:\r\n #We are truly ready to close\r\n try:\r\n self.finalizeShutdown()\r\n except Exceptions.WorkerThreadTerminationRollback:\r\n raise Exceptions.WorkerThreadTerminationRollback()\r\n except Exception:\r\n #errors already logged in the finalizeShutdown method\r\n pass", "def terminate(self):\n if not self.is_terminated:\n log.debug(\"terminal client terminated\")\n try:\n self.exec_command(b\"Quit\")\n except BrokenPipeError:\n # x3270 was terminated, since we are just quitting anyway, ignore it.\n pass\n except socket.error as e:\n # if 'was forcibly closed' not in str(e):\n if e.errno != errno.ECONNRESET:\n raise\n # this can happen because wc3270 closes the socket before\n # the read() can happen, causing a socket error\n\n self.app.close()\n\n self.is_terminated = True", "def __del__(self):\n if self.child_pid:\n self.host.Kill(self.child_pid, IperfServer.KILL_STRING)", "def Stop(self):\n if self.child_pid:\n self.data = self.host.Communicate(self.child_pid, echo_error=True,\n kill=True,\n kill_string=IperfServer.KILL_STRING)\n self.child_pid = None", "def killProcess(self):\n if self._processEnded:\n return defer.succeed(None)\n self.onProcessEnd = defer.Deferred()\n self.transport.signalProcess('KILL')\n return self.onProcessEnd", "def _terminateAll(self):\n\n # Termination of all processes\n try :\n for process in self.processes:\n process.terminate()\n except AttributeError:\n pass\n\n return", "def exit(self):\n self.tcp_server_exit_event.set()\n for _, process in self.name_to_process.items():\n process.terminate()", "def Stop(self):\n if self.child_pid:\n self.data = self.host.Communicate(self.child_pid, echo_error=True,\n kill=(not self.length),\n kill_string=IperfClient.KILL_STRING)\n self.child_pid = None", "def kill_gracefully(process, timeout=2):\n try:\n with suppress(ProcessLookupError):\n process.terminate()\n stdout, stderr = process.communicate(timeout=timeout)\n except TimeoutExpired:\n process.kill()\n stdout, stderr = process.communicate()\n\n return process.returncode, stdout, stderr", "def kill(self):\n\n self.proc.kill()", "def _CloseHandle(self):\n ret = 
win32functions.CloseHandle(self.process)\n #win32api.CloseHandle(self.process)\n\n if ret == 0:\n ActionLogger().log('Warning: cannot close process handle!')\n #raise WinError()", "def on_terminate(self, agentName, process):\n self.log.info(\"%s's process with ID: %s has been terminated successfully\" % (agentName, process.pid))", "def shutdown():\n os.kill(os.getpid(), signal.SIGTERM)", "def TerminalClientStop(self, exitCode=200):\n pass", "def terminate(self):\n logger.debug(\"Outbox:terminate\")\n self._lock_terminate.acquire()\n assert self._terminated != True\n self._find.terminate()\n self._sum.terminate()\n self._tag.terminate()\n self._register.terminate()\n self._dispatcher.terminate()\n self._terminated = True\n self._lock_terminate.release()", "def _on_parent_process_kill(self):", "def terminate(self):\n self._terminate = True\n if self.started:\n self.join()", "def pkill(process_name):\n try:\n killed = os.system('taskkill /im ' + process_name)\n except Exception:\n killed = 0\n return killed", "def terminate(self):\n self._running = False", "def abort(self):\n if self.processes is None:\n return\n\n for p in self.processes:\n if p.poll() is None:\n p.terminate()\n try:\n p.wait(timeout=2)\n except subprocess.TimeoutExpired:\n p.kill()\n # Don't catch the TimeoutExpired exception as\n # wait should return immediately after the process\n # was killed. If this wait times out just let\n # the exception terminate the execution as\n # something has serriously gone wrong if the\\\n # process is still running.\n p.wait(timeout=5)", "def terminate(self):\n self.terminated = True", "def cleanup():\n logger.critical(\"Program termination cleanup routine executing.\")\n # Using os._exit() to fix a bug in subprocess.popen that causes the\n # interpreter to hang after on regular sys.exit, exit, or quit call.\n os._exit(0)", "def kill_program_completly(proc):\n kill_process_children(proc.pid)\n proc.terminate()\n os._exit(0)", "def terminate(exitmsg: str):\n print(exitmsg)\n sys.exit(1)", "def _terminate_thread(thread):\n if not thread.isAlive():\n return\n\n exc = ctypes.py_object(SystemExit)\n res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(thread.ident), exc)\n if res == 0:\n raise ValueError(\"nonexistent thread id\")\n elif res > 1:\n # \"\"\"if it returns a number greater than one, you're in trouble,\n # and you should call it again with exc=NULL to revert the effect\"\"\"\n ctypes.pythonapi.PyThreadState_SetAsyncExc(thread.ident, None)\n raise SystemError(\"PyThreadState_SetAsyncExc failed\")", "def on_terminate(self):\n pass", "def kill():\n Log.info(\"Kill tns processes.\")\n if Settings.HOST_OS == OSType.WINDOWS:\n Process.kill(proc_name='node')\n else:\n Process.kill(proc_name='node', proc_cmdline=Settings.Executables.TNS)\n Process.kill_by_commandline(cmdline='webpack.js')", "def terminate(self):\n if self._process and self._process.is_alive():\n self.log.info(\"Sending termination message to manager.\")\n try:\n self._parent_signal_conn.send(DagParsingSignal.TERMINATE_MANAGER)\n except ConnectionError:\n pass", "def terminate(self):\n num_terminated = 0\n for identifier, command in self.commands:\n if command.terminate():\n num_terminated += 1\n if num_terminated > 0:\n logger.warning(\"Terminated %s ..\", pluralize(num_terminated, \"external command\"))\n return num_terminated", "def terminate(self):\n for processor in self._processors.values():\n Stats.decr(\n \"dag_processing.processes\", tags={\"file_path\": processor.file_path, \"action\": \"terminate\"}\n 
)\n processor.terminate()", "def shutdown(self):\n\n if not self.proc:\n return\n try:\n if self.proc.poll() is None:\n kill(self.proc)\n for attempt in range(5):\n if self.proc.poll() is not None:\n return\n LOG.info('Waiting %dth for PID %d to exit...'\n % (5 - attempt, self.proc.pid))\n time.sleep(1)\n kill(self.proc, signal.SIGKILL)\n self.proc.wait()\n except:\n LOG.exception('ignoring uncaught exception while shutting down')", "def terminate(self):\n self._close()", "def platform_stop(self):\n self.platform.stop()", "def stop(self):\n if platform.system() == 'Windows':\n self._stop_daemons()\n for _, plugin in self._store.get_plugins().items():\n process = plugin.get_process()\n if process is not None and isinstance(\n process, subprocess.Popen) and not process.stdout.closed:\n process.stdout.close()", "def terminate():\n pygame.quit()\n sys.exit(0)", "def exit(self):\n if self._isSubProcessRunning() and self._exitCommand is not None:\n self.__process.stdin.write(self._exitCommand)\n self.__process.stdin.write(os.linesep)\n self.__process.stdin.flush()\n time.sleep(0.5)\n \n if self._isSubProcessRunning() :\n self.__process.kill()\n time.sleep(0.1)\n print 'Done!'" ]
[ "0.7198852", "0.6692012", "0.65206206", "0.65205437", "0.63315195", "0.6170306", "0.6133239", "0.60618556", "0.6033226", "0.60128194", "0.5967718", "0.5818627", "0.5798409", "0.57777244", "0.57758033", "0.5771237", "0.57694376", "0.57538503", "0.57486963", "0.5722882", "0.5717924", "0.5716761", "0.5672451", "0.56119466", "0.5598246", "0.55823064", "0.55636805", "0.5556215", "0.5529925", "0.5529005", "0.5525442", "0.552542", "0.5501763", "0.5500075", "0.54858667", "0.5480596", "0.5463131", "0.54604936", "0.54570013", "0.5455672", "0.5452615", "0.54465", "0.54354936", "0.54354936", "0.5417566", "0.5380091", "0.5376032", "0.5374571", "0.5358066", "0.535235", "0.53507245", "0.53389496", "0.53373563", "0.53177565", "0.53104293", "0.52806354", "0.5263316", "0.52600753", "0.5245693", "0.5219693", "0.52177465", "0.521759", "0.5216289", "0.52098346", "0.5205087", "0.5198405", "0.5180574", "0.51762074", "0.51596385", "0.51589894", "0.5158059", "0.5157394", "0.513633", "0.5135726", "0.5132675", "0.5131995", "0.5124967", "0.5124863", "0.51245856", "0.5123084", "0.5122496", "0.5121054", "0.511997", "0.51134217", "0.51132965", "0.51061815", "0.51029027", "0.5102359", "0.50976974", "0.5095732", "0.5091156", "0.50858885", "0.50815165", "0.5080359", "0.50737816", "0.5072069", "0.5051468", "0.5050784", "0.5050592", "0.5047685" ]
0.62610066
5
The Windows version of base.processKill
def processKill(uPid): return processTerminate(uPid);
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def kill(self):\r\n try:\r\n if self.process:\r\n self.process.kill()\r\n self.process.wait()\r\n except WindowsError:\r\n # kill may not be available under windows environment\r\n pass", "def _KillProcess(self, pid):\n if sys.platform.startswith('win'):\n process_terminate = 1\n handle = ctypes.windll.kernel32.OpenProcess(\n process_terminate, False, pid)\n ctypes.windll.kernel32.TerminateProcess(handle, -1)\n ctypes.windll.kernel32.CloseHandle(handle)\n\n else:\n try:\n os.kill(pid, signal.SIGKILL)\n except OSError as exception:\n logger.error('Unable to kill process {0:d} with error: {1!s}'.format(\n pid, exception))", "def terminate(process):\n\n def terminate_win(process):\n import win32process\n return win32process.TerminateProcess(process._handle, -1)\n\n def terminate_nix(process):\n import os\n import signal\n return os.kill(process.pid, signal.SIGTERM)\n\n terminate_default = terminate_nix\n\n handlers = {\n \"win32\": terminate_win, \n \"linux2\": terminate_nix\n }\n\n return handlers.get(sys.platform, terminate_default)(process)", "def pkill(process_name):\n try:\n killed = os.system('taskkill /im ' + process_name)\n except Exception:\n killed = 0\n return killed", "def kill():\n Log.info(\"Kill tns processes.\")\n if Settings.HOST_OS == OSType.WINDOWS:\n Process.kill(proc_name='node')\n else:\n Process.kill(proc_name='node', proc_cmdline=Settings.Executables.TNS)\n Process.kill_by_commandline(cmdline='webpack.js')", "def kill(self):\n\n #Kill relevant process names\n if self.driver_type != 'firefox_wdm':\n os.system('pkill -f chrome')\n os.system('pkill -f Chrome')\n os.system('pkill -f chromedriver')\n else:\n os.system('pkill -f FireFox')\n #TODO: confirm this -> os.system('pkill -f geckodriver')", "def _kill(self) -> None:\n if not hasattr(self, \"proc\"):\n raise FuzzFrontendError(\"Attempted to kill non-running PID.\")\n\n self.proc.terminate()\n try:\n self.proc.wait(timeout=0.5)\n L.info(\"Fuzzer subprocess exited with `%d`\", self.proc.returncode)\n except subprocess.TimeoutExpired:\n raise FuzzFrontendError(\"Subprocess could not terminate in time\")\n\n self._on = False", "def test_stopProcessForcedKill(self):\r\n self.pm.startService()\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n self.assertIn(\"foo\", self.pm.protocols)\r\n self.reactor.advance(self.pm.threshold)\r\n proc = self.pm.protocols[\"foo\"].transport\r\n # Arrange for the fake process to live longer than the killTime\r\n proc._terminationDelay = self.pm.killTime + 1\r\n self.pm.stopProcess(\"foo\")\r\n # If process doesn't die before the killTime, procmon should\r\n # terminate it\r\n self.reactor.advance(self.pm.killTime - 1)\r\n self.assertEqual(0.0, self.pm.timeStarted[\"foo\"])\r\n\r\n self.reactor.advance(1)\r\n # We expect it to be immediately restarted\r\n self.assertEqual(self.reactor.seconds(), self.pm.timeStarted[\"foo\"])", "def remote_kill():", "def kill(self, pid, returncode):\r\n kernel32 = ctypes.windll.kernel32\r\n handle = kernel32.OpenProcess(1, 1, pid)\r\n ret = kernel32.TerminateProcess(handle, returncode)\r\n kernel32.CloseHandle(handle)\r\n return (0 != ret)", "def _kill_kernel(self):", "def kill():\n sb.call(\"Taskkill /IM SLDWORKS.exe /F\")", "def kill(self):\n\n self.proc.kill()", "def _TerminateProcessByPid(self, pid):\n self._RaiseIfNotRegistered(pid)\n\n process = self._processes_per_pid[pid]\n\n self._TerminateProcess(process)\n self._StopMonitoringProcess(process)", "def _on_parent_process_kill(self):", "def __del__(self):\n if self.child_pid:\n 
self.host.Kill(self.child_pid, IperfClient.KILL_STRING)", "def kill(self):\n processes = ['MicrosoftEdge.exe', 'MicrosoftEdgeCP.exe', 'plugin-container.exe',\n 'browser_broker.exe', 'smartscreen.exe']\n for exe in processes:\n subprocess.call(['taskkill', '/F', '/T', '/IM', exe])", "def kill(pid):\n p = psutil.Process(pid)\n\n try:\n p.kill()\n except Exception:\n pass", "def cli(ctx):\n with process_manager.process_manager(**ctx.parent.cm_kwargs) as pm:\n pm.shutdown()", "def __del__(self):\n if self.child_pid:\n self.host.Kill(self.child_pid, IperfServer.KILL_STRING)", "def stop(self):\n # trying this instead of SIGTERM\n # http://stackoverflow.com/a/6659191/3380530\n # self._process.send_signal(SIGINT)\n # Or not. SIGINT doesn't exist on Windows\n self._process.terminate()", "def kill(self):\n self._update()\n if self.running_mode == \"local\":\n for process in self.processes:\n try:\n process.kill()\n except psutil.NoSuchProcess:\n # The process has just terminated\n # In multiprocess run this is likely to happen when other processes stops.\n pass\n elif self.running_mode == \"grid\":\n subprocess.check_call(\"qdel %d\" % self.job[\"job_number\"], shell=True)\n pass\n else:\n logger.warning(\"Asked for termination of a Run not known to be running.\")", "def kill(self):\n if self.process is not None:\n LOGGER.info('Killing command...')\n self.process.kill()\n self.process = None", "def kill_process(proc):\r\n p1_group = psutil.Process(proc.pid)\r\n\r\n child_pids = p1_group.get_children(recursive=True)\r\n\r\n for child_pid in child_pids:\r\n os.kill(child_pid.pid, signal.SIGKILL)", "def script_kill(self):\n return self._execute([b'SCRIPT', b'KILL'], b'OK')", "def _kill_self():\n os.kill(os.getpid(), signal.SIGKILL)", "def killProcess(self):\n if self._processEnded:\n return defer.succeed(None)\n self.onProcessEnd = defer.Deferred()\n self.transport.signalProcess('KILL')\n return self.onProcessEnd", "def terminate_proc(proc_name=None, proc_id=None):\n assert proc_name or proc_id, \"Neither 'proc_name' nor 'proc_id' are passed.\"\n if sys.platform == \"win32\":\n if proc_name:\n query = ['/fi', 'IMAGENAME eq %s' % proc_name]\n elif proc_id:\n query = ['/fi', 'PID eq %s' % proc_id]\n output = killableprocess.check_output(\n ['tasklist',\n '/nh', # don't display column headers\n '/fo', 'CSV'] # --> \"MyApp.exe\",\"4380\",\"Console\",\"1\",\"395.604 K\"\n + query)\n output = output.decode(sys.getfilesystemencoding())\n proc_ids = []\n for line in output.decode(sys.getfilesystemencoding()).split(\"\\n\"):\n line = line.replace(\"\\r\", \"\")\n if '\"' in line:\n proc_ids.append(eval('[%s]' % line)[1])\n for id in proc_ids:\n killableprocess.call(['taskkill', '/f', '/t', '/pid', id])\n else:\n pass # necessary ?", "def kill(self):\n self._process.kill()", "def kill(self):\n self._stop_proc(signal.SIGKILL)", "def kill_process_by_pid(duthost, container_name, program_name, program_pid):\n kill_cmd_result = duthost.shell(\"docker exec {} kill -SIGKILL {}\".format(container_name, program_pid))\n\n # Get the exit code of 'kill' command\n exit_code = kill_cmd_result[\"rc\"]\n pytest_assert(exit_code == 0, \"Failed to stop program '{}' before test\".format(program_name))\n\n logger.info(\"Program '{}' in container '{}' was stopped successfully\"\n .format(program_name, container_name))", "def kill(self):\n if self.transport.pid is not None:\n self.transport.signalProcess('KILL')", "def kill_process(process):\n \n if process == None:\n print(\"No process to kill.\")\n pass\n else:\n 
os.killpg(os.getpgid(process.pid), signal.SIGTERM)\n process = None\n print(\"Process killed.\")\n return None", "def killMongosProc():\n cmd = [\"pgrep -f \\\"\" + MONGOS_KSTR + \"\\\" | xargs kill -9\"]\n executeCommand(cmd)", "def kill(pid):\n # If the process doesn't exist, it raises an exception that we can ignore.\n try:\n os.kill(pid, signal.SIGKILL)\n except OSError:\n pass", "def stop(self):\n if self._proc_is_alive():\n\n if os.name == 'nt':\n # os.killpg is not available on Windows\n # See: https://bugs.python.org/issue5115\n self._proc.kill()\n\n # When we kill the child process on Windows the return code\n # gets set to 1, so we want to reset the return code back to 0\n # in order to avoid creating logging output in the download(...)\n # method\n self._proc.returncode = 0\n else:\n os.killpg(self._proc.pid, signal.SIGKILL)\n\n self._set_returncode(self.STOPPED)", "def terminate(self):\n try:\n self.process.terminate()\n return \"Process Terminated Successfully\"\n except Exception as e:\n logging.exception(e)\n return \"Failed to Terminate process\"", "def GET_kill(self):\n sys.exit(0)", "def kill_process(pid, exit_code=None):\n\n if exit_code is None:\n exit_code = DEFAULT_TERMINATION_EXIT_CODE\n\n try:\n handle = win32api.OpenProcess(win32con.PROCESS_TERMINATE, 0, pid)\n except pywintypes.error:\n return False # \"The parameter is incorrect.\"\n\n if not handle:\n return False\n\n try:\n win32api.TerminateProcess(handle, exit_code)\n return True\n except pywintypes.error:\n return False # \"Access is denied.\"\n finally:\n win32api.CloseHandle(handle)", "def _kill_subprocess(self) -> None:\n assert current_thread() is self._subprocess_thread\n if self._subprocess is None:\n return\n\n print(f'{Clr.CYN}Stopping subprocess...{Clr.RST}')\n\n # First, ask it nicely to die and give it a moment.\n # If that doesn't work, bring down the hammer.\n self._subprocess.terminate()\n try:\n self._subprocess.wait(timeout=10)\n except subprocess.TimeoutExpired:\n self._subprocess.kill()\n self._reset_subprocess_vars()\n print(f'{Clr.CYN}Subprocess stopped.{Clr.RST}')", "def kill_process(self,PID):\n os.system(\"sudo kill {}\".format(PID))\n return True", "def kill(self):\n if self.client is None:\n # never started, can't stop - should be warning or exception?\n return False\n try:\n self.client.kill()\n except Py4JError:\n logger.debug(\"Error while attempting to kill\", exc_info=1)\n # fallback\n self.yarn_api.kill(self.app_id)\n if self.proc is not None:\n self.client_gateway.shutdown()\n if on_windows:\n call([\"cmd\", \"/c\", \"taskkill\", \"/f\", \"/t\", \"/pid\",\n str(self.proc.pid)])\n self.proc.terminate()\n self.proc.communicate()\n self.proc = None\n self.client = None\n out = self.runtime_status() == 'KILLED'\n return out", "def kill(proc_pid: int) -> None:\n\n if not psutil.pid_exists(proc_pid):\n return\n\n process = psutil.Process(proc_pid)\n\n for proc in process.children(recursive=True):\n proc.kill()\n\n process.kill()", "def terminate(self):\n if self.proc:\n self.proc.kill()\n self.proc = None", "def stop(self):\n if not self.process_pid:\n raise Exception('why is this being called? 
%s' % self.server_name)\n\n if self.stop_kill:\n os.kill(self.process_pid, signal.SIGTERM)\n rc = wait_for_fork(self.process_pid, raise_error=False)\n return (rc, '', '')", "def stop(self):\n try:\n self.process.terminate()\n self.process = None\n except AttributeError:\n return", "def kill_child(self, pid):\n # try communicate\n try:\n self._child_pids[pid].communicate()\n except Exception:\n print('Could not communicate to child')\n try:\n self.execute_command(\"kill -9 \"+str(pid))\n except Exception as e:\n print(e)", "def sessionkill(self):\n self.rpc.call(MsfRpcMethod.ConsoleSessionKill, [self.cid])", "def kill(self):\n self.proc.kill()\n self.proc.wait()\n self.thread.join()", "def stop():\n\n crate = get_crate()\n # Tell the thread to stop\n crate.mch_comms.stop = True\n # Stop the ipmitool shell process\n try:\n if crate.mch_comms.ipmitool_shell:\n crate.mch_comms.ipmitool_shell.terminate()\n crate.mch_comms.ipmitool_shell.kill()\n except:\n pass", "def killProcessByName(name, user = os.getpid(), sig = None):\n\n pids = findPIDs(name = name, user = user)\n\n if len(pids) == 0:\n #We have no processes to kill of this type\n return pids\n\n command = ['kill']\n if sig:\n command.append('-%i' % sig)\n for pid in pids:\n command.append(pid)\n\n subprocess.Popen(command, stdout=subprocess.PIPE).communicate()[0]\n \n\n return pids", "def stopProcesses(*args):\n _stopProcessSet(_running)", "def shutdown():\n return subprocess.run([\"powershell.exe\", \"-Command\", \"Stop-Computer\", \"-ComputerName\", \"localhost\"], shell=True, universal_newlines=True, check=False).returncode", "def Stop(self):\n if self.child_pid:\n self.data = self.host.Communicate(self.child_pid, echo_error=True,\n kill=True,\n kill_string=IperfServer.KILL_STRING)\n self.child_pid = None", "def try_kill_process(proc):\n pid = proc.pid\n LOG.info(\"Killing process %s\" % pid)\n try:\n os.kill(pid, signal.SIGKILL)\n except Exception:\n LOG.exception(\"Failed to kill %s\" % pid)", "def Stop(self):\n if self.child_pid:\n self.data = self.host.Communicate(self.child_pid, echo_error=True,\n kill=(not self.length),\n kill_string=IperfClient.KILL_STRING)\n self.child_pid = None", "def stop(self):\n if self._process is not None:\n self._process.terminate()", "def stop(self):\n os.killpg(os.getpgid(self.process.pid), signal.SIGTERM)", "def __del__(self):\n self._proc.kill()", "def terminate(process):\n r = process.poll()\n if r is not None:\n return r\n if get_os_name() == Os_Windows:\n import win32process, win32api\n # Emulate POSIX behaviour, where the exit code will be the negative\n # value of the signal that terminated the process\n # Open with rights to terminate and synchronize\n handle = win32api.OpenProcess(0x1 | 0x100000, False, process.pid)\n win32process.TerminateProcess(handle, -signal.SIGTERM)\n else:\n try:\n os.kill(process.pid, signal.SIGTERM)\n time.sleep(.05)\n except OSError, err:\n if err.errno == errno.ECHILD:\n # Presumably, the child is dead already?\n pass\n else:\n raise\n if process.poll() is None:\n os.kill(process.pid, signal.SIGKILL)\n\n return process.wait()", "def kill_pid(pid):\n try:\n # Unable to import 'module'\n # pylint: disable=no-member,F0401\n import signal\n return os.kill(pid, signal.SIGTERM)\n except ImportError:\n pass", "def _stop_binary(self, alias):\n if alias is None:\n command = process_manager.ProcessManager.STOP + '\\n'\n else:\n command = '%s %s\\n' % (process_manager.ProcessManager.STOP, alias)\n self._socket.sendall(command)", "def phone_kill(self) -> None:", "def 
_kill_cbbackupmgr(self):\n self.sleep(1, \"times need for cbbackupmgr process run\")\n shell = RemoteMachineShellConnection(self.backupset.backup_host)\n if self.os_name != \"windows\":\n cmd = \"ps aux | grep cbbackupmgr | gawk '{print $2}' | xargs kill -9\"\n output, _ = shell.execute_command(cmd)\n else:\n cmd = \"tasklist | grep cbbackupmgr | gawk '{printf$2}'\"\n output, _ = shell.execute_command(cmd)\n if output:\n kill_cmd = \"taskkill /F /T /pid %d \" % int(output[0])\n output, _ = shell.execute_command(kill_cmd)\n if output and \"SUCCESS\" not in output[0]:\n self.fail(\"Failed to kill cbbackupmgr on windows\")\n shell.disconnect()", "def processTerminate(uPid):\n # pylint: disable=no-member\n fRc = False;\n try:\n hProcess = win32api.OpenProcess(win32con.PROCESS_TERMINATE, False, uPid);\n except:\n reporter.logXcpt('uPid=%s' % (uPid,));\n else:\n try:\n win32process.TerminateProcess(hProcess, 0x40010004); # DBG_TERMINATE_PROCESS\n fRc = True;\n except:\n reporter.logXcpt('uPid=%s' % (uPid,));\n hProcess.Close(); #win32api.CloseHandle(hProcess)\n return fRc;", "def shutdown():\n self_pid = os.getpid()\n logging.info('Forcibly terminating program (PID=%s)', self_pid)\n os.kill(self_pid, signal.SIGKILL)", "def kill_application(css_win_login, application_path):\n kill_app = f\"taskkill /FI \\\"IMAGENAME eq {application_path}\\\" /F & \"\n rc, result, error = css_win_login.send_command(kill_app)\n return rc, result, error", "def _kill():\n\n messagebox.showerror(\n title=const.TSSD_ERROR_TITLE,\n message=const.TSSD_ERROR_MSG\n )\n\n sys.exit(3)", "def Kill(cls, pid, children=False):\n\t\tif pid is not None:\n\t\t\tif children:\n\t\t\t\tfor cpid, _, cmd in cls.Children(pid):\n\t\t\t\t\t# We need to recursively kill the childrens\n\t\t\t\t\tcls.Kill(cpid, children=True)\n\t\t\tLogger.Info(\"Killing process: \" + repr(pid))\n\t\t\treturn popen(\"kill -9 %s\" % (pid))\n\t\telse:\n\t\t\treturn None", "def kill(self,process_list,signal = 'TERM'):\n res = []\n pids = {}\n for process in process_list:\n if hasattr(process,'machine'):\n try:\n worker = self.worker_by_name[process.machine]\n except KeyError:\n worker = self.worker_by_name[process.long_machine]\n pid = process.pid\n else:\n worker = self.workers[process[0]]\n pid = process[1]\n try:\n pids[worker] = pids[worker] + ' ' + str(pid)\n except:\n pids[worker] = str(pid)\n for worker,value in pids.items():\n arg = 'kill -s ' + signal + ' %s' % (level,value)\n res.append(worker.apply(os.system,(arg,)))\n return res", "def stop(self, *args):\n if args[0] == 'all':\n for k, v in self.processers.items():\n if v:\n try:\n v.terminate()\n except:\n pass\n print 'Killed %s.' % k\n\n self.processers = dict.fromkeys(self.processers.keys())\n else:\n seq = args[0]\n try:\n self.processers['process%s' % seq].terminate()\n self.processers['process%s' % seq] = None\n print 'Killed process%s.' % seq\n except:\n print 'Have no process%s.' 
% seq", "def _kill_process(self, box_config):\n try:\n self.logger.info(f'kill: {box_config.process_name} {{')\n self.logger.info(f'target process pid={box_config.pid}')\n if box_config.pid and psutil.pid_exists(box_config.pid):\n p = psutil.Process(box_config.pid)\n p.kill()\n p.wait()\n box_config.pid = None\n self.bc_dao.update(box_config)\n remove_pid_file(box_config.process_name)\n except Exception:\n self.logger.error(f'Exception on killing: {box_config.process_name}', exc_info=True)\n finally:\n self.logger.info('}')", "def safe_kill(po):\r\n try:\r\n po.kill()\r\n except OSError as e:\r\n if e.errno != errno.ESRCH:\r\n raise\r\n po.wait()", "def terminate(self):\n self._proc.terminate()", "def kill(self):\n self.send_signal(signal.SIGKILL)", "def stop(self, kill=False):\n if not self._process:\n raise JubaTestFixtureFailedError('this instance has not been started yet')\n\n try:\n if kill:\n log.debug('KILLing process')\n self._process.kill()\n else:\n log.debug('terminating process')\n self._process.terminate()\n except OSError as e:\n if e.errno != errno.ESRCH: # \"No such process\"\n raise e\n # may be a race between poll and signal; just ignore\n log.debug('race between poll and signal detected')\n finally:\n (self.stdout, self.stderr) = self._process.communicate()\n self._process = None", "def stop_btslog():\r\n kill_process(['btslog.exe', 'BTSlog2.exe'])", "def _run(proc: Popen, timeout):\n try:\n return proc.wait(timeout=timeout)\n except TimeoutExpired:\n pass\n if sys.platform != 'win32':\n proc.send_signal(signal.SIGINT)\n try:\n return proc.wait(timeout=5)\n except TimeoutExpired:\n pass\n\n proc.terminate() # SIGTERM\n try:\n return proc.wait(timeout=5)\n except TimeoutExpired:\n pass\n\n proc.kill() # SIGKILL\n return proc.wait(timeout=5)", "def kill(self):\n self.status = Modem.Status.KILL", "def suicide(self):\n es.sexec(self.userid, \"kill\")", "def kill_process_by_port(port):\n port = int(port)\n pid = get_pid_by_port(port)\n if pid:\n return kill(pid)", "def test_KillCommand(execute):\n from paradrop.confd.command import KillCommand\n\n # Test with a numeric pid.\n command = KillCommand(12345)\n assert command.getPid() == 12345\n\n expected = [\"kill\", \"12345\"]\n command.execute()\n assert execute.called_once_with(expected)\n\n execute.reset_mock()\n\n pidFile = tempfile.NamedTemporaryFile(delete=True)\n pidFile.write(\"54321\")\n pidFile.flush()\n\n # Test with a pid file.\n command = KillCommand(pidFile.name)\n assert command.getPid() == 54321\n\n expected = [\"kill\", \"54321\"]\n command.execute()\n assert execute.called_once_with(expected)\n \n execute.reset_mock()\n\n # Test with a non-existent pid file.\n command = KillCommand(\"\")\n assert command.getPid() is None\n \n command.execute()\n assert not execute.called", "def kill(self):\n self.error_code = 'KILLED'\n self.running = False", "def kill_gracefully(process, timeout=2):\n try:\n with suppress(ProcessLookupError):\n process.terminate()\n stdout, stderr = process.communicate(timeout=timeout)\n except TimeoutExpired:\n process.kill()\n stdout, stderr = process.communicate()\n\n return process.returncode, stdout, stderr", "def killall(name, params=None):\n\n if platform.system() == \"Windows\":\n name += \".exe\"\n\n for ps in psutil.process_iter():\n cmdline = \"\"\n try:\n if ps.name() != name:\n continue\n\n if params:\n cmdline = ps.cmdline()\n except psutil.AccessDenied:\n continue\n\n ps_found = True\n\n if params: # If you want to compare command line\n check_list = []\n\n # Data converting\n 
if params is list:\n check_list = params\n elif params is str:\n check_list = str.split(\",\")\n else:\n check_list.append(str(params))\n\n # Compare command line's parameters\n for item in check_list:\n ps_found = False\n\n for param in cmdline:\n if param.find(item) != -1:\n ps_found = True\n break\n\n if ps_found is False: # Process is not found.\n break\n\n if ps_found:\n try:\n ps.kill()\n except Exception:\n pass", "def _stop_process(self):\n self.stdin_queue.put_nowait(\"quit\")\n ExternalProcess._stop_process(self)", "def kill(self):\n # Prevent a weird behavior: when STOPPED and kill() is called, app crashes (FIXME)\n if self.__state is not ServiceState.STOPPED:\n os.kill(int(self.__properties['MainPID']), signal.SIGKILL)\n # Not nice but simple and currently working (FIXME)\n # TODO: Change time.sleep to wait until process of same service but different PID is up and running\n time.sleep(0.5)", "def stop(self):\r\n cfunc = lib_importer.windll.DAQmxStopTask\r\n if cfunc.argtypes is None:\r\n with cfunc.arglock:\r\n if cfunc.argtypes is None:\r\n cfunc.argtypes = [lib_importer.task_handle]\r\n\r\n error_code = cfunc(self._handle)\r\n check_for_error(error_code)", "def terminate(self):\n self._update()\n if self.running_mode == \"local\":\n for process in self.processes:\n try:\n process.terminate()\n except psutil.NoSuchProcess:\n # The process has just terminated\n # In multiprocess run this is likely to happen when other processes stops.\n pass\n elif self.running_mode == \"grid\":\n subprocess.check_call(\"qdel %d\" % self.job[\"job_number\"], shell=True)\n pass\n else:\n logger.warning(\"Asked for termination of a Run not known to be running.\")", "def killJobs(self, blTaskName, rng):\n return self._genericCommand('kill', blTaskName, rng)", "def kill(host):\n\ttry:\n\t\tprocess = subprocess.Popen([\"ssh\", host, \"pgrep -u cst042 python | xargs kill -s SIGTERM\"])\n\t\tprint process.wait()\n\texcept Exception, e:\n\t\tprint \"Unable to kill on %s\" % (str(host))", "def platform_stop(self):\n self.platform.stop()", "def stop_subprocesses():\n global message_interface\n global c_library_interface\n if message_interface:\n message_interface.stop()\n if c_library_interface:\n c_library_interface.stop()", "def safe_kill(pid):\n try:\n return os.kill(pid, signal.SIGKILL)\n except OSError as e:\n if e.errno == errno.ESRCH:\n # Raced with process termination\n pass\n else:\n raise", "def command(cmd, timeout): \n is_linux = platform.system() == 'Linux' \n \n p = subprocess.Popen(cmd, stderr=None, stdout=None, shell=True, preexec_fn=os.setsid if is_linux else None) \n t_beginning = time.time() \n seconds_passed = 0 \n while True: \n if p.poll() is not None: \n break \n seconds_passed = time.time() - t_beginning \n if timeout and seconds_passed > timeout: \n if is_linux: \n os.killpg(p.pid, signal.SIGTERM) \n print 'linux'\n else: \n p.terminate()\n p.kill()\n print 'windows' \n print 'timeout!'\n time.sleep(0.1)", "def kill(pids):\n for pid in pids:\n process = psutil.Process(pid)\n for proc in process.children(recursive=True):\n proc.kill()\n process.kill()\n return", "def _restartProcessNormal(self) -> None:\n\n if IS_WIN_SVC in sys.argv:\n reactor.callFromThread(reactor.stop)\n return\n\n python = sys.executable\n argv = list(sys.argv)\n\n def addExe(val):\n if not \"run_peek_\" in val:\n return val\n if isWindows and not val.lower().endswith(\".exe\"):\n return val + \".exe\"\n return val\n\n argv = map(addExe, argv)\n os.execl(python, python, *argv)", "def kill(self):\r\n\r\n 
endpoint = self._get_nailgun_endpoint()\r\n if endpoint:\r\n self._log_kill(endpoint.pid, endpoint.port)\r\n try:\r\n os.kill(endpoint.pid, 9)\r\n except OSError:\r\n pass", "def kill_running_process(appName=\"bitcoind\"):\n for line in os.popen(\"ps ax | grep \" + appName + \" | grep -v grep\"):\n fields = line.split()\n pid = fields[0]\n os.kill(int(pid), signal.SIGKILL)", "def shutdown():\n # attempt to call cleanup() for the running service\n try:\n service_class.cleanup()\n except Exception:\n sys.stderr.write(\"Error in %s.cleanup():\\n\" % service_class.__name__)\n traceback.print_exc(file=sys.stderr)\n sys.stderr.flush()\n\n # \"yarn dev\" doesn't pass SIGTERM to its children - to be safe, kill all\n # subprocesses of the child process first\n try:\n # children() returns parent processes first - start with children\n # instead to make killing \"yarn dev\" more reliable\n for subchild in reversed(child.children(recursive=True)):\n try:\n subchild.terminate()\n except psutil.NoSuchProcess:\n # we may have already caused it to exit by killing its parent\n pass\n child.terminate()\n except psutil.NoSuchProcess:\n # child already exited\n pass\n\n child.wait()\n if exit_mode == ExitMode.CHILD and child.returncode != 0:\n sys.stdout.buffer.write(child_stdout.to_bytes())\n sys.stdout.flush()\n sys.stderr.write(\n \"Subprocess %r exited with error %i:\\n\"\n % (command, child.returncode)\n )\n sys.stderr.buffer.write(child_stderr.to_bytes())\n sys.stderr.flush()" ]
[ "0.726238", "0.6810974", "0.6760341", "0.6577071", "0.63967526", "0.63236195", "0.63068575", "0.6289544", "0.6275234", "0.6238362", "0.62296176", "0.6153977", "0.6149264", "0.6146983", "0.6144532", "0.61424387", "0.6141811", "0.60982275", "0.60788274", "0.6070093", "0.6050515", "0.60378826", "0.6020217", "0.5984187", "0.5954449", "0.5941723", "0.5895441", "0.5889775", "0.5874231", "0.5865436", "0.5849486", "0.5847604", "0.5821043", "0.5794664", "0.5773094", "0.57387835", "0.5731786", "0.5701515", "0.56758255", "0.56740093", "0.5670111", "0.5657362", "0.5655186", "0.5651802", "0.56486833", "0.56477225", "0.56426084", "0.5630744", "0.56299496", "0.5627423", "0.56233424", "0.56232333", "0.5622381", "0.56143296", "0.56040156", "0.55999225", "0.55927074", "0.5556849", "0.5552462", "0.5542387", "0.5531089", "0.5528286", "0.55145204", "0.5509359", "0.5500589", "0.55004543", "0.54863244", "0.5472099", "0.5465489", "0.5453612", "0.5447996", "0.54326445", "0.54317063", "0.54298496", "0.5404122", "0.53775257", "0.5377185", "0.5365479", "0.5365321", "0.5364243", "0.53638923", "0.5354647", "0.5342384", "0.53300923", "0.5327415", "0.53202814", "0.53177136", "0.53095376", "0.5309443", "0.53046924", "0.5299237", "0.5290932", "0.5286719", "0.52840877", "0.52813786", "0.52672565", "0.5265158", "0.52374315", "0.5227621", "0.52263653" ]
0.74643934
0
The Windows version of base.processExists
def processExists(uPid):
    # We try open the process for waiting since this is generally only forbidden in a very few cases.
    try:
        hProcess = win32api.OpenProcess(win32con.SYNCHRONIZE, False, uPid); # pylint: disable=no-member
    except pywintypes.error as oXcpt: # pylint: disable=no-member
        if oXcpt.winerror == winerror.ERROR_INVALID_PARAMETER:
            return False;
        if oXcpt.winerror != winerror.ERROR_ACCESS_DENIED:
            reporter.logXcpt('uPid=%s oXcpt=%s' % (uPid, oXcpt));
            return False;
        reporter.logXcpt('uPid=%s oXcpt=%s' % (uPid, oXcpt));
    except Exception as oXcpt:
        reporter.logXcpt('uPid=%s' % (uPid,));
        return False;
    else:
        hProcess.Close(); #win32api.CloseHandle(hProcess)
        return True;
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def win():\n if platform.system() in WINDOWS:\n return True\n return False", "def is_windows():\n if os.name == \"nt\":\n return True\n return False", "def _on_windows() -> bool:\n return os.name == \"nt\"", "def is_windows():\n return os.name == \"nt\"", "def os_is_windows():\n return platform.system() == \"Windows\"", "def is_windows():\r\n return sys.platform == \"win32\"", "def is_windows():\n return sys.platform == \"win32\"", "def check_process_exist(process_name): \n returncode = '' \n try:\n p=os.popen('tasklist /FI \"IMAGENAME eq %s\"' % process_name) \n returncode = p.read().count(process_name) \n if returncode:\n initlog('%s exists' % process_name)\n except Exception, e:\n initlog(str(e)) \n return returncode", "def on_windows ():\n if bjam.variable(\"NT\"):\n return True\n\n elif bjam.variable(\"UNIX\"):\n\n uname = bjam.variable(\"JAMUNAME\")\n if uname and uname[0].startswith(\"CYGWIN\"):\n return True\n\n return False", "def is_win():\n return sys.platform[:3] == \"win\"", "def is_windows() -> bool:\n\n return sys.platform == 'win32'", "def program_exists(name):\n for path in os.environ['PATH'].split(os.path.pathsep):\n if path and os.path.exists(os.path.join(path, name)):\n return True\n return False", "def process_exists(name):\n for pid in [pid for pid in os.listdir(\"/proc\") if pid.isdigit()]:\n try:\n exe_name = os.readlink(os.path.join(\"/proc/\", pid, \"exe\"))\n except OSError:\n continue\n if exe_name and exe_name.endswith(os.path.join(\"/\", name)):\n return pid\n return None", "def is_windows() -> bool:\n return sys.platform == \"win32\"", "def IsInstalled(location=None):\n return not not base.Tool._GetExecutable(COMMAND, location)", "def is_running(program):\n \n #cmd = [\"xdotool\", \"search\", \"--name\", program]\n cmd = [\"xdotool\", \"search\", \"--name\", \"--class\", \"--classname\", program]\n try:\n subprocess.check_output(cmd)\n return True\n except:\n return False", "def ps_find(name):\n for proc in psutil.process_iter():\n if proc.name() == name:\n return True\n return False", "def available(self):\n\t\treturn self.executable(self.path)", "def available(self):\n\t\treturn self.executable(self.path)", "def available(self):\n\t\treturn self.executable(self.path)", "def processCheckPidAndName(uPid, sName):\n fRc = processExists(uPid);\n if fRc is True:\n try:\n from win32com.client import GetObject; # pylint: disable=F0401\n oWmi = GetObject('winmgmts:');\n aoProcesses = oWmi.InstancesOf('Win32_Process');\n for oProcess in aoProcesses:\n if long(oProcess.Properties_(\"ProcessId\").Value) == uPid:\n sCurName = oProcess.Properties_(\"Name\").Value;\n reporter.log2('uPid=%s sName=%s sCurName=%s' % (uPid, sName, sCurName));\n sName = sName.lower();\n sCurName = sCurName.lower();\n if os.path.basename(sName) == sName:\n sCurName = os.path.basename(sCurName);\n\n if sCurName == sName \\\n or sCurName + '.exe' == sName \\\n or sCurName == sName + '.exe':\n fRc = True;\n break;\n except:\n reporter.logXcpt('uPid=%s sName=%s' % (uPid, sName));\n return fRc;", "def is_64_windows(self):\n return 'PROGRAMFILES(X86)' in os.environ", "def available(self):\n\t\treturn self.executable(self.path[0]) and \\\n\t\t\tself.executable(self.path[1])", "def __virtual__():\n if not salt.utils.platform.is_windows():\n return False, \"This utility only available on Windows\"\n\n return __virtualname__", "def available(self):\n return self.executable(self.path)", "def process_exists(pid=None, name=None):\n\n return count_processes(pid, name) > 0", "def 
available_process(**kwargs):\n return LazySubprocessTester([sys.executable, \"-c\", \"import sys; sys.exit(0)\"], **kwargs)", "def isWindows(cls):\n return WIN", "def check_cmake_windows():\n chk = Popen(\"wmic product where \\\"name = 'cmake'\\\" get installlocation,version\",\n shell=True, stdout=PIPE, stderr=PIPE)\n stdout, stderr = chk.communicate()\n if stderr:\n return False, stderr\n lines = [re.sub(\" +\", \" \", line.strip())\n for line in stdout.decode().splitlines()\n if line.strip()]\n stdout = lines[1]\n location = stdout[:stdout.rfind(\" \")] + \"bin\"\n out_info(\"CMake not found in %PATH%. Temporarily adding: \\\"{}\\\"\".format(location))\n os.environ[\"PATH\"] += \";{}\".format(location)\n stdout = \"cmake {}\".format(stdout)\n return stdout, False", "def checkRunning(procname):\n return procdata.checkRunning(procname)", "def exec_exists(name):\n\n with settings(hide('everything'), warn_only=True):\n return local('which %s' % name, capture=True)", "def exists(_env):\n return True", "def exists(_env):\n return True", "def _has_prog(prog):\n try:\n subprocess.check_output(f\"which {prog}\", shell=True)\n return True\n except subprocess.CalledProcessError:\n return False", "def __virtual__():\n if not salt.utils.platform.is_windows():\n return False, \"This utility will only run on Windows\"\n\n return __virtualname__", "def is_64_windows():\n return 'PROGRAMFILES(X86)' in os.environ", "def command_exists(name, path=None):\n if path is None:\n path = sys.path\n\n for prefix in path:\n filename = os.path.join(prefix, name)\n is_executable = os.access(filename, os.X_OK)\n is_file = os.path.isfile(filename)\n if is_executable and is_file:\n return True\n\n return False", "def is_system(self) -> bool:", "def CheckProg(context, prog_name):\n\n context.Message(\"Checking whether %s program exists...\" % prog_name)\n path = context.env.WhereIs(prog_name)\n context.Result(bool(path))\n\n return path", "def process_exists(pid):\n try:\n os.kill(pid, 0)\n return True\n except ProcessLookupError:\n return False", "def _check_exe_folder(self):\n executable_folder = os.path.split(sys.executable)[0]\n return self._check_folder(executable_folder)", "def exe_match(expected_name):\n # expected_name = expected_name.encode('ascii')\n def f(win):\n n = conv(win.process_name)\n return n == expected_name\n return f", "def test_azurecli_binary_exists(host):\n host.file(PACKAGE_BINARY).exists", "def test_process_path(path):\n try:\n subprocess.call([path, \"--version\"])\n return True\n except:\n print(\"Cannot find executable on {}\".format(path))\n return False", "def supports_sys_executable(self):\n return bool(getattr(sys, \"executable\", None))", "def is_in_path(self):\n exe = self.command.split()[0]\n for try_path in os.environ[\"PATH\"].split(os.pathsep):\n try_path = try_path.strip('\"')\n exe_try = os.path.join(try_path, exe).strip()\n if os.path.isfile(exe_try) and os.access(exe_try, os.X_OK):\n return True\n return False", "def check_for(command):\n if shutil.which(command) is None:\n print(colored(\"{} not available on system\".format(command),\"red\"))\n sys.exit(1)", "def is_ghidra_running() -> bool:\n if os.name == \"nt\":\n find_ghidra = \"WMIC path win32_process get Commandline\"\n else:\n find_ghidra = \"ps -ax\"\n out = subprocess.check_output(find_ghidra.split())\n logger.debug(\"Running %s\", find_ghidra)\n if b\"ghidrarun\" in out.lower():\n return True\n return False", "def is_exe_in_path(name):\n if sys.platform == 'win32' and not name.endswith('.exe'):\n name += '.exe'\n\n 
for dir in os.environ['PATH'].split(os.pathsep):\n if os.path.exists(os.path.join(dir, name)):\n return True\n\n return False", "def is_process_running(name):\n if not hasattr(is_process_running, \"proc\"):\n is_process_running.proc = None # it doesn't exist yet, so init it\n\n if is_process_running.proc:\n if is_process_running.proc.is_running():\n return True\n else:\n is_process_running.proc = None\n return False\n else:\n for p in psutil.process_iter():\n if p.name() == name:\n is_process_running.proc = p\n return True\n #\n return False", "def is_available(self):\n try :\n p = subprocess.Popen([self.program_path, self.help_argument],stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n p.communicate()\n return p.wait() == self.help_return_code\n except OSError:\n return False", "def is_exists(self):\n\n return os.path.isfile(os.path.join(self.scripts_dir, self.python_name))", "def is_exist(program):\n def is_exe(fpath):\n return path.isfile(fpath) and access(fpath, X_OK)\n\n fpath, _ = path.split(program)\n if fpath:\n if is_exe(program):\n return program\n else:\n for mypath in environ[\"PATH\"].split(pathsep):\n exe_file = path.join(mypath, program)\n if is_exe(exe_file):\n return exe_file\n\n return None", "def is_system(self) -> undefined.UndefinedOr[bool]:", "def checkForBinary(binary):\n try:\n fullPath = subprocess.check_output(['which',binary])\n return True\n except subprocess.CalledProcessError as e:\n return False", "def check_PATH_for_program(f):\n\n path = os.environ[\"PATH\"].split(\":\")\n\n for p in path:\n\n if os.path.isfile(os.path.join(p,f)):\n return True\n\n return False", "def is_process_running(pid):\n return os.path.exists(\"/proc/%s\" % pid)", "def is_proc_running(name):\n\n for p in psutil.process_iter(['name']):\n if p.info['name'] == name:\n return True\n\n return False", "def procExists(self, procname):\n\n proclist = self.getList( 'proclist' ) # safely get copy of process list\n\n count = 0 # count number of occurrences of 'procname'\n for i in proclist:\n command = string.split(i.comm, '/')[-1]\n if command == procname or i.procname == procname:\n count = count + 1\n\n return count", "def check_ambari_server_process_up(self):\n process_name = \"ambari-server\"\n output = self.__find_process(process_name)\n return re.search(process_name, output)", "def _ServerIsRunning( self ):\n return utils.ProcessIsRunning( self._gocode_handle )", "def _is_installed(self) -> bool:\n try:\n sh.Command(self._file_full_path)\n return True\n except sh.CommandNotFound:\n return False", "def getwindowsversion(): # real signature unknown; restored from __doc__\n pass", "def check_running(process, min=1):\n if j.data.platform.is_linux():\n pids = get_pids(process)\n if len(pids) >= min:\n return True\n return False", "def is_program_installed(basename):\n for path in os.environ[\"PATH\"].split(os.pathsep):\n abspath = osp.join(path, basename)\n if osp.isfile(abspath):\n return abspath", "def unavailable_process(**kwargs):\n return LazySubprocessTester([sys.executable, \"-c\", \"import sys; sys.exit(1)\"], **kwargs)", "def system():\n return uname().system", "def system():\n return uname().system", "def is_exe(fpath):\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)", "def checkIfToolExists(toolName):\n\n # Determine the percentage of good pixels \n cmd = ['which', toolName]\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n translateOut, err = p.communicate()\n\n # Parse the output\n failString = 'no ' + toolName + ' in ('\n if translateOut.find(failString) >= 0:\n 
raise Exception('Missing requested tool ' + toolName)\n else:\n return True", "def validate_no_win32() -> None:\n try:\n assert sys.platform != \"win32\"\n except AssertionError:\n logger.exception(\"This application cannot run on Windows!\")\n sys.exit(1)", "def check_process_for_pid(pid, process_name):\n pid = int(pid)\n proc = psutil.Process(pid)\n return proc.name() == process_name", "def test_system_platform():\n accepted_values = ['windows', 'linux']\n output = sh.system_platform()\n assert output in accepted_values", "def sys_path_exists(self):\n return os.path.exists(self.sys_class_orig_path)", "def is_wasabi_running():\n wasabi_process_id = run('pidof wassabee')\n if wasabi_process_id:\n return True\n else:\n return False", "def exists(_env):\n detector = DetectCompiler()\n if detector['icx'] is None:\n return False\n return True", "def process_exists(self, pid):\n try:\n os.kill(pid, 0)\n except OSError:\n return False\n return True", "def find_program(basename):\n names = [basename]\n if os.name == 'nt':\n # Windows platforms\n extensions = ('.exe', '.bat', '.cmd')\n if not basename.endswith(extensions):\n names = [basename+ext for ext in extensions]+[basename]\n for name in names:\n path = is_program_installed(name)\n if path:\n return path", "def _check_for_cmd(command):\n slab_logger.log(15, 'Checking if %s is installed' % command)\n # Note: Using type git here to establish if posix system has a binary\n # called git instead of which git b/c which often doesn't return\n # proper 0 or 1 exit status' and type does. Which blah on many\n # systems returns 0, which is bad.\n if os.name == \"posix\":\n returncode, myinfo = run_this('type %s' % command)\n return(returncode, myinfo)\n elif os.name == \"nt\":\n # test windows for git\n pass", "def _subprocess(cmd):\n\n log.debug('Running: \"%s\"', \" \".join(cmd))\n try:\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n ret = salt.utils.stringutils.to_unicode(proc.communicate()[0]).strip()\n retcode = proc.wait()\n\n if ret:\n return ret\n elif retcode != 1:\n return True\n else:\n return False\n except OSError as err:\n log.error(err)\n return False", "def is_exe(fpath):\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)", "def is_exe(fpath):\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)", "def test_skip_unless_windows(self):\n pass", "def test_container_exists():\n return exec_fn(_test_container_exists)", "def test_fastqc_exec_notexist():\n try:\n obj = fastqc.FastQC(os.path.join(\".\", \"fastqc\"))\n except NotExecutableError:\n return True\n else:\n return False", "def test_skip_if_windows(self):\n pass", "def pidExists(self, pid):\n\n prochash = self.getHash( 'datahash' ) # safely get copy of process dict\n\n try:\n prochash[pid]\n return 1\n except KeyError:\n return 0", "def pidExists(self, pid):\n\n prochash = self.getHash( 'datahash' ) # safely get copy of process dict\n\n try:\n prochash[pid]\n return 1\n except KeyError:\n return 0", "def _is_running(self, package_name):\n cmd = r' |echo $(grep -E {package_name})'.format(package_name=package_name)\n if self.device.sdk_version > 25:\n cmd = r'ps -A' + cmd\n else:\n cmd = r'ps' + cmd\n processes = self.adb_client.shell(cmd).splitlines()\n for ps in processes:\n if ps:\n ps = ps.split()\n return ps[1]\n return None", "def installer_exists(self, platform):\n \n validations.validate_platform(platform)\n \n installer_filename = os.path.join(\n settings.CUSTOM_INSTALLER_ROOT,\n self.build_id,\n constants.PLATFORM_BUNDLES[platform]\n )\n\n if 
os.path.isfile(installer_filename):\n return True\n \n return False", "def skip_on_windows (func):\n import sys\n\n return skip_if(sys.platform.startswith('win'))(func)", "def is_exe(fpath):\n import os\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)", "def exists():\n return PYTHON_VERSION is not None", "def verifyInstalled(cmd):\n\tprint \"Verifying %s works...\" % cmd\n\tif (sys.platform == 'win32'):\n\t\ttry:\n\t\t\tstatus = subprocess.call(shlex.split(cmd))\n\t\t\tprint \"Installation was successful.\"\n\t\t\treturn True\n\t\texcept OSError as e:\n\t\t\tprint >>sys.stderr, \"Execution failed with verification: \",e\n\t\t\tprint cmd + \" was not installed correctly.\"\n\t\t\treturn False\n\telse:\n\t\tstatus = os.system(cmd)\n\t\tif (status == NOT_INSTALLED):\n\t\t\tprint status\n\t\t\tprint \"An error occured with installation/environment variables. %s is still not installed.\" % cmd\n\t\t\treturn False\n\t\telse:\n\t\t\tprint \"Installation was successful.\"\n\t\t\treturn True", "def test_process_id():\n output = sh.process_id()\n assert isinstance(output, int) and output > 0", "def valid_nuget_executable(nuget_path):\n with open(os.devnull, \"w\") as devnull:\n try:\n return call(escape_exe_path(nuget_path) + \" help\", shell=True, stderr=devnull, stdout=devnull) == 0\n except:\n return False", "def BuildExists(buildname):\n for platform in platforms:\n if not os.path.exists(builds_basedir+'/'+platform+'/'+buildname):\n return False\n return True", "def is_exe(fpath):\n\treturn os.path.isfile(fpath) and os.access(fpath, os.X_OK)", "def checkForExe(exe):\n exepath = None\n \n # first check if we've been given an absolute path\n if len(os.path.split(exe)[0]):\n# print \"CHECK FOR EXE ABS PATH\", exe\n \n if os.path.exists(exe):\n exepath = exe\n \n else:\n # basename\n exe = os.path.basename(exe)\n# print \"SEARCHING FOR BASENAME IN SYS PATH\", exe\n \n if exepath is None:\n # check if exe programme located\n syspath = os.getenv(\"PATH\", \"\")\n syspatharray = syspath.split(\":\")\n found = 0\n for syspath in syspatharray:\n if os.path.exists(os.path.join(syspath, exe)):\n found = 1\n break\n \n if found:\n exepath = exe\n \n else:\n for syspath in EXTENDED_PATH:\n if os.path.exists(os.path.join(syspath, exe)):\n found = 1\n break\n \n if found:\n exepath = os.path.join(syspath, exe)\n \n else:\n exepath = 0\n \n return exepath", "def is_exe(fpath):\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)" ]
[ "0.68071175", "0.6609007", "0.6606889", "0.6557855", "0.65289325", "0.65233135", "0.6510651", "0.649545", "0.64642906", "0.64525825", "0.63975483", "0.6390339", "0.6355812", "0.63475555", "0.6226767", "0.6114815", "0.61133957", "0.6084441", "0.6084441", "0.6084441", "0.6052667", "0.6050264", "0.6033505", "0.5969112", "0.59625673", "0.59613526", "0.5948554", "0.5937459", "0.5929558", "0.59172606", "0.5916845", "0.5911443", "0.5911443", "0.5859063", "0.58551675", "0.58525246", "0.5830306", "0.5814561", "0.5796744", "0.5757065", "0.5739657", "0.57045543", "0.56963044", "0.5688074", "0.56873524", "0.56865317", "0.5674415", "0.5662489", "0.56624866", "0.56464803", "0.5640136", "0.5634707", "0.56221044", "0.5600954", "0.55926996", "0.5586479", "0.5582854", "0.5575054", "0.55747354", "0.5570139", "0.5543697", "0.55366224", "0.5531284", "0.55079156", "0.54906094", "0.5485018", "0.548479", "0.548479", "0.5476356", "0.5475349", "0.54690886", "0.546423", "0.5451441", "0.54415685", "0.54232335", "0.54217255", "0.5421068", "0.5416911", "0.53913414", "0.538198", "0.538076", "0.538076", "0.536925", "0.53689873", "0.5362728", "0.5353698", "0.5348925", "0.5348925", "0.53396267", "0.5337529", "0.5335153", "0.5334364", "0.5313067", "0.5311643", "0.5309909", "0.5309204", "0.5304039", "0.52875483", "0.52793026", "0.5273692" ]
0.6012618
23
The Windows version of base.processCheckPidAndName
def processCheckPidAndName(uPid, sName):
    fRc = processExists(uPid);
    if fRc is True:
        try:
            from win32com.client import GetObject; # pylint: disable=F0401
            oWmi = GetObject('winmgmts:');
            aoProcesses = oWmi.InstancesOf('Win32_Process');
            for oProcess in aoProcesses:
                if long(oProcess.Properties_("ProcessId").Value) == uPid:
                    sCurName = oProcess.Properties_("Name").Value;
                    reporter.log2('uPid=%s sName=%s sCurName=%s' % (uPid, sName, sCurName));
                    sName = sName.lower();
                    sCurName = sCurName.lower();
                    if os.path.basename(sName) == sName:
                        sCurName = os.path.basename(sCurName);

                    if sCurName == sName \
                    or sCurName + '.exe' == sName \
                    or sCurName == sName + '.exe':
                        fRc = True;
                    break;
        except:
            reporter.logXcpt('uPid=%s sName=%s' % (uPid, sName));
    return fRc;
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def exe_match(expected_name):\n # expected_name = expected_name.encode('ascii')\n def f(win):\n n = conv(win.process_name)\n return n == expected_name\n return f", "def check_process_for_pid(pid, process_name):\n pid = int(pid)\n proc = psutil.Process(pid)\n return proc.name() == process_name", "def process_exists(name):\n for pid in [pid for pid in os.listdir(\"/proc\") if pid.isdigit()]:\n try:\n exe_name = os.readlink(os.path.join(\"/proc/\", pid, \"exe\"))\n except OSError:\n continue\n if exe_name and exe_name.endswith(os.path.join(\"/\", name)):\n return pid\n return None", "def get_pid(name):\n try: \n for process in psutil.process_iter():\n try:\n proc = process.as_dict(attrs=['pid', 'name'])\n if name in proc['name']:\n pid = proc['pid']\n logging.info(f\"Found PID {pid} for {name}\")\n return int(pid) \n except (psutil.NoSuchProcess, psutil.AccessDenied , psutil.ZombieProcess) :\n pass \n except Exception as e:\n logging.exception(f\"EXCEPTION: {e} \\n Full stack trace: \\n\", exc_info=1)", "def ps_find(name):\n for proc in psutil.process_iter():\n if proc.name() == name:\n return True\n return False", "def check_pid(pid):\n result = None\n try:\n s = os.stat('/proc/' + pid)\n if s.st_uid == our_uid:\n cwd = os.path.realpath('/proc/' + pid + '/cwd')\n if cwd == kill_dir and int(pid) != our_pid:\n f = open('/proc/' + pid + '/cmdline')\n cmdline = f.read().split('\\x00')[:-1]\n f.close()\n result = cmdline\n except OSError:\n # We can't read all our processes; that's ok\n pass\n return result", "def get_process_name(pid):\n proc = subprocess.Popen(['ps', '-p', pid, '-o', 'comm='],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n out, err=proc.communicate()\n return out.strip().decode('utf-8')", "def name(self):\n # This is how PIDs 0 and 4 are always represented in taskmgr\n # and process-hacker.\n if self.pid == 0:\n return \"System Idle Process\"\n if self.pid == 4:\n return \"System\"\n return os.path.basename(self.exe())", "def get_pid_name(pid):\n try:\n with open(os.path.join('/proc/', pid, 'cmdline'), 'r') as pidfile:\n try:\n cmd = pidfile.readline().split()[0]\n return os.path.basename(cmd).rstrip('\\x00')\n except IndexError:\n # no cmd returned\n return \"<NO NAME>\"\n except IOError:\n # upstream wait any string, no matter if we couldn't read proc\n return \"no_such_process\"", "def get_name(pid, default=None):\n try:\n return only(\n process.Properties_(\"Name\").Value\n for process in win32com.client.GetObject('winmgmts:').InstancesOf('Win32_Process')\n if process.Properties_(\"ProcessID\").Value == pid\n )\n except TooFewItemsError:\n return default", "def check_process_exist(process_name): \n returncode = '' \n try:\n p=os.popen('tasklist /FI \"IMAGENAME eq %s\"' % process_name) \n returncode = p.read().count(process_name) \n if returncode:\n initlog('%s exists' % process_name)\n except Exception, e:\n initlog(str(e)) \n return returncode", "def on_windows ():\n if bjam.variable(\"NT\"):\n return True\n\n elif bjam.variable(\"UNIX\"):\n\n uname = bjam.variable(\"JAMUNAME\")\n if uname and uname[0].startswith(\"CYGWIN\"):\n return True\n\n return False", "def is_process_running(name):\n if not hasattr(is_process_running, \"proc\"):\n is_process_running.proc = None # it doesn't exist yet, so init it\n\n if is_process_running.proc:\n if is_process_running.proc.is_running():\n return True\n else:\n is_process_running.proc = None\n return False\n else:\n for p in psutil.process_iter():\n if p.name() == name:\n is_process_running.proc = p\n return True\n #\n 
return False", "def pidof(processname = None):\n processname = os.path.basename(processname)\n pidpath = os.path.join(pid_path,processname + \".pid\")\n if processname is not None and os.path.exists(pidpath):\n f = open (pidpath)\n pids = f.readlines()\n f.close()\n return pids\n else:\n return False", "def getPidByName(process_name):\n \n pid = None\n count = 0\n try:\n hProcessSnap = kernel32.CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0)\n pe32 = PROCESSENTRY32()\n pe32.dwSize = sizeof(PROCESSENTRY32)\n ret = kernel32.Process32First(hProcessSnap , byref(pe32))\n while ret:\n if pe32.szExeFile == LPSTR(process_name).value:\n pid = pe32.th32ProcessID\n count += 1\n ret = kernel32.Process32Next(hProcessSnap, byref(pe32))\n kernel32.CloseHandle (hProcessSnap)\n \n except Exception, e:\n debug_print(str(e))\n \n if not pid:\n debug_print(\"Could not find %s PID\" % process_name)\n \n return pid", "def get_process_info(name):\n process_lst = list()\n all_pid = psutil.pids()\n for pid in all_pid:\n info = psutil.Process(pid)\n if name in info.name():\n process_lst.append(info)\n\n return process_lst", "def is_proc_running(name):\n\n for p in psutil.process_iter(['name']):\n if p.info['name'] == name:\n return True\n\n return False", "def pidof(process_name):\n\n\tpids = []\n\n\tif 'licornd' in process_name:\n\t\t# licorn / linux 3.x specifiq : we can match 'licornd/wmi'\n\t\t# faster than 'licornd-wmi', and in some case the 'cmdline'\n\t\t# is empty, whereas the 'comm' is not.\n\t\tnames = [ process_name, process_name.replace('/', '-') ]\n\n\telse:\n\t\tnames = [ process_name ]\n\n\tfor entry in os.listdir('/proc'):\n\t\tif entry.isdigit():\n\t\t\ttry:\n\n\t\t\t\tif cgroup and open('/proc/%s/cpuset' % entry).read().strip() != cgroup:\n\t\t\t\t\tlogging.progress(_(u'Skipped process @{0} which is not '\n\t\t\t\t\t\t\t\t\t\tu'in the same cgroup.').format(entry))\n\t\t\t\t\tcontinue\n\n\t\t\t\ttry:\n\t\t\t\t\t# Linux 3.x only\n\t\t\t\t\tcommand_line1 = open('/proc/%s/comm' % entry).read().strip()\n\t\t\t\texcept:\n\t\t\t\t\tcommand_line1 = ''\n\n\t\t\t\tcommand_line2 = open('/proc/%s/cmdline' % entry).read().strip()\n\n\t\t\t\tfor pname in names:\n\t\t\t\t\tif pname == command_line1 or pname+'\\0' in command_line2:\n\t\t\t\t\t\tpids.append(int(entry))\n\n\t\t\texcept (IOError, OSError), e:\n\t\t\t\t# in rare cases, the process vanishes during iteration. This\n\t\t\t\t# is harmless. 
Any other error is not cool, raise it.\n\t\t\t\tif e.errno != errno.ENOENT:\n\t\t\t\t\traise e\n\n\treturn pids", "def cli_get_process_title():\n raise NotImplementedError()", "def check_processes(self, name: Optional[str] = None) -> str:\n\n for process in self.processes:\n if not process.is_running():\n self.processes.remove(process)\n continue\n\n cmdline = \" \".join(process.cmdline())\n port = re.findall(r\"--port=(\\d+)\", cmdline)\n port = port[0] if port else \"\"\n\n if re.findall(r\"-m\\s+.*streamlit_run|streamlit\", cmdline):\n return f\"http://localhost:{port}/{name}\"\n\n return \"\"", "def is_windows():\n return os.name == \"nt\"", "def get_pid_from_name(process_name:str) -> int:\r\n\tfor process in psutil.process_iter():\r\n\t\tif process_name in process.name():\r\n\t\t\treturn process.pid\r\n\traise ProcessLookupError(\"process '\" + process_name + \"' not found.\")", "def get_process_name(self):\n\n return self._args.t", "def is_windows():\r\n return sys.platform == \"win32\"", "def get_process_pid(robot_name):\n\n try:\n result = check_output(['pgrep', 'x{0}'.format(robot_name)])\n return int(result.strip())\n except:\n return None", "def is_windows():\n return sys.platform == \"win32\"", "def windows_name(self):\n return self._windows_name", "def get_process(proc_name):\n #LOG = log.getLogger(__name__)\n procList = []\n try:\n for pr in psutil.process_iter():\n for args in pr.cmdline():\n if proc_name in args:\n procList.append(pr.pid)\n return procList\n except BaseException as e:\n print(\"Error in fetching process: {}\".format(e))\n return None", "def _on_windows() -> bool:\n return os.name == \"nt\"", "def get_process_cmdline(process_name):\n\n\tfor pretendant in execute(['ps', '-U', 'root', '-u', 'root', '-o', 'args='])[0].split(\n\t\t\t\"\\n\")[:-1]:\n\t\t#print pretendant\n\t\tif pretendant.find(process_name) != -1:\n\t\t\treturn pretendant.split(' ')", "def is_win():\n return sys.platform[:3] == \"win\"", "def checkRunning(procname):\n return procdata.checkRunning(procname)", "def findProcessIdByName(processName):\n listOfProcessObjects = []\n # Iterate over the all the running process\n for proc in psutil.process_iter():\n try:\n pinfo = proc.as_dict(attrs=[\"pid\", \"name\", \"create_time\"])\n # Check if process name contains the given name string.\n if processName.lower() in pinfo[\"name\"].lower():\n listOfProcessObjects.append(pinfo)\n except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):\n pass\n return listOfProcessObjects", "def test_process_id():\n output = sh.process_id()\n assert isinstance(output, int) and output > 0", "def is_windows():\n if os.name == \"nt\":\n return True\n return False", "def set_process_name_and_cpu_priority(name):\n try:\n os.nice(19) # smooth cpu priority\n libc = cdll.LoadLibrary(\"libc.so.6\") # set process name\n buff = create_string_buffer(len(name.lower().strip()) + 1)\n buff.value = bytes(name.lower().strip().encode(\"utf-8\"))\n libc.prctl(15, byref(buff), 0, 0, 0)\n except Exception:\n return False # this may fail on windows and its normal, so be silent.\n else:\n log.debug(\"Process Name set to: {0}.\".format(name))\n return True", "def win():\n if platform.system() in WINDOWS:\n return True\n return False", "def os_is_windows():\n return platform.system() == \"Windows\"", "def __GetCurrentProcessName(self, G, node):\n prName = \"\"\n \n # Get targets of \"in relations\"\n relsIn = self.GetInOutRelationsForList(G, node, [Strings.in_, Strings.tool])\n inHolders = [rel[1] for rel in 
relsIn[gc.OutgoingRelations][Strings.in_]]\n \n # Get targets of \"requires\" relations\n reqTargets = set()\n for rel in relsIn[gc.InputRelations][Strings.tool]:\n reqTargets = reqTargets.union(set([r[1] for r in self.GetInOutRelationsForList(G, rel[0], [Strings.req])[gc.OutgoingRelations][Strings.req]]))\n\n # Do a search through \"in\" links\n while len(inHolders) and len(prName) == 0:\n nestedRels = self.GetInOutRelationsForList(G, inHolders[0], [Strings.is_, Strings.in_])\n isNames = [rel[1] for rel in nestedRels[gc.OutgoingRelations][Strings.is_]]\n \n # Once Process is met, return\n if Strings.ndProcess in isNames and inHolders[0] in reqTargets:\n prName = inHolders[0]\n else:\n inHolders += [rel[1] for rel in nestedRels[gc.OutgoingRelations][Strings.in_]]\n del inHolders[0]\n\n return prName", "def valid_process_name(function):\n\n def _wrapper(options, *args, **kwargs):\n from synergy.conf.process_context import ProcessContext\n if options.app not in ProcessContext.CONTEXT:\n msg = 'Aborting: application <%r> defined by --app option is unknown. \\n' % options.app\n sys.stdout.write(msg)\n raise ValueError(msg)\n return function(options, *args, **kwargs)\n\n return _wrapper", "def is_windows() -> bool:\n\n return sys.platform == 'win32'", "def _pidIsMyXvnc(self, pid):\n dirName = \"/proc/%d\" % pid\n try:\n if not os.path.exists(dirName):\n return False\n if os.stat(dirName).st_uid != os.getuid():\n return False\n if os.readlink(\"%s/exe\" % dirName) != _Paths.xvnc:\n return False\n return True\n except OSError:\n return False", "def process_cmdline(pid_info):\n\tif pid_info[\"cmdline\"]:\n\t\treturn reduce(lambda a, b: a + \" %s\" % b, pid_info[\"cmdline\"]).strip()\n\n\treturn pid_info[\"stat\"][\"comm\"]", "def test_process_parent_id():\n output = sh.process_parent_id()\n assert isinstance(output, int) and output > 0", "def process_exists(pid=None, name=None):\n\n return count_processes(pid, name) > 0", "def get_pid_filename(process_name):\n return os.path.join(settings.settings['pid_directory'], context.process_context[process_name].pid_filename)", "def _cmdline(process):\n return \" \".join(process.cmdline())", "def getmypid():\n raise NotImplementedError()", "def is_windows() -> bool:\n return sys.platform == \"win32\"", "def get_pids(name=None):\n results = []\n for process in win32com.client.GetObject('winmgmts:').InstancesOf('Win32_Process'):\n if name is None or process.Properties_(\"Name\").Value == name:\n results.append(process.Properties_(\"ProcessID\").Value)\n return results", "def get_process_id():\n process_id = os.environ[\"WS_PROCESS_ID\"]\n return process_id", "def _is_running(self, package_name):\n cmd = r' |echo $(grep -E {package_name})'.format(package_name=package_name)\n if self.device.sdk_version > 25:\n cmd = r'ps -A' + cmd\n else:\n cmd = r'ps' + cmd\n processes = self.adb_client.shell(cmd).splitlines()\n for ps in processes:\n if ps:\n ps = ps.split()\n return ps[1]\n return None", "def getwindowsversion(): # real signature unknown; restored from __doc__\n pass", "def processExists(uPid):\n # We try open the process for waiting since this is generally only forbidden in a very few cases.\n try:\n hProcess = win32api.OpenProcess(win32con.SYNCHRONIZE, False, uPid); # pylint: disable=no-member\n except pywintypes.error as oXcpt: # pylint: disable=no-member\n if oXcpt.winerror == winerror.ERROR_INVALID_PARAMETER:\n return False;\n if oXcpt.winerror != winerror.ERROR_ACCESS_DENIED:\n reporter.logXcpt('uPid=%s oXcpt=%s' % (uPid, oXcpt));\n return False;\n 
reporter.logXcpt('uPid=%s oXcpt=%s' % (uPid, oXcpt));\n except Exception as oXcpt:\n reporter.logXcpt('uPid=%s' % (uPid,));\n return False;\n else:\n hProcess.Close(); #win32api.CloseHandle(hProcess)\n return True;", "def is_process_running(pid):\n return os.path.exists(\"/proc/%s\" % pid)", "def from_subprocess():\n try:\n import subprocess\n except ImportError:\n return None\n try:\n return subprocess.check_output(\n ['uname', '-p'],\n stderr=subprocess.DEVNULL,\n text=True,\n encoding=\"utf8\",\n ).strip()\n except (OSError, subprocess.CalledProcessError):\n pass", "def pid():\n return 0x0204", "def pid():\n return 0x0204", "def get_window_id_by_pid(pid):\n from subprocess import check_output\n # Looks like:\n # 0x03c00041 0 3498 skipper Mozilla Firefox\n # WindowID ? PID USER Window Name\n # Needs sudo apt-get install wmctrl -lp\n\n output = check_output('wmctrl -lp', shell=True)\n # Find the line with the PID we are looking for\n for line in output.splitlines():\n fields = line.split()\n if len(fields) >= 3:\n this_pid = int(fields[2])\n if this_pid == pid:\n return int(fields[0], 16)\n return None", "def check_running(process, min=1):\n if j.data.platform.is_linux():\n pids = get_pids(process)\n if len(pids) >= min:\n return True\n return False", "def checkProcess(self):\n process = subprocess.Popen(\"ps -A | grep g13d\", stdout=subprocess.PIPE, shell=True)\n out, err = process.communicate()\n if out != '':\n self.ui.but_activate.setEnabled(False)\n self.ui.lab_active.setText(\"Running ok\")\n self.ui.lab_active.setStyleSheet(\"QLabel { background-color : none; color : green; }\");\n else:\n self.ui.but_activate.setEnabled(True)\n self.ui.lab_active.setText(\"Not Started\")\n self.ui.lab_active.setStyleSheet(\"QLabel { background-color : none; color : red; }\");", "def get_sys_name(self):\n\t\treturn call_sdk_function('PrlVmDevHdPart_GetSysName', self.handle)", "def get_pid_status(pid):\n if pid != 0:\n pid_str = pid\n else:\n pid_str = 'self'\n\n # read procfs /proc/PID/stat file to get PID status\n try:\n with open(\"/proc/{}/stat\".format(pid_str)) as stat_file:\n # split by '(' and ')' needed\n # as processes/threads could have spaces in the name\n line_split = stat_file.readline().strip().split(')')\n pid_name = line_split[0].split('(')[1]\n state = line_split[1].strip().split(' ')[0]\n return state, state in PID_VALID_STATUS, pid_name\n except EnvironmentError:\n pass\n\n return 'E', False, ''", "def get_pid(name: str) -> Set[int]:\n process_pids = set()\n for proc in psutil.process_iter():\n if name == proc.name():\n pid = proc.pid\n process_pids.add(pid)\n return process_pids", "def _pid(self, name):\n return self.pid_lookup[name]", "def checkIfProcessRunning(processName):\n # Iterate over the all the running process\n for proc in psutil.process_iter():\n try:\n # Check if process name contains the given name string.\n if processName.lower() in proc.name().lower():\n return True\n except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):\n pass\n return False", "def checkIfProcessRunning(processName):\n # Iterate over the all the running process\n for proc in psutil.process_iter():\n try:\n # Check if process name contains the given name string.\n if processName.lower() in proc.name().lower():\n return True\n except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):\n pass\n return False", "def isWindows(cls):\n return WIN", "def processCreate(sName, asArgs):\n\n # Construct a command line.\n sCmdLine = '';\n for sArg in asArgs:\n if sCmdLine 
== '':\n sCmdLine += '\"';\n else:\n sCmdLine += ' \"';\n sCmdLine += sArg;\n sCmdLine += '\"';\n\n # Try start the process.\n # pylint: disable=no-member\n dwCreationFlags = win32con.CREATE_NEW_PROCESS_GROUP;\n oStartupInfo = win32process.STARTUPINFO();\n try:\n (hProcess, hThread, uPid, uTid) = win32process.CreateProcess(sName,\n sCmdLine, # CommandLine\n None, # ProcessAttributes\n None, # ThreadAttibutes\n 1, # fInheritHandles\n dwCreationFlags,\n None, # Environment\n None, # CurrentDirectory.\n oStartupInfo);\n except:\n reporter.logXcpt('sName=\"%s\" sCmdLine=\"%s\"' % (sName, sCmdLine));\n return (-1, None, -1);\n\n # Dispense with the thread handle.\n try:\n hThread.Close(); # win32api.CloseHandle(hThread);\n except:\n reporter.logXcpt();\n\n # Try get full access to the process.\n try:\n hProcessFullAccess = win32api.DuplicateHandle(\n win32api.GetCurrentProcess(),\n hProcess,\n win32api.GetCurrentProcess(),\n win32con.PROCESS_TERMINATE\n | win32con.PROCESS_QUERY_INFORMATION\n | win32con.SYNCHRONIZE\n | win32con.DELETE,\n False,\n 0);\n hProcess.Close(); # win32api.CloseHandle(hProcess);\n hProcess = hProcessFullAccess;\n except:\n reporter.logXcpt();\n reporter.log2('processCreate -> %#x, hProcess=%#x' % (uPid, hProcess,));\n return (uPid, hProcess, uTid);", "def try_set_process_name(self, name=None):\n if name is None:\n name = getattr(self, 'process_name', None)\n if name is None:\n return\n try:\n import setproctitle\n setproctitle.setproctitle(name)\n except (ImportError, AttributeError):\n pass", "def is_pid_valid(pid):\n return get_pid_status(pid)[1]", "def get_sys_name(self):\n\t\treturn call_sdk_function('PrlVmDev_GetSysName', self.handle)", "def check_ambari_server_process_up(self):\n process_name = \"ambari-server\"\n output = self.__find_process(process_name)\n return re.search(process_name, output)", "def is_64_windows(self):\n return 'PROGRAMFILES(X86)' in os.environ", "def getActiveWindowName(display):\n\n # we can't get it via powershell as a system process, so we need to get it from a file \n # that gets written to through a scheduled task (hopefully)\n\n # get user's TEMP path\n tempPath = getCurrentUserTempPath()\n\n if tempPath:\n windowOutput = ''\n try:\n with open(tempPath+\"mqttNanny-activeWindow.txt\", encoding=\"utf-16\") as file:\n windowOutput = file.read()\n except IOError as e:\n logger.error(\"Error while reading active window name: {}\".format(str(e)))\n return ''\n\n # File contents looks like this:\n #\n #ProcessName AppTitle \n #----------- -------- \n #WindowsTerminal Windows PowerShell \n \n processNameLength = 0\n dashesMatched = False\n activeWindows = []\n\n for line in iter(windowOutput.splitlines()):\n #ignore blank lines\n if re.match('^\\s*$', line):\n continue\n logger.debug(line)\n # look for ----------- --------\n matchDashes = re.match(r'^([-]+\\s+)([-]+\\s*)', line, re.UNICODE)\n if matchDashes:\n # we need to count the length of the columns so that we can more easily parse it\n processNameLength = len(matchDashes.group(1))\n logger.debug(\"processNameLength = {}\".format(processNameLength))\n dashesMatched = True\n continue\n \n if dashesMatched:\n # we'll split the line based on length\n # some lines may not have all the data, skip them\n if len(line) >= processNameLength:\n processName = line[0:processNameLength].rstrip(\" \")\n title = line[processNameLength:].rstrip(\" \")\n \n activeWindows.append(processName + \": \" + title)\n \n if len(activeWindows) == 1:\n #this is normal, one active window\n return activeWindows[0]\n 
elif len(activeWindows) == 0:\n return \"No window\"\n else:\n # more than one active window is a problem - couldn't get active windows...\n logger.warning(\"Found \"+str(len(activeWindows))+\" active windows. This is not ok.\")\n return \"Error - couldn't get active window\"", "def get_process_token():\n # Reference\n # https://gist.github.com/schlamar/7024668\n GetCurrentProcess = ctypes.windll.kernel32.GetCurrentProcess\n GetCurrentProcess.restype = wintypes.HANDLE\n OpenProcessToken = ctypes.windll.advapi32.OpenProcessToken\n OpenProcessToken.argtypes = (wintypes.HANDLE, wintypes.DWORD, ctypes.POINTER(wintypes.HANDLE))\n OpenProcessToken.restype = wintypes.BOOL\n token = wintypes.HANDLE()\n\n # https://github.com/Alexpux/mingw-w64/blob/master/mingw-w64-tools/widl/include/winnt.h\n # TOKEN_READ = STANDARD_RIGHTS_READ | TOKEN_QUERY = 0x00020000 | 0x0008 = 0x20008\n # TOKEN_ALL_ACCESS = 0xf01ff\n\n TOKEN_READ = 0x20008\n res = OpenProcessToken(GetCurrentProcess(), TOKEN_READ, token)\n if not res > 0:\n raise RuntimeError(\"Couldn't get process token\")\n return token", "def get_process_id(name):\n child = subprocess.Popen(['pgrep', '-f', name], stdout=subprocess.PIPE, shell=False)\n response = child.communicate()[0]\n return [int(pid) for pid in response.split()]", "def name(cls):\n\n system = platform.system()\n\n # Apply system map\n if system in NAME_MAP:\n system = NAME_MAP[system]\n\n return system", "def split_name(process_name):\n identifier, box_name = process_name.split(\"-\")\n identifier = int(identifier)\n if Ibox.itersep in box_name:\n box_exec_name = box_name.split(\".\")[0]\n box_iter_name, iteration = box_exec_name.split(Ibox.itersep)\n iteration = int(iteration)\n else:\n box_exec_name = None\n box_iter_name = None\n iteration = None\n return identifier, box_name, box_exec_name, box_iter_name, iteration", "def __virtual__():\n if not salt.utils.platform.is_windows():\n return False, \"This utility only available on Windows\"\n\n return __virtualname__", "def validate_no_win32() -> None:\n try:\n assert sys.platform != \"win32\"\n except AssertionError:\n logger.exception(\"This application cannot run on Windows!\")\n sys.exit(1)", "def setup_process_stats(pid):\n return psutil.Process(pid)", "def _StartWorkerProcess(self, process_name):", "def pid_exists(pid):\n # http://stackoverflow.com/questions/568271/how-to-check-if-there-exists-a-process-with-a-given-pid\n if os.name == 'posix':\n # OS X and Linux\n import errno\n if pid < 0:\n return False\n try:\n os.kill(pid, 0)\n except OSError as e:\n return e.errno == errno.EPERM\n else:\n return True\n else:\n # Windows\n import ctypes\n kernel32 = ctypes.windll.kernel32\n HANDLE = ctypes.c_void_p\n DWORD = ctypes.c_ulong\n LPDWORD = ctypes.POINTER(DWORD)\n class ExitCodeProcess(ctypes.Structure):\n _fields_ = [ ('hProcess', HANDLE),\n ('lpExitCode', LPDWORD)]\n\n SYNCHRONIZE = 0x100000\n process = kernel32.OpenProcess(SYNCHRONIZE, 0, pid)\n if not process:\n return False\n\n ec = ExitCodeProcess()\n out = kernel32.GetExitCodeProcess(process, ctypes.byref(ec))\n if not out:\n err = kernel32.GetLastError()\n if kernel32.GetLastError() == 5:\n # Access is denied.\n logging.warning(\"Access is denied to get pid info.\")\n kernel32.CloseHandle(process)\n return False\n elif bool(ec.lpExitCode):\n # print ec.lpExitCode.contents\n # There is an exist code, it quit\n kernel32.CloseHandle(process)\n return False\n # No exit code, it's running.\n kernel32.CloseHandle(process)\n return True", "def __virtual__():\n if not 
salt.utils.platform.is_windows():\n return False, \"This utility will only run on Windows\"\n\n return __virtualname__", "def is_running(proc_name:str) -> bool:\r\n with Popen(\"tasklist /NH /FO TABLE\", shell=False, stdout=PIPE) as proc:\r\n rprocs = proc.stdout.read().decode(\"utf-8\")\r\n plist = rprocs.split(\"\\r\\n\")\r\n return(any(i.lower().startswith(proc_name.lower()) for i in plist))", "def _CheckStatusWorkerProcess(self, pid):\n # TODO: Refactor this method, simplify and separate concerns (monitoring\n # vs management).\n self._RaiseIfNotRegistered(pid)\n\n process = self._processes_per_pid[pid]\n\n process_status = self._QueryProcessStatus(process)\n if process_status is None:\n process_is_alive = False\n else:\n process_is_alive = True\n\n process_information = self._process_information_per_pid[pid]\n used_memory = process_information.GetUsedMemory() or 0\n\n if self._worker_memory_limit and used_memory > self._worker_memory_limit:\n logger.warning((\n 'Process: {0:s} (PID: {1:d}) killed because it exceeded the '\n 'memory limit: {2:d}.').format(\n process.name, pid, self._worker_memory_limit))\n self._KillProcess(pid)\n\n if isinstance(process_status, dict):\n self._rpc_errors_per_pid[pid] = 0\n status_indicator = process_status.get('processing_status', None)\n\n else:\n rpc_errors = self._rpc_errors_per_pid.get(pid, 0) + 1\n self._rpc_errors_per_pid[pid] = rpc_errors\n\n if rpc_errors > self._MAXIMUM_RPC_ERRORS:\n process_is_alive = False\n\n if process_is_alive:\n rpc_port = process.rpc_port.value\n logger.warning((\n 'Unable to retrieve process: {0:s} (PID: {1:d}) status via '\n 'RPC socket: http://localhost:{2:d}').format(\n process.name, pid, rpc_port))\n\n processing_status_string = 'RPC error'\n status_indicator = definitions.STATUS_INDICATOR_RUNNING\n else:\n processing_status_string = 'killed'\n status_indicator = definitions.STATUS_INDICATOR_KILLED\n\n process_status = {\n 'processing_status': processing_status_string}\n\n self._UpdateProcessingStatus(pid, process_status, used_memory)\n\n # _UpdateProcessingStatus can also change the status of the worker,\n # So refresh the status if applicable.\n for worker_status in self._processing_status.workers_status:\n if worker_status.pid == pid:\n status_indicator = worker_status.status\n break\n\n if status_indicator in definitions.ERROR_STATUS_INDICATORS:\n logger.error((\n 'Process {0:s} (PID: {1:d}) is not functioning correctly. 
'\n 'Status code: {2!s}.').format(process.name, pid, status_indicator))\n\n self._TerminateProcessByPid(pid)\n\n replacement_process = None\n replacement_process_name = 'Worker_{0:02d}'.format(\n self._last_worker_number)\n for replacement_process_attempt in range(\n self._MAXIMUM_REPLACEMENT_RETRIES):\n logger.info((\n 'Attempt: {0:d} to start replacement worker process for '\n '{1:s}').format(replacement_process_attempt + 1, process.name))\n\n replacement_process = self._StartWorkerProcess(replacement_process_name)\n if replacement_process:\n break\n\n time.sleep(self._REPLACEMENT_WORKER_RETRY_DELAY)\n\n if not replacement_process:\n logger.error(\n 'Unable to create replacement worker process for: {0:s}'.format(\n process.name))", "def getApplicationwindowId(ReferenceID):\n try:\n ldtp.wait(5)\n window = ReferenceID.windows()[0]\n logging.info(\"Application id of the window : %s\" % window)\n except Exception as er:\n logging.info('Not able to get window name of Application')\n return False\n return window", "def count_processes(pid=None, name=None):\n counter = 0\n for process in win32com.client.GetObject('winmgmts:').InstancesOf('Win32_Process'):\n if ((pid is None or process.Properties_(\"ProcessID\").Value == pid) and\n (name is None or process.Properties_(\"Name\").Value == name)):\n counter += 1\n return counter", "def is_64_windows():\n return 'PROGRAMFILES(X86)' in os.environ", "def GetChromePid(self):\n result = self.GetChromeProcess()\n if result and 'pid' in result:\n return result['pid']\n return None", "def test_skip_if_windows(self):\n pass", "def start(self, process_id=None):\n try:\n self.process = psutil.Process(process_id)\n logging.debug(self.process.connections())\n logging.debug(self.process.ppid())\n return \"Process Started\"\n except Exception as e:\n logging.exception(e)\n return \"Process doesnt exists\"", "def get_pid(name, path=None):\n if name not in list_(limit=\"running\", path=path):\n raise CommandExecutionError(\n f\"Container {name} is not running, can't determine PID\"\n )\n info = __salt__[\"cmd.run\"](f\"lxc-info -n {name}\").split(\"\\n\")\n pid = [\n line.split(\":\")[1].strip()\n for line in info\n if re.match(r\"\\s*PID\", line) is not None\n ][0]\n return pid", "def PIDs():\n from ctypes import windll,c_ulong,byref,sizeof\n PIDs = (c_ulong*512)()\n size_of_PIDs = c_ulong()\n windll.psapi.EnumProcesses(byref(PIDs),sizeof(PIDs),byref(size_of_PIDs))\n nPIDs = size_of_PIDs.value/sizeof(c_ulong())\n pidProcess = sorted([int(i) for i in PIDs][:nPIDs])\n return pidProcess", "def infogpuname(self):\n return self._infogpuname", "def find(name, arg=None):\r\n for p in get_processes():\r\n if p.name.lower().find(name.lower()) != -1:\r\n if arg is not None:\r\n for a in (p.cmdline or []):\r\n if a.lower().find(arg.lower()) != -1:\r\n return p\r\n else:\r\n return p\r\n return None", "def _get_pid(split_data, sensor):\n prot, ip_dst, port_dst, timestamp = split_data\n prot = prot.lower()\n\n if not sanitizer.check_get_pid_params(prot, ip_dst, port_dst, timestamp):\n return '-1,error checking input'\n\n return sensor.search_process(prot, ip_dst, port_dst, timestamp)", "def skip_on_windows (func):\n import sys\n\n return skip_if(sys.platform.startswith('win'))(func)", "def checkPID(pid):\n\tif pid == 0:\t#If PID newly created return False\n\t\treturn False\n\ttry:\n\t\tos.kill(pid, 0)\n\texcept OSError:\n\t\treturn False\n\telse:\n\t\treturn True" ]
[ "0.6427718", "0.627324", "0.6176712", "0.6168791", "0.61007553", "0.6070687", "0.60217047", "0.60116947", "0.5992538", "0.5970814", "0.59083015", "0.5876524", "0.58620656", "0.5821407", "0.5805197", "0.57778066", "0.5776582", "0.5765953", "0.57581586", "0.57557404", "0.57374895", "0.5732777", "0.5712636", "0.5705129", "0.57000494", "0.5695862", "0.56845045", "0.5672988", "0.56560445", "0.5639912", "0.5627396", "0.5610628", "0.56006604", "0.55916893", "0.55903816", "0.55395466", "0.5521779", "0.5508678", "0.54987013", "0.5452757", "0.53961027", "0.5384746", "0.53788465", "0.53773636", "0.5369428", "0.53533953", "0.5353127", "0.53468376", "0.53431404", "0.53425574", "0.53413135", "0.53365666", "0.5309969", "0.52871656", "0.5286612", "0.5282104", "0.52555466", "0.52555466", "0.52348834", "0.5225264", "0.5222001", "0.5218785", "0.5217916", "0.52162796", "0.5209989", "0.52026564", "0.52026564", "0.52006644", "0.52000505", "0.5197039", "0.51776785", "0.51689357", "0.51626295", "0.5157666", "0.5129021", "0.51274616", "0.5124892", "0.51237947", "0.5096719", "0.50810176", "0.50778675", "0.5071616", "0.5065455", "0.5061287", "0.5060865", "0.5059485", "0.50560236", "0.50536495", "0.5046513", "0.50455946", "0.50398517", "0.50342846", "0.50335175", "0.50308794", "0.50270194", "0.50231206", "0.500992", "0.5006358", "0.49908364", "0.49904478" ]
0.71424156
0
Returns a (pid, handle, tid) tuple on success. (-1, None, -1) on failure (logged).
def processCreate(sName, asArgs):
    # Construct a command line.
    sCmdLine = '';
    for sArg in asArgs:
        if sCmdLine == '':
            sCmdLine += '"';
        else:
            sCmdLine += ' "';
        sCmdLine += sArg;
        sCmdLine += '"';

    # Try start the process.
    # pylint: disable=no-member
    dwCreationFlags = win32con.CREATE_NEW_PROCESS_GROUP;
    oStartupInfo    = win32process.STARTUPINFO();
    try:
        (hProcess, hThread, uPid, uTid) = win32process.CreateProcess(sName,
            sCmdLine,                   # CommandLine
            None,                       # ProcessAttributes
            None,                       # ThreadAttibutes
            1,                          # fInheritHandles
            dwCreationFlags,
            None,                       # Environment
            None,                       # CurrentDirectory.
            oStartupInfo);
    except:
        reporter.logXcpt('sName="%s" sCmdLine="%s"' % (sName, sCmdLine));
        return (-1, None, -1);

    # Dispense with the thread handle.
    try:
        hThread.Close(); # win32api.CloseHandle(hThread);
    except:
        reporter.logXcpt();

    # Try get full access to the process.
    try:
        hProcessFullAccess = win32api.DuplicateHandle(
            win32api.GetCurrentProcess(),
            hProcess,
            win32api.GetCurrentProcess(),
            win32con.PROCESS_TERMINATE
            | win32con.PROCESS_QUERY_INFORMATION
            | win32con.SYNCHRONIZE
            | win32con.DELETE,
            False,
            0);
        hProcess.Close(); # win32api.CloseHandle(hProcess);
        hProcess = hProcessFullAccess;
    except:
        reporter.logXcpt();
    reporter.log2('processCreate -> %#x, hProcess=%#x' % (uPid, hProcess,));
    return (uPid, hProcess, uTid);
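A brief usage sketch (editor's addition, not part of the dataset row): it assumes the pywin32 modules (win32event, win32con, win32process, win32api) and the reporter helper used by processCreate are importable; the notepad path, file name, and timeout are placeholders only.

import win32event  # pylint: disable=import-error

# Hypothetical caller; everything below other than processCreate() is illustrative.
(uPid, hProcess, uTid) = processCreate('C:\\Windows\\System32\\notepad.exe',
                                       ['notepad.exe', 'C:\\Temp\\example.txt']);
if uPid == -1:
    print('Failed to start the process (details were logged by reporter).');
else:
    # Wait up to 10 seconds for the child to exit, then release the handle.
    dwWait = win32event.WaitForSingleObject(hProcess, 10000);  # pylint: disable=no-member
    print('pid=%u tid=%u wait-result=%#x' % (uPid, uTid, dwWait));
    hProcess.Close();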
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_pid_tid():\n # noinspection PyBroadException\n try:\n return \"(pid=%s) (tid=%s)\" % (\n six.text_type(os.getpid()),\n six.text_type(six.moves._thread.get_ident()),\n )\n except Exception:\n return \"(pid=%s) (tid=Unknown)\" % (six.text_type(os.getpid()))", "def __gettid():\r\n try:\r\n import platform\r\n if not platform.system().startswith('Linux'):\r\n raise ValueError\r\n syscalls = {\r\n 'i386': 224, # unistd_32.h: #define __NR_gettid 224\r\n 'x86_64': 186, # unistd_64.h: #define __NR_gettid 186\r\n }\r\n import ctypes\r\n tid = ctypes.CDLL('libc.so.6').syscall(syscalls[platform.machine()])\r\n except:\r\n tid = -1\r\n return tid", "def getmypid():\n raise NotImplementedError()", "def get_handle(self, pid):\r\n self._raise_unless_has_pid(pid)\r\n return self._translate_line_to_handle(self._raw[pid])", "def status(pid_file):\n if not os.path.exists(pid_file):\n return None\n\n pid = None\n with open(pid_file, \"r\") as pf:\n pid = pf.read().strip()\n\n if not pid:\n logger.error(\"Unable to retrieve pid from %s\" % pid_file)\n return None\n\n if not pid.isdigit():\n logger.error(\"Invalid pid %s read from %s\" % (pid, pid_file))\n return None\n\n pid = int(pid)\n\n try:\n # Send 0 signal to check if the process is alive.\n os.kill(pid, 0)\n except OSError as e:\n logger.debug(\"%s\" % e, exc_info=True)\n return None\n return pid", "def get_pid_status(pid):\n if pid != 0:\n pid_str = pid\n else:\n pid_str = 'self'\n\n # read procfs /proc/PID/stat file to get PID status\n try:\n with open(\"/proc/{}/stat\".format(pid_str)) as stat_file:\n # split by '(' and ')' needed\n # as processes/threads could have spaces in the name\n line_split = stat_file.readline().strip().split(')')\n pid_name = line_split[0].split('(')[1]\n state = line_split[1].strip().split(' ')[0]\n return state, state in PID_VALID_STATUS, pid_name\n except EnvironmentError:\n pass\n\n return 'E', False, ''", "def pid():\n return 0x0204", "def pid():\n return 0x0204", "def pid(self):", "def fuse_get_context():\n ctxp = _libfuse.fuse_get_context()\n ctx = ctxp.contents\n return ctx.uid, ctx.gid, ctx.pid", "def getpid(command):\n try:\n _pidof = executeCommand(command)\n except Exception as er:\n print (\" not able to get pid\")\n return False\n return _pidof", "def ppid(self):", "def trace_id_get() -> tuple[str, str] | None:\n return trace_id_cv.get()", "def pidGet(self) -> float:\n ...", "def pidGet(self) -> float:\n ...", "def _get_my_tid(self):\n\t\tif not self.isAlive():\n\t\t\traise threading.ThreadError(\"the thread is not active\")\n\t\t# do we have it cached?\n\t\tif hasattr(self, \"_thread_id\"):\n\t\t\treturn self._thread_id\n\t\t# no, look for it in the _active dict\n\t\tfor tid, tobj in threading._active.items():\n\t\t\tif tobj is self:\n\t\t\t\tself._thread_id = tid\n\t\t\t\treturn tid\n\t\traise AssertionError(\"could not determine the thread's id\")", "def getuid(): # type: ignore\n return 0", "def get_pid(self):\n if self.status():\n file = open(os.path.join(self.data_dir, 'postmaster.pid'))\n pid = int(file.readline())\n return pid\n else:\n return None", "def _get_my_tid(self):\n if not self.is_alive():\n raise threading.ThreadError(\"the thread is not active\")\n\n # do we have it cached?\n if hasattr(self, \"_thread_id\"):\n return self._thread_id\n\n # no, look for it in the _active dict\n for tid, tobj in threading._active.items():\n if tobj is self:\n self._thread_id = tid\n return tid\n\n raise AssertionError(\"could not determine the thread's id\")", "def get_ts_pid(pidfile):\n try:\n with 
open(pidfile) as f:\n pid = f.readline()\n if pid.strip().isdigit():\n pid = int(pid.strip())\n else:\n LOG.warning(\"Unable to read pidfile %s file contains %r; process metrics will fail!\", pidfile, pid)\n pid = None\n except EnvironmentError:\n LOG.warning(\"Unable to read pidfile %s; process metrics will fail!\", pidfile)\n pid = None\n return pid", "def _get_pid(split_data, sensor):\n prot, ip_dst, port_dst, timestamp = split_data\n prot = prot.lower()\n\n if not sanitizer.check_get_pid_params(prot, ip_dst, port_dst, timestamp):\n return '-1,error checking input'\n\n return sensor.search_process(prot, ip_dst, port_dst, timestamp)", "def _get_my_tid(self):\n if not self.isAlive():\n raise threading.ThreadError(\"the thread is not active\")\n\n # do we have it cached?\n if hasattr(self, \"_thread_id\"):\n return self._thread_id\n\n # no, look for it in the _active dict\n for tid, tobj in threading._active.items():\n if tobj is self:\n self._thread_id = tid\n return tid\n\n raise AssertionError(\"could not determine the thread's id\")", "def sephandle(handle):\n if re.match('^[a-zA-Z]+[a-zA-Z0-9_-]*@[a-z0-9.]+\\.[a-z]+$', handle) is None:\n raise errors.InvalidHandleError('{0}'.format(handle))\n handle = handle.split('@')\n pod, user = handle[1], handle[0]\n return (pod, user)", "def get_identity():\n identity = multiprocessing.current_process()._identity\n identity = 0 if not identity else identity[0]\n\n identity = (identity, threading.current_thread().ident)\n return identity", "def pid(self):\n\t\treturn self.__pid", "def get_token(self, tid):\n if self.lliagraph:\n return self.lliagraph.get_token(tid)\n else:\n return None", "def request_status(self):\n\n pid = self.__read_pidfile()\n if pid is None:\n return errno.ESRCH\n\n try:\n os.kill(pid, signal.SIGINT)\n except OSError, e:\n if e.errno == errno.ESRCH or e.errno == errno.EPERM:\n return e.errno\n raise e\n return None", "def get_tid(self, reference):\n\n tid = self.ref2tid.get(reference, -1)\n if tid == -1:\n raise KeyError('{} was not found in the file header'.format(reference))\n return tid", "def get_window_thread_process_id(h_wnd):\n _get_window_thread_process_id = WINDLL.user32.GetWindowThreadProcessId\n _get_window_thread_process_id.argtypes = [HWND, LPDWORD]\n _get_window_thread_process_id.restype = DWORD\n _get_window_thread_process_id.errcheck = raise_if_zero\n\n dw_process_id = DWORD(0)\n dw_thread_id = _get_window_thread_process_id(h_wnd, BY_REF(dw_process_id))\n return (dw_thread_id, dw_process_id.value)", "def pid_for_socket(socket_number):\n\n\tbn = os.path.basename\n\trp = os.path.realpath\n\n\tsearched = 'socket:[%s]' % socket_number\n\n\tfor entry in os.listdir('/proc'):\n\t\tif entry.isdigit():\n\t\t\ttry:\n\t\t\t\tcurrent_path = '/proc/%s/fd' % entry\n\t\t\t\tfor openfd in os.listdir(current_path):\n\t\t\t\t\tif searched == bn(rp('%s/%s' % (current_path, openfd))):\n\t\t\t\t\t\treturn int(entry)\n\n\t\t\texcept (IOError, OSError), e:\n\t\t\t\t# in rare cases, the process vanishes during iteration. This\n\t\t\t\t# is harmless. 
Any other error is not cool, raise it.\n\t\t\t\tif e.errno != errno.ENOENT:\n\t\t\t\t\traise e\n\n\treturn None", "def pid(self):\n return self._query_status()['pid']", "def get_ident():\n return -1", "def get_pid(pidfile):\n pid = None\n if os.path.exists(pidfile):\n with open(pidfile, 'r') as f:\n pid = f.read()\n return pid", "def test_003_pid(self):\n HEADING()\n pid = self.db.pid()\n print (pid)\n assert True", "def test_process_id():\n output = sh.process_id()\n assert isinstance(output, int) and output > 0", "def getPid(self):\n try:\n fh = open(self.filename)\n except OSError:\n return None\n line = fh.readline()\n try:\n return string.atoi(line) # trailing newline doesn't matter\n except ValueError:\n return None", "def get_pid(self, pid):\n for p in self._queue:\n if p.id == pid:\n return p\n else: return 0", "def get_process_info_by_pid(self, pid):\n # TODO: discuss if self.logger needs to happen here? I think not? -BY\n\n for process in self.data_storage.running_data:\n if self.data_storage.running_data[process]['pid'] == pid:\n return self.data_storage.running_data[process]", "def getmyuid():\n raise NotImplementedError()", "def get_pid(self):\n return self.k_p, self.k_i, self.k_d", "def get_pid(name):\n try: \n for process in psutil.process_iter():\n try:\n proc = process.as_dict(attrs=['pid', 'name'])\n if name in proc['name']:\n pid = proc['pid']\n logging.info(f\"Found PID {pid} for {name}\")\n return int(pid) \n except (psutil.NoSuchProcess, psutil.AccessDenied , psutil.ZombieProcess) :\n pass \n except Exception as e:\n logging.exception(f\"EXCEPTION: {e} \\n Full stack trace: \\n\", exc_info=1)", "def _get_tuple_for_process(self, process):\n # FIX:\n # if list(item[MECHANISM] for item in self.mech_tuples).count(mech):\n # if self.owner.verbosePref:\n # print(\"PROGRAM ERROR: {} found in more than one mech_tuple in {} in {}\".\n # format(append_type_to_name(mech), self.__class__.__name__, self.owner.name))\n return next((ProcessTuple for ProcessTuple in self.process_tuples if ProcessTuple.process is process), None)", "def get_memory_of_pid(pid):\n try:\n with open(LINUX_PROCESS_STAT_LOCATION % pid, 'r') as f:\n pid_entries = f.read().split(' ')\n except IOError:\n return None\n\n pid_mem = 0\n if len(pid_entries) > 23:\n pid_mem = int(pid_entries[22])\n return pid_mem", "def _get_handle(self, context):\n\n metadata = context.invocation_metadata()\n metadata_dict = {}\n for key, value in metadata:\n metadata_dict[key] = value\n return metadata_dict[self.HANDLE_KEY]", "def pid(self):\n return self.__pid", "def get_pid(self):\n try:\n pf = open(self.pidfile,'r')\n pid = int(pf.read().strip())\n pf.close()\n except (IOError, TypeError):\n pid = None\n return pid", "def _get_uid(name):\n if getpwnam is None or name is None:\n return None\n try:\n result = getpwnam(name)\n except KeyError:\n result = None\n if result is not None:\n return result[2]\n return None", "def handle(self) -> int:\n return self.obj.handle", "async def get_thread_info(self) -> Any:\n return await self.AD.threading.get_thread_info()", "def get_pid(packet):\n return ((ord(packet[TS.PID_START_INDEX]) & 0x1f)<<8) | ord(packet[TS.PID_START_INDEX+1])", "def get_window_id_by_pid(pid):\n from subprocess import check_output\n # Looks like:\n # 0x03c00041 0 3498 skipper Mozilla Firefox\n # WindowID ? 
PID USER Window Name\n # Needs sudo apt-get install wmctrl -lp\n\n output = check_output('wmctrl -lp', shell=True)\n # Find the line with the PID we are looking for\n for line in output.splitlines():\n fields = line.split()\n if len(fields) >= 3:\n this_pid = int(fields[2])\n if this_pid == pid:\n return int(fields[0], 16)\n return None", "def _stat(self):\n st = os.lstat(self.resource.name)\n uid = st.st_uid\n gid = st.st_gid\n mode = stat.S_IMODE(st.st_mode)\n return uid, gid, mode", "def _get_pid(self):\n ps_txt = six.ensure_str(self.controller.run(\n args=[\"ps\", \"ww\", \"-u\"+str(os.getuid())]\n ).stdout.getvalue()).strip()\n lines = ps_txt.split(\"\\n\")[1:]\n\n for line in lines:\n if line.find(\"ceph-{0} -i {1}\".format(self.daemon_type, self.daemon_id)) != -1:\n log.info(\"Found ps line for daemon: {0}\".format(line))\n return int(line.split()[0])\n log.info(\"No match for {0} {1}: {2}\".format(\n self.daemon_type, self.daemon_id, ps_txt\n ))\n return None", "def get_handles(_pid):\n\n system_handle_information = SYSTEM_HANDLE_INFORMATION_EX()\n size = DWORD (sizeof (system_handle_information))\n while True:\n result = ntdll.NtQuerySystemInformation(\n SystemExtendedHandleInformation,\n byref(system_handle_information),\n size,\n byref(size)\n )\n\n if result == STATUS_SUCCESS:\n break\n\n elif result == STATUS_INFO_LENGTH_MISMATCH:\n size = DWORD(size.value * 4)\n resize(system_handle_information, size.value)\n\n else:\n raise x_file_handles(\"NtQuerySystemInformation\", hex(result))\n\n pHandles = cast(\n system_handle_information.Handles,\n POINTER(SYSTEM_HANDLE_TABLE_ENTRY_INFO_EX * \\\n system_handle_information.NumberOfHandles)\n )\n\n content = []\n # handle.UniqueProcessId, handle.HandleValue, handle.Object \n count = 0\n for handle in pHandles.contents:\n if handle.UniqueProcessId == _pid:\n #if \"0x85\" <= hex(handle.Object)[:4] <= \"0x87\":\n if hex(handle.Object)[:4] == \"0x87\":\n #if handle.Object == 0x86cec490: # PROCESS 86cec490 SessionId: 0 Cid: 017c Peb: 7ffd5000 ParentCid: 0168\n # DirBase: be6e2060 ObjectTable: 909b8678 HandleCount: 470.\n # Image: winlogon.exe\n content.append(handle.Object)\n print(\"\\t|\" + str(handle.UniqueProcessId) + \":\" + str(handle.HandleValue) + \":\" + str(hex(handle.Object)))\n count += 1\n\n print(\"\\n[!] 
[%i] Leaked Pointer found!\" % count)\n\n return content", "def get_pid(ssh):\n\n\tpid_file_path = data_dir.MM_PID_DIR+\"master_monitord.pid\" #獲得master_monitord.pid之檔案路徑\n\tcmd = \"sudo cat %s\" % pid_file_path #組合cat指令\n\ts_stdin, s_stdout, s_stderr = ssh.exec_command(cmd) #透過ssh執行指令\n\treturn s_stdout.read()\n\t#pid, error = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE).communicate() #執行指令\n\t#if error == None:\n\t#\treturn int(pid)\n\t#return False", "def get_handle_from_gramps_id(self, gid):\n obj = self.dbstate.db.get_object_from_gramps_id(gid)\n if obj:\n return obj.get_handle()\n else:\n return None", "def get_temp_user(temp_uid):\r\n rows = query_db(GET_TEMP_USER_BY_ID, (temp_uid,))\r\n if (not rows) or (len(rows) == 0):\r\n return None\r\n else:\r\n return rows[0]", "def get_pid_processor(pid):\n if pid != 0:\n pid_str = pid\n else:\n pid_str = 'self'\n\n # read procfs /proc/PID/stat file to get info about processor\n # that PID was scheduled on last time\n try:\n with open(\"/proc/{}/stat\".format(pid_str)) as stat_file:\n proc_stat = stat_file.readline().strip().split(' ')\n return int(proc_stat[39])\n except EnvironmentError:\n return -1", "def pidHTTPd(htconf):\n# pcmd = \"/bin/ps -ef|/bin/grep \" + htconf + \"|/bin/grep -v grep|/bin/awk '{print $3}'|sort -u\"\n# pcmd = '/bin/ps -ef'\n# pcmd = 'ps -eo pid,ppid,rss,vsize,pcpu,pmem,cmd -ww --sort=pid'\n pcmd = 'ps -eo pid,ppid,rss,cmd'\n# produces something like the following (when selecting htconf == httpd.conf99)\n#30645 1 4420 /devstore/apache2/bin/httpd -f /devstore/apache2/conf/httpd.conf99\n#30646 30645 1640 /devstore/apache2/bin/httpd -f /devstore/apache2/conf/httpd.conf99\n#30647 30645 2628 /devstore/apache2/bin/fcgi- -f /devstore/apache2/conf/httpd.conf99\n#30648 30645 37216 /devstore/apache2/bin/httpd -f /devstore/apache2/conf/httpd.conf99\n#30649 30645 3064 /devstore/apache2/bin/httpd -f /devstore/apache2/conf/httpd.conf99\n#30650 30645 3064 /devstore/apache2/bin/httpd -f /devstore/apache2/conf/httpd.conf99\n#30651 30645 3064 /devstore/apache2/bin/httpd -f /devstore/apache2/conf/httpd.conf99\n#30652 30645 3064 /devstore/apache2/bin/httpd -f /devstore/apache2/conf/httpd.conf99\n#30656 30645 3064 /devstore/apache2/bin/httpd -f /devstore/apache2/conf/httpd.conf99\n# the 3rd column is ram util. 
in units of kbytes, kill the entire set of httpd pids\n# (by killing parent proc) if any one thread exceeds 1,000,000 kbytes (> 1gb)\n pid = os.popen(pcmd, \"r\")\n ppids = [] ; pids = [] ; memuse = [] ; vals = [] \n parentpid = -1\n restart = False\n try:\n for p in pid.xreadlines():\n p.rstrip()\n vals = p.split()\n conf = vals[-1] # last val should be confile\n try:\n if conf.index(htconf) >= 0 :\n pids.append(int(vals[0]))\n ppids.append(int(vals[1]))\n memuse.append(int(vals[2])) \n# print >> FileKeyUtils.WMSlog, 'pidHTTPd> pids: ', pids, ', ppids: ', ppids, ' memuse: ', memuse\n if int(vals[1]) == 1 : parentpid = int(vals[0])\n if int(vals[2]) > 1000000 : restart = True\n except:\n# print >> FileKeyUtils.WMSlog, 'pidHTTPd> vals: ', vals\n pass\n except: pass\n\n print >> FileKeyUtils.WMSlog, 'pidHTTPd>', htconf, ' parentpid:', parentpid, ', memuse:', memuse\n if restart: # return parentpid\n return parentpid\n\n return -1", "def pid(self):\n return self._pid", "def pid(self):\n return self._pid", "def pid(self):\n return self._pid", "def getPidStatus(self, seg, pidRunningStatus):\n\n lockFileExists = pidRunningStatus['lockFileExists']\n netstatPortActive = pidRunningStatus['netstatPortActive']\n pidValue = pidRunningStatus['pidValue']\n\n lockFileName = gp.get_lockfile_name(seg.getSegmentPort())\n\n error = None\n if not lockFileExists and not netstatPortActive:\n error = \"No socket connection or lock file (%s) found for port %s\" % (lockFileName, seg.getSegmentPort())\n elif not lockFileExists and netstatPortActive:\n error = \"No lock file %s but process running on port %s\" % (lockFileName, seg.getSegmentPort())\n elif lockFileExists and not netstatPortActive:\n error = \"Have lock file %s but no process running on port %s\" % (lockFileName, seg.getSegmentPort())\n else:\n if pidValue == 0:\n error = \"Have lock file and process is active, but did not get a pid value\" # this could be an assert?\n\n res = {}\n res['pid'] = pidValue\n res['error'] = error\n return res", "def get_command(pid):", "def pid_from_context_or_data(value, context, **kwargs):\n pid = (context or {}).get('pid')\n if pid is None:\n return value\n else:\n return pid.pid_value", "def get_uid(username):\n\t\tif username is None:\n\t\t\treturn\n\t\tcon = mdb.connect('localhost', 'root', 'sensepass', 'sensecambrowser')\n\t\twith con:\n\t\t\tquery = \"SELECT id from auth_user WHERE username=%s\" % (username)\n\t\t\tcur = con.cursor()\n\t\t\tcur.execute(query)\n\t\t\tdata = cur.fetchall()\n\t\t\tprint \"len(data)\"\n\t\t\tprint data\n\t\t\tif len(data) > 0:\n\t\t\t\treturn data[0]\n\t\t\treturn None", "def check_pid(pid):\n result = None\n try:\n s = os.stat('/proc/' + pid)\n if s.st_uid == our_uid:\n cwd = os.path.realpath('/proc/' + pid + '/cwd')\n if cwd == kill_dir and int(pid) != our_pid:\n f = open('/proc/' + pid + '/cmdline')\n cmdline = f.read().split('\\x00')[:-1]\n f.close()\n result = cmdline\n except OSError:\n # We can't read all our processes; that's ok\n pass\n return result", "def get_PID(self):\n return self.PID", "def pid_gid(p):\n return (p['iOrder'], p['iGroup'])", "def execute_success(self, *args, **kwargs):\n return 0, self.shell_output, None", "def status(name, sig=None):\n cmd = \"svstat {}\".format(_service_path(name))\n out = __salt__[\"cmd.run_stdout\"](cmd, python_shell=False)\n try:\n pid = re.search(r\"\\(pid (\\d+)\\)\", out).group(1)\n except AttributeError:\n pid = \"\"\n return pid", "def pcp_process_info(self, pid):\n\t\tif self.PCPConnectionStatus() != 
ConnStateType.OK:\n\t\t\tself.pcp_internal_error('invalid PCP connection')\n\t\t\treturn None\n\n\t\tprocess_id = str(pid)\n\t\tself._PCPWrite('P'.encode(), 1)\n\t\twsize = self.int_to_bytes(len(process_id) + 1 + 4)\n\t\tself._PCPWrite(wsize, 4)\n\t\tself._PCPWrite(process_id.encode() + NULL, len(process_id) + 1)\n\t\tif self.PCPFlush() < 0:\n\t\t\treturn None\n\t\tif self.Pfdebug:\n\t\t\tself.Pfdebug.write(f'DEBUG: send: tos=\"P\", length={self.bytes_to_int(wsize)}\\n')\n\n\t\treturn self._process_pcp_response('P')", "def getNode(self):\r\n try:\r\n output,error = Popen(\"qstat | grep \"+self.jobId, shell=True, stdout=PIPE, stderr=PIPE).communicate()\r\n if self.jobId in output:\r\n return output.split(\"\\t\")[7]\r\n if len(error) > 0:\r\n logging.error(error)\r\n except ValueError:\r\n logging.info(\"Error: waiting for not submitted job...\")", "def GetProcessHandle(access_right: int, inherit: bool, pid: int) -> int:\n return kernel32.OpenProcess(access_right, inherit, pid)", "def parseID(uid):\n\n info = uid.split('_')\n if len(info) != 3:\n print('invalid ID')\n return(1)\n plot = info[0]\n height = info[1]\n tree_id = info[2]\n\n return plot, height, tree_id", "def process_infos(str=\"???\"):\n # stdin/stdout not always connected to a controlling terminal\n try:\n term_owner0 = os.tcgetpgrp(0)\n except OSError:\n term_owner0 = 0\n try:\n term_owner1 = os.tcgetpgrp(1)\n except OSError:\n term_owner1 = 0\n return \"processus %s: pid=%d, pere=%d, groupe=%d, term owner:%d/%d, sid=%d\"%(str,os.getpid(),os.getppid(),os.getpgid(0),term_owner0,term_owner1, os.getsid(0))", "def check_pid(pid):\n try:\n os.kill(pid, 0)\n except OSError as ex:\n template = \"An exception of type {0} occured.\\nArguments:\\n{1!r}\"\n message = template.format(type(ex).__name__, ex.args)\n print (message)\n return False\n else:\n return True", "def pid(self):\n # type: () -> Optional[int]\n try:\n return self._process.pid # type: ignore # pylint: disable=no-member\n except:\n return None", "def process_results(process_object):\n (stdout, stderr)=process_object.communicate()\n return (process_object.returncode, stdout, stderr)", "def find_job_and_job_status(self):\n\n def find_job_and_job_status_log_history(f):\n rcelog('critical', \"find_job_and_status(): Found job {0} in history. 
Terminated in error.\".\n format(self.id))\n return f\n\n try:\n return self.__get_job_status_from_queue__()\n except:\n pass\n\n try:\n return find_job_and_job_status_log_history(self.__get_job_status_from_history__())\n except:\n return (None, None)", "async def get_pin_thread(self) -> int:\n return await self.AD.threading.get_pin_thread(self.name)", "def get_daemon_pid():\n try:\n return _get_pid_from_pidfile()\n except (FileNotFoundError, ValueError):\n return None", "def GetHandlePosition(self):\n ...", "def get_process_object(pid, die=True):\n try:\n return psutil.Process(pid)\n except psutil.NoSuchProcess as e:\n if die:\n raise e\n else:\n return None", "def nxlib_pid():\n nxlib_procc = Popen(['python3', '-u', helper_file], stdin=PIPE, stdout=PIPE,\n universal_newlines=True, bufsize=1)\n\n time.sleep(20) # wait for 10 secs, to make sure the lib is loaded successfully\n assert nxlib_procc is not None, \"Could not start nxlib subprocess\"\n return nxlib_procc", "def _get_pid(self, call_info):\n unique_name = call_info['sender']\n return self._dbus_proxy.GetConnectionUnixProcessID(unique_name)", "def pfind(pid):\n for p in list_foreach(\"allproc\", \"p_list\"):\n if p['p_pid'].cast(gdb.lookup_type(\"int\")) == pid:\n return p\n raise gdb.error(\"No process with pid {} exists\".format(pid))", "def get_token_information(self):\n GetTokenInformation = ctypes.windll.advapi32.GetTokenInformation\n GetTokenInformation.argtypes = [\n wintypes.HANDLE, # TokenHandle\n ctypes.c_uint, # TOKEN_INFORMATION_CLASS value\n wintypes.LPVOID, # TokenInformation\n wintypes.DWORD, # TokenInformationLength\n ctypes.POINTER(wintypes.DWORD), # ReturnLength\n ]\n GetTokenInformation.restype = wintypes.BOOL\n\n CopySid = ctypes.windll.advapi32.CopySid\n CopySid.argtypes = [\n wintypes.DWORD, # nDestinationSidLength\n ctypes.c_void_p, # pDestinationSid,\n ctypes.c_void_p # pSourceSid\n ]\n CopySid.restype = wintypes.BOOL\n\n GetLengthSid = ctypes.windll.advapi32.GetLengthSid\n GetLengthSid.argtypes = [\n ctypes.POINTER(SID) # PSID\n ]\n GetLengthSid.restype = wintypes.DWORD\n\n return_length = wintypes.DWORD(0)\n buffer = ctypes.create_string_buffer(SECURITY_MAX_SID_SIZE)\n\n res = GetTokenInformation(self.get_process_token(),\n TOKEN_INFORMATION_CLASS.TokenUser,\n buffer,\n SECURITY_MAX_SID_SIZE,\n ctypes.byref(return_length)\n )\n assert res > 0, \"Error in second GetTokenInformation (%d)\" % res\n\n token_user = ctypes.cast(buffer, ctypes.POINTER(TOEKN_USER)).contents\n CopySid(SECURITY_MAX_SID_SIZE,\n self.identity.Value.AccountSid.Data,\n token_user.User.Sid\n )\n self.identity.Type = WINBIO_ID_TYPE_SID\n self.identity.Value.AccountSid.Size = GetLengthSid(token_user.User.Sid)", "def pidof(processname = None):\n processname = os.path.basename(processname)\n pidpath = os.path.join(pid_path,processname + \".pid\")\n if processname is not None and os.path.exists(pidpath):\n f = open (pidpath)\n pids = f.readlines()\n f.close()\n return pids\n else:\n return False", "def get_phandle(self, phandle):\n if self.is_root():\n return self.phandles[phandle]\n else:\n self.parent.get_phandle(phandle)", "def pidExists(self, pid):\n\n prochash = self.getHash( 'datahash' ) # safely get copy of process dict\n\n try:\n prochash[pid]\n return 1\n except KeyError:\n return 0", "def pidExists(self, pid):\n\n prochash = self.getHash( 'datahash' ) # safely get copy of process dict\n\n try:\n prochash[pid]\n return 1\n except KeyError:\n return 0", "def pid(self):\n if self.proc is None:\n return 0\n return self._pid()", "def 
GetCpuStats(self, pid):\n class ProcTaskInfo(ctypes.Structure):\n \"\"\"Struct for proc_pidinfo() call.\"\"\"\n _fields_ = [(\"pti_virtual_size\", ctypes.c_uint64),\n (\"pti_resident_size\", ctypes.c_uint64),\n (\"pti_total_user\", ctypes.c_uint64),\n (\"pti_total_system\", ctypes.c_uint64),\n (\"pti_threads_user\", ctypes.c_uint64),\n (\"pti_threads_system\", ctypes.c_uint64),\n (\"pti_policy\", ctypes.c_int32),\n (\"pti_faults\", ctypes.c_int32),\n (\"pti_pageins\", ctypes.c_int32),\n (\"pti_cow_faults\", ctypes.c_int32),\n (\"pti_messages_sent\", ctypes.c_int32),\n (\"pti_messages_received\", ctypes.c_int32),\n (\"pti_syscalls_mach\", ctypes.c_int32),\n (\"pti_syscalls_unix\", ctypes.c_int32),\n (\"pti_csw\", ctypes.c_int32),\n (\"pti_threadnum\", ctypes.c_int32),\n (\"pti_numrunning\", ctypes.c_int32),\n (\"pti_priority\", ctypes.c_int32)]\n PROC_PIDTASKINFO = 4\n def __init__(self):\n self.size = ctypes.sizeof(self)\n super(ProcTaskInfo, self).__init__() # pylint: disable=bad-super-call\n\n proc_info = ProcTaskInfo()\n if not self.libproc:\n self.libproc = ctypes.CDLL(ctypes.util.find_library('libproc'))\n self.libproc.proc_pidinfo(pid, proc_info.PROC_PIDTASKINFO, 0,\n ctypes.byref(proc_info), proc_info.size)\n\n # Convert nanoseconds to seconds.\n cpu_time = (proc_info.pti_total_user / 1000000000.0 +\n proc_info.pti_total_system / 1000000000.0)\n results = {'CpuProcessTime': cpu_time,\n 'ContextSwitches': proc_info.pti_csw}\n\n # top only reports idle wakeup count starting from OS X 10.9.\n if self.GetOSVersionName() >= os_version_module.MAVERICKS:\n results.update({'IdleWakeupCount': self._GetIdleWakeupCount(pid)})\n return results", "def getstatusoutput(*args, **kwargs):\n p = subprocess.Popen(*args, **kwargs)\n stdout, stderr = p.communicate()\n return (p.returncode, stdout, stderr)", "def getPID(self) -> \"Optional[str]\":\n the_pid: \"Optional[str]\"\n if self.id is not None:\n the_pid = str(self.id)\n parsedRepoURL = urllib.parse.urlparse(the_pid)\n\n # If it is not an URI / CURIE\n if parsedRepoURL.scheme == \"\":\n if (self.trs_endpoint is not None) and len(self.trs_endpoint) > 0:\n parsedTRSURL = urllib.parse.urlparse(self.trs_endpoint)\n trs_steps: \"Sequence[str]\" = parsedTRSURL.path.split(\"/\")\n pid_steps = [\"\", urllib.parse.quote(the_pid, safe=\"\")]\n\n if self.version_id is not None:\n pid_steps.append(\n urllib.parse.quote(str(self.version_id), safe=\"\")\n )\n\n the_pid = urllib.parse.urlunparse(\n urllib.parse.ParseResult(\n scheme=TRS_SCHEME_PREFIX,\n netloc=parsedTRSURL.netloc,\n path=\"/\".join(pid_steps),\n params=\"\",\n query=\"\",\n fragment=\"\",\n )\n )\n else:\n self.logger.debug(\"trs_endpoint was not provided\")\n the_pid = None\n else:\n the_pid = None\n\n return the_pid", "def get_userid():\n return _userid()", "async def info_timer(self, handle: str) -> Union[tuple, None]:\n return await self.AD.sched.info_timer(handle, self.name)", "def is_pid_valid(pid):\n return get_pid_status(pid)[1]", "def get_did_ident(profile: Profile) -> Optional[str]:\n did_ident = None\n if profile.settings.get(\"log.file\"):\n\n async def _fetch_did() -> Optional[str]:\n async with profile.session() as session:\n wallet = session.inject(BaseWallet)\n req_did_info: DIDInfo = await wallet.get_public_did()\n if not req_did_info:\n req_did_info: DIDInfo = (await wallet.get_local_dids())[0]\n if req_did_info:\n did_ident = req_did_info.did\n return did_ident\n\n loop = asyncio.get_event_loop()\n did_ident = loop.run_until_complete(_fetch_did())\n return did_ident", "def 
test_process_parent_id():\n output = sh.process_parent_id()\n assert isinstance(output, int) and output > 0" ]
[ "0.62232584", "0.60516447", "0.5674809", "0.5673514", "0.55722266", "0.5526226", "0.548995", "0.548995", "0.5366021", "0.53371155", "0.53033525", "0.52723205", "0.5250137", "0.5207038", "0.5207038", "0.51987016", "0.5190687", "0.51559055", "0.5148344", "0.5127615", "0.5126017", "0.510105", "0.50635165", "0.50448734", "0.503443", "0.50194347", "0.4993677", "0.4962815", "0.49323043", "0.4895848", "0.4873731", "0.48402676", "0.4834868", "0.4831316", "0.48209804", "0.4814245", "0.4813296", "0.48026112", "0.47993833", "0.4794001", "0.4787458", "0.47827217", "0.4763664", "0.476232", "0.4758407", "0.474807", "0.47473732", "0.4728696", "0.47188702", "0.4717316", "0.47014663", "0.4700294", "0.46990028", "0.46943754", "0.4680621", "0.46638864", "0.46616015", "0.46591052", "0.46553847", "0.46547902", "0.46547902", "0.46547902", "0.46547064", "0.4647274", "0.46471778", "0.46389347", "0.4633361", "0.46321252", "0.46289375", "0.46240866", "0.46198598", "0.4616078", "0.46021083", "0.45975077", "0.45867044", "0.45696783", "0.4561241", "0.45389426", "0.45385247", "0.45337123", "0.45292625", "0.45244446", "0.45180354", "0.45051447", "0.450229", "0.4496962", "0.4486939", "0.44855812", "0.44845107", "0.44833675", "0.44769347", "0.44769347", "0.44726145", "0.44721264", "0.4465373", "0.4458217", "0.44565347", "0.44542283", "0.4448298", "0.4443278", "0.44391796" ]
0.0
-1
Polls the process handle to see if it has finished (True) or not (False).
def processPollByHandle(hProcess):
    try:
        dwWait = win32event.WaitForSingleObject(hProcess, 0); # pylint: disable=no-member
    except:
        reporter.logXcpt('hProcess=%s %#x' % (hProcess, hProcess,));
        return True;
    return dwWait != win32con.WAIT_TIMEOUT; #0x102; #
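For context, an editor's sketch of how such a poll helper is typically driven from a timeout loop; waitForProcessWithTimeout is a hypothetical name, and only processPollByHandle comes from the code above.

import time

def waitForProcessWithTimeout(hProcess, cSecsTimeout):
    # Poll once per second until the process finishes or the timeout expires.
    for _ in range(cSecsTimeout):
        if processPollByHandle(hProcess):
            return True;   # Finished (a failed WaitForSingleObject is also reported as finished).
        time.sleep(1);
    return False;          # Still running after cSecsTimeout seconds.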
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_done(self) -> bool:\n is_done = self._process.poll() is not None\n\n return is_done", "def check_finish(self):\r\n return not self.proc.is_alive()", "def _proc_is_alive(self):\n if self._proc is None:\n return False\n\n return self._proc.poll() is None", "def poll_process_done(self) -> None:\n while len(self.process_queue) >= self.max_processes:\n self.check_process_done()", "def wait_process_running(process):\n assert process.is_running()", "def wait_until_finished(self) -> None:\n if not self._parent_signal_conn:\n raise ValueError(\"Process not started.\")\n if self._async_mode:\n raise RuntimeError(\"wait_until_finished should only be called in sync_mode\")\n while self._parent_signal_conn.poll(timeout=None):\n try:\n result = self._parent_signal_conn.recv()\n except EOFError:\n return\n self._process_message(result)\n if isinstance(result, DagParsingStat):\n # In sync mode (which is the only time we call this function) we don't send this message from\n # the Manager until all the running processors have finished\n return", "def is_running(self):\n if self.__process.poll() is not None: # process has ended\n for nbsr in (\"stdout\", \"stderr\"):\n getattr(self, nbsr).finalise()\n return False\n return True", "def alive(self):\n return self._proc is not None and self._proc.poll() is None", "def wait_until_finished(self):\n for processor in self._processors.values():\n while not processor.done:\n time.sleep(0.1)", "def wait_finish(self):\r\n self.proc.join()", "def check_process_full(self) -> None:\n if len(self.process_queue) >= self.max_processes:\n task_name, sp = self.process_queue.pop()\n sp.wait()", "def wait_all_process_done(self) -> None:\n while len(self.process_queue) > 0:\n self.check_process_done()", "def _poll_process(self, box_config):\n try:\n p = psutil.Process(box_config.pid)\n\n return_code = p.wait(timeout=0.01)\n if return_code is None:\n # process is already terminated\n self.logger.info(f'Process {box_config.process_name} is terminated')\n return\n else:\n # process is terminated; possibly by OS\n box_config.pid = None\n self.bc_dao.update(box_config)\n self.logger.info(f'Process {box_config.process_name} got terminated. 
Cleaning up')\n except TimeoutExpired:\n # process is alive and OK\n pass\n except Exception:\n self.logger.error(f'Exception on polling: {box_config.process_name}', exc_info=True)", "def __bool__(self):\n return self.wait(0)", "def alive(self):\n\n return self.subprocess.poll() is None and not self.thread_stop.is_set()", "def waitUntilSubprocessLaunched(self):\n\n def hasLaunched():\n return self._has_launched\n\n with self._has_launched_cv:\n self._has_launched_cv.wait_for(hasLaunched)\n assert self._has_launched", "def waitfor(self):\r\n finished = False\r\n while finished == False:\r\n time.sleep(5)\r\n finished = self.isFinished()", "def has_finished():", "def get_status(self) -> bool:\n try:\n self.__driver.service.assert_process_still_running()\n return True\n except AttributeError:\n return False", "def on_timeout_cb(self):\n returncode = self.process.poll()\n if returncode is None:\n self.progress_bar.pulse()\n return True\n\n self.response(gtk.RESPONSE_ACCEPT)\n return False", "def waitUntilFinished():", "def waitUntilFinished():", "def waitUntilFinished():", "def waitUntilFinished():", "def is_running(self):\n if self._process and self._process.poll() is None:\n return True\n return False", "def is_done(self):\n\n return not self.thread.is_alive()", "def wait(self):\n self.Popen.wait()", "def check_process_done(self):\n for task_name, sp in self.process_queue:\n if sp.poll() is not None:\n self.process_queue.remove((task_name, sp))\n print(definitions.PRINT_CODES[0] + blue(\"Query done: \"), blue(task_name))\n if self.handler is not None:\n self.handler(task_name, sp.returncode)", "def _wait_for_output(self):\n # Here we should get an empty list or list with a tuple [(fd, event)]\n # When we get list with a tuple we can use readline method on\n # the file descriptor.\n poll_result = self.poll_obj.poll(0)\n\n if poll_result:\n line = self.output().readline()\n if self._banner.match(line):\n return True\n\n return False", "def _wait(self,):\n #modlogger.debug( \"%s: waiting\"%self)\n self.closing = True\n with self.not_complete_lock:\n if not self.not_complete: return\n self._checkpoint()", "def is_running(self):\n if self._process:\n return self._process.poll() is None\n else:\n return False", "def processed(self):\n if self.is_waitable():\n with self._condition:\n self._processed = True\n self._condition.notify_all()\n else:\n self._processed = True", "async def wait(self):\n if self.poll() is None:\n await wait_child_exiting(self)\n self._proc.wait()\n else:\n await _core.checkpoint()\n return self.returncode", "def poll_process(process, suppress_errors=False):\n\n while True:\n data_to_stdout(\".\")\n time.sleep(1)\n\n returncode = process.poll()\n\n if returncode is not None:\n if not suppress_errors:\n if returncode == 0:\n data_to_stdout(\" done\\n\")\n elif returncode < 0:\n data_to_stdout(\" process terminated by signal %d\\n\" % returncode)\n elif returncode > 0:\n data_to_stdout(\" quit unexpectedly with return code %d\\n\" % returncode)\n\n break", "def wait_process_completion(remote_command_executor, pid):\n logging.info(\"Waiting for performance test to complete\")\n command = f\"\"\"\n ps --pid {pid} > /dev/null\n [ \"$?\" -ne 0 ] && echo \"COMPLETE\" || echo \"RUNNING\"\n \"\"\"\n result = remote_command_executor.run_remote_command(command)\n if result.stdout == \"RUNNING\":\n raise Exception(\"The process is still running\")\n else:\n return result.stdout.strip()", "def _is_alive(self, pid):\n process = next(x for x in self._processes if x.pid == pid)\n return 
process.is_alive()", "def _isSubProcessRunning(self): \n # Check if child process has terminated. Set and return returncode attribute.\n if self.__process.poll() is None:\n return True\n else:\n return False", "def _server_poll_expcompleted_(self):\n #print \"class Princeton_CCD function _server_poll_expcompleted_\" \n try:\n last_state = self.polled_running\n except (AttributeError,UnboundLocalError):\n self.polled_running = False\n last_state = False\n self.polled_running = self.query_running()\n if (not bool(last_state) and bool(self.polled_running)):\n self.begin_acq_time = time.time()\n #print self.query_running(), last_state\n #if ((last_state == True) and (self.polled_running == False)): CP\n if (bool(last_state) and not bool(self.polled_running)):\n self.end_acq_time = time.time()\n return True\n else:\n return False", "def _try_finish(self, wait):\n counter = 0\n while not self._stop_signal and counter < wait:\n time.sleep(1)\n counter += 1\n res = self.process.poll()\n res_bsr = self.process_bsr.poll() if self.process_bsr else True\n if res is not None and res_bsr is not None:\n logger.info('Finish try succeeded')\n self.return_code = res\n time.sleep(15)\n return True\n else:\n logger.warning('Killing scrapy process manually, task id is %s',\n self.task_data.get('task_id', 0))\n # kill process group, if not finished in allowed time\n if self.process_bsr:\n try:\n self.process_bsr.terminate()\n except OSError as e:\n logger.error('Kill process bsr error in task #%s: %s',\n self.task_data.get('task_id', 0), e)\n try:\n self.process.terminate()\n except OSError as e:\n logger.error('Kill process error in task #%s: %s',\n self.task_data.get('task_id', 0), e)\n return False", "def isFinished():", "def isFinished():", "def isFinished():", "async def wait(self):\n if self._state in (JobState.PENDING, JobState.RUNNING):\n await self._process.wait()", "async def wait_until_done(self) -> None:\n ...", "def ServeUntilSubprocessDies(self, process):\n child_result = 0\n try:\n while True:\n if process.poll() is not None:\n child_result = 0\n break\n if self.conn.poll():\n child_result = self.conn.recv()\n break\n time.sleep(0)\n except KeyboardInterrupt:\n pass\n finally:\n self.Shutdown()\n return child_result", "def wait(self):\n while self._worker is None:\n # wait() before self._run()\n time.sleep(0.1)\n self._worker.join()\n return self.poll()", "def finalize(self):\n self.busy = False\n self.pipe_start.send((\"FINISH\",None))\n self.process.join()\n if self.process.is_alive():\n self.process.terminate()", "def wait_process(pid, timeout=None):\n def process():\n try:\n os.kill(pid, 0)\n except OSError:\n # Process is dead\n return True\n else:\n # Process is still ticking\n return None\n\n return wait_condition(process, timeout)", "def wait_process(pid, timeout=None):\n def process():\n try:\n os.kill(pid, 0)\n except OSError:\n # Process is dead\n return True\n else:\n # Process is still ticking\n return None\n\n return wait_condition(process, timeout)", "def wait_to_complete(self, timeout: float = 5) -> None:\n if self.proc.poll() is not None: # type: ignore\n return\n\n start_time = time.time()\n\n while start_time + timeout > time.time() and self.proc.poll() is None: # type: ignore\n time.sleep(0.001)\n\n if self.proc.poll() is None: # type: ignore\n self.terminate(force=True)\n self.wait()\n self.exitstatus = \"Terminated!\" # type: ignore", "async def poll(self):\n\n if not self.pid:\n # no pid, not running\n self.clear_state()\n return 0\n\n # send signal 0 to check if PID 
exists\n alive = await self.remote_signal(0)\n self.log.debug(\"Polling returned {}\".format(alive))\n\n if not alive:\n self.clear_state()\n return 0\n else:\n return None", "def wait_for_completion(self):\n self.logger.debug(\"Waiting for completion\")\n finished = False\n while not finished:\n if self._all_workers_are_idle():\n self.logger.info(\"Finished\")\n finished = True", "def waiting(self) -> bool: # pylint: disable=W0221\n return True", "def is_call_waiting(self) -> bool:", "def wait(self):\n return (self.status == self.STATUS_WAIT)", "def _block_until_process_is_killed(self, pid: int, timeout: int=None) -> bool:\n def check_pid():\n \"\"\"\n Check For the existence of a unix pid.\n \"\"\"\n # Sending signal 0 to a pid will raise an OSError exception if the pid is not running,\n # and do nothing otherwise.\n try:\n os.kill(pid, 0)\n except OSError:\n return False\n return True\n\n return poll.wait_for(check_pid, timeout_seconds=timeout)", "def isRunning(self):\n if not self.running:\n return False\n elif self.process.poll() == 0 or self.process.returncode >= 0:\n return False\n else:\n return True", "def _wait_queue(self):\n while True:\n time.sleep(0.1)\n if self.queue.unfinished_tasks == 0 or self.stop_signal.is_set():\n return", "def wait_on_job(self, delay=10):\n while self.isJobRunning() == True:\n time.sleep(delay)\n return self.ofile_exists()", "def wake_up_if_possible(self):\n if self.state == Process.RUNNING:\n return True\n\n if (self.state in (Process.SLEEPING, Process.WAITING) and\n self.suspended_until is not None):\n current_time = self.current_evaluation_context.get_current_time()\n if current_time >= self.suspended_until:\n self.__state = Process.RUNNING\n self.__suspended_until = None\n self.__interrupted = True\n return True\n\n if self.state == Process.WAITING and self.blocking_condition is not None:\n condition_type, condition, params = self.blocking_condition\n if self.current_evaluation_context is None:\n raise SALMAException(\"Undefined evaluation context in process!\")\n res = self.current_evaluation_context.evaluateCondition(condition_type, condition, *params)\n if res:\n self.__state = Process.RUNNING\n self.__blocking_condition = None\n self.__suspended_until = None\n return True\n return False", "def running(self):\n return self.sub_process and self.sub_process.is_alive()", "def _wait_empty(self):\n while True:\n if self.queue.empty():\n # We still have to wait for the last queue item being processed\n # (queue.empty() returns True before queue.task_done() is\n # called).\n self.queue.join()\n return\n time.sleep(1)", "def _is_alive(self) -> bool:\n\n if self._on:\n return True\n\n try:\n os.kill(self.proc.pid, 0)\n except (OSError, ProcessLookupError):\n return False\n\n return True", "def check_command(self):\n return self.process is not None and self.process.poll() is None", "def wait(self):\n\n for output in self.proc.communicate():\n if output is not None:\n self.output += output", "def waitForSubprocessNotRunning(self):\n if not self._has_launched or not self._server_thread.is_alive():\n return\n self._server_thread.join()\n\n def hasLaunched():\n return self._has_launched\n\n with self._has_launched_cv:\n self._has_launched_cv.wait_for(hasLaunched)\n assert self._has_launched", "async def wait(self) -> Optional[int]:\n # If we have a local_proc, call its wait method. This will cleanup any defunct processes when the kernel\n # is shutdown (when using waitAppCompletion = false). 
Otherwise (if no local_proc) we'll use polling to\n # determine if a (remote or revived) process is still active.\n if self.local_proc:\n return self.local_proc.wait()\n\n poll_val = 0\n for i in range(max_poll_attempts):\n poll_val = await self.poll()\n if poll_val is None:\n await asyncio.sleep(poll_interval)\n else:\n break\n else:\n self.log.warning(\"Wait timeout of {} seconds exhausted. Continuing...\".\n format(max_poll_attempts * poll_interval))\n return poll_val", "def IsCompleted(self) -> bool:", "def IsCompleted(self) -> bool:", "def IsCompleted(self) -> bool:", "def isFinished(self):\r\n try:\r\n output = Popen(\"qstat | grep \"+self.jobId, shell=True, stdout=PIPE, stderr=PIPE).communicate()[0]\r\n if self.jobId in output:\r\n if output.split()[4] == \"Eqw\":\r\n #If the job fails, print a warning, and wait a minute so the user can check why the job fails,\r\n #before resubmitting the job.\r\n logging.warning(\"job \" + output.split()[2] + \" failed to run, resubmitting in one minute\")\r\n time.sleep(60)\r\n output = Popen(\"qdel \"+self.jobId, shell=True, stdout=PIPE, stderr=PIPE).communicate()[0]\r\n self.submit()\r\n return False\r\n else:\r\n logging.info(\"job with ID: \" + self.jobId + \" is finished.\")\r\n return True\r\n \r\n except ValueError:\r\n logging.info(\"Error: waiting for not submitted job...\")", "def is_ready(self):\n return self.prep_job.is_done()", "def block_until_close(self):\r\n return self._eventThread.join()", "def poll(self):\n return False", "def alive(self):\n return self._process.is_alive()", "def _defunct(self):\n while self._popen.poll() is None:\n time.sleep(0.1)", "def Wait(self):\n try:\n # Flush stdout and stderr to be sure no output is interleaved.\n sys.stdout.flush()\n sys.stderr.flush()\n\n # File position pointers are shared across processes, so we must open\n # our own file descriptor to ensure output is not lost.\n self._WaitForStartup()\n silent_death_time = time.time() + self.SILENT_TIMEOUT\n results = []\n with open(self._output.name, 'r') as output:\n pos = 0\n running, exited_cleanly, task_errors, all_errors = (True, False, [], [])\n while running:\n # Check whether the process is still alive.\n running = self.is_alive()\n\n try:\n errors, results = \\\n self._queue.get(True, self.PRINT_INTERVAL)\n if errors:\n task_errors.extend(errors)\n all_errors.extend(errors)\n\n running = False\n exited_cleanly = True\n except Queue.Empty:\n pass\n\n if not running:\n # Wait for the process to actually exit. 
If the child doesn't exit\n # in a timely fashion, kill it.\n self.join(self.EXIT_TIMEOUT)\n if self.exitcode is None:\n msg = '%r hung for %r seconds' % (self, self.EXIT_TIMEOUT)\n all_errors.extend(\n failures_lib.CreateExceptInfo(ProcessExitTimeout(msg), ''))\n self._KillChildren([self])\n elif not exited_cleanly:\n msg = ('%r exited unexpectedly with code %s'\n % (self, self.exitcode))\n all_errors.extend(\n failures_lib.CreateExceptInfo(ProcessUnexpectedExit(msg), ''))\n\n # Read output from process.\n output.seek(pos)\n buf = output.read(_BUFSIZE)\n\n if len(buf) > 0:\n silent_death_time = time.time() + self.SILENT_TIMEOUT\n elif running and time.time() > silent_death_time:\n msg = ('No output from %r for %r seconds' %\n (self, self.SILENT_TIMEOUT))\n all_errors.extend(\n failures_lib.CreateExceptInfo(ProcessSilentTimeout(msg), ''))\n self._KillChildren([self])\n\n # Read remaining output from the process.\n output.seek(pos)\n buf = output.read(_BUFSIZE)\n running = False\n\n # Print output so far.\n while len(buf) > 0:\n sys.stdout.write(buf)\n pos += len(buf)\n if len(buf) < _BUFSIZE:\n break\n buf = output.read(_BUFSIZE)\n\n # Print error messages if anything exceptional occurred.\n if len(all_errors) > len(task_errors):\n logging.PrintBuildbotStepFailure()\n msg = '\\n'.join(x.str for x in all_errors if x)\n logging.warning(msg)\n traceback.print_stack()\n\n sys.stdout.flush()\n sys.stderr.flush()\n\n # Propagate any results.\n for result in results:\n results_lib.Results.Record(*result)\n\n finally:\n self.Cleanup(silent=True)\n\n # If an error occurred, return it.\n return all_errors", "def is_polling_done(self):\n if self.message_request_more:\n return False\n \n if self.message_cache:\n return False\n \n return True", "def sanity_check_process(self):\n assert_equals(self.proc.returncode, None)\n time.sleep(1)", "def proc_is_alive(pid):\n handle = windll.kernel32.OpenProcess(\n win32con.SYNCHRONIZE | win32con.PROCESS_QUERY_INFORMATION, 0, pid)\n if handle == 0:\n return False\n\n # If the process exited recently, a pid may still exist for the handle.\n # So, check if we can get the exit code.\n exit_code = DWORD()\n rval = windll.kernel32.GetExitCodeProcess(handle, byref(exit_code))\n windll.kernel32.CloseHandle(handle)\n if rval == 0: # GetExitCodeProcess failure\n raise WinError()\n return exit_code.value == win32con.STILL_ACTIVE", "def wait_rc(popen, timeout=30):\n stop = False\n end_time = time.time() + timeout\n rc = None\n while not stop:\n rc = popen.poll()\n if time.time() > end_time:\n stop = True\n return rc\n if rc is not None:\n stop = True\n return rc\n else:\n time.sleep(0.5)", "def wait_until_running(self, timeout=None):\n if self.pending():\n with self._running_condition:\n self._running_condition.wait(timeout)\n return not self.pending()", "def read(self):\n if self.alive:\n with self._register_poll():\n with _unblock_read(self._proc):\n return self._yield_ready_read()\n else:\n raise ProcessIsDeadError('Can not read. 
The process is already dead.')", "def watch_process(self):\n psutil.wait_procs([psutil.Process(self._proc.pid)],\n callback=self.start)", "def check(self):\n\n if not self.running:\n return False\n\n # On my machine, os.kill is faster and takes ~0.3usec while os.stat and P.exists take ~1.5usec (using timeit)\n # However, with kill if the process is under a separate UID, PermissionError is raised\n # Could try os.kill and fallback to P.exists and save the choice, but that's just overcomplicated\n\n running = P.exists(self.path)\n if running:\n self.update_status()\n else:\n # Process ended since last check, recond end time\n self.running = False\n self.ended_datetime = datetime.now()\n # TODO duration attribute could have a value while running; update in getter method\n self.duration = self.ended_datetime - self.created_datetime\n # Formats like 3:06:29.873626, so cutoff microseconds\n text = str(self.duration)\n self.duration_text = text[:text.rfind('.')]\n\n return running", "def processExists(uPid):\n # We try open the process for waiting since this is generally only forbidden in a very few cases.\n try:\n hProcess = win32api.OpenProcess(win32con.SYNCHRONIZE, False, uPid); # pylint: disable=no-member\n except pywintypes.error as oXcpt: # pylint: disable=no-member\n if oXcpt.winerror == winerror.ERROR_INVALID_PARAMETER:\n return False;\n if oXcpt.winerror != winerror.ERROR_ACCESS_DENIED:\n reporter.logXcpt('uPid=%s oXcpt=%s' % (uPid, oXcpt));\n return False;\n reporter.logXcpt('uPid=%s oXcpt=%s' % (uPid, oXcpt));\n except Exception as oXcpt:\n reporter.logXcpt('uPid=%s' % (uPid,));\n return False;\n else:\n hProcess.Close(); #win32api.CloseHandle(hProcess)\n return True;", "def is_finished(self):\n self.refresh()\n return self.progress.remaining_budget is not None and self.progress.remaining_budget <= 0", "def wait(self):\n self.__prcs.wait()\n return self.poll()", "def _is_working():\n global _worker\n return _worker is not None and _worker.is_alive()", "def wait_process_termination(p_pid):\n try:\n _, stdout, _ = run_cmd(\"ps --pid {} -o comm=\".format(p_pid))\n except ChildProcessError:\n return\n raise Exception(\"{} process is still alive: \".format(stdout.strip()))", "def wait_message(self):\n if self._state != states['open']:\n return False\n if len(self._read_queue) > 0:\n return True\n\n assert self._read_waiter is None or self._read_waiter.cancelled(), \\\n \"You may only use one wait_message() per connection.\"\n\n self._read_waiter = asyncio.Future(loop=self._loop)\n yield from self._read_waiter\n return self.wait_message()", "def monitor(self):\n for idx, process in enumerate(self.__process_list):\n process.id_number = idx + 1\n while len(self.__process_list) > 0:\n for process in list(self.__process_list):\n if not process.has_output():\n _return_code = process.return_code\n self.__process_list.remove(process)\n if _return_code == 0:\n logger.info(\"Finished process #{}: there are now {}/{} running\".format(process.id_number, len(self.__process_list), self.__n_initial))\n else:\n logger.warning(\"Process #{} terminated unexpectedly (return code {}): there are now {}/{} running\".format(process.id_number, _return_code, len(self.__process_list), self.__n_initial))", "def is_run_completed(self):\n run_state = self._get_run_state()\n process = self._get_process()\n if any(state in run_state for state in STATE_DISHWASHER_END) or (\n STATE_DISHWASHER_POWER_OFF in run_state\n and any(state in process for state in STATE_DISHWASHER_END)\n ):\n return True\n return False", "def 
loop_wait(self):\n self.log_debug(\"Waiting for loop to finish\")\n if self.loop_state() != LState.Stopped:\n self.event_loop_proc.Wait()\n self.log_debug(\"Loop finished\")", "def wait():\r\n win32event.WaitForSingleObject(hProcess,\r\n win32event.INFINITE)\r\n returncode = win32process.GetExitCodeProcess(hProcess)\r\n return returncode", "def check_job(self, a_thread, _):\n if not a_thread.isAlive():\n self.close_button.disabled = False\n self.popup_label.text = \"Process finished. Processed records:\" + str(self.count_funct())\n return False", "def check_process_status(self, popenObj):\n if not isinstance(popenObj, subprocess.Popen):\n self.logger.error(\n \"Cannot terminate a process since the arg is not Popen object.\")\n return False, -1\n\n popenObj.poll()\n retcode = popenObj.returncode\n\n if retcode is None:\n return self.PROCESSSTATE_ALIVE\n return self.PROCESSSTATE_DEAD", "def check_obj_ref_ready_nowait(obj_ref: ObjectRef) -> bool:\n finished, _ = ray.wait([obj_ref], timeout=0)\n return len(finished) == 1", "def wait(self):\n num_pings = 0\n # Some streams seem to start fine with up to 4 pings before beginning download?\n # More investigation is needed\n max_pings = 1 + self._pingouts\n # timeout after 1 minute\n timeout = datetime.datetime.now() + datetime.timedelta(minutes=1)\n try:\n for line in self._process.stderr:\n # TODO: add mpegts or other variants depending on the container settings? or no?\n # if \"Output #0, mp4\" in line:\n if \"Output #0\" in line:\n self._process.communicate()\n self.move_to_dest()\n self._pingouts = 0\n break\n elif \"HandleCtrl, Ping\" in line:\n num_pings += 1\n if num_pings > max_pings:\n # The main issue with this is that the slain processes will not have their files moved\n # But I think this is preferable to the other solutions I've come up with.\n # For future reference, those were:\n #\n # 1) Sending SIGINT then continuing to read stderr until it exited (sometimes it doesn't)\n # 2) Sending SIGINT, storing a reference to the process, then restarting the download.\n # This prevents the process from being garbage collected until the Watcher is\n # 3) Sending SIGINT, then storing info about src and dest paths for the stopped download.\n # If a reference to the process is NOT stored, there's no way to be sure it has finished writing\n # (if it's writing at all). The only way was to give them a grace period and then just start\n # moving, but this adds undesirable time to the cleanup phase, when we may want to restart\n # a falsely completed Watcher asap.\n # 4) Just moving the file straightaway. This is obviously bad since ffmpeg takes a few moments to\n # finish.\n # NOTE: only option #1 was actually tried, the others were partially written before being\n # abandoned as their problems became clear\n #\n # Two additional options exist (not mutually exclusive):\n # 1) Passing the dead processes off to a queue and having another thread clean up.\n # 2) Having regular maintenance sweep the active folder and move files it can be sure are done\n # to their proper folders.\n #\n # I *probably* need to use 1) eventually, especially once I figure out how to actually end\n # stuck processes without killing the parent. 
But it requires a lot more code.\n # Until then let's just see how this works.\n #\n # When that time does come, a Downloader copy constructor may be useful.\n download_logger.debug(\"Download pinged {} times: Stopping\".format(num_pings))\n self._pingouts += 1\n self.stop()\n\n # close stderr to force the loop to exit\n time.sleep(0.1)\n self._process.stderr.close()\n time.sleep(0.1)\n # process will be garbage collected when the next one is started, or the Watcher dies\n # self._process = None\n # This *should* work for newer builds of FFmpeg without librtmp.\n # Only question is whether 1 minute is too long (or too short).\n # UPDATE: Why doesn't this ever seem to work?\n # is it because FFmpeg freezes output and hangs now? so we're never getting another line to iterate over\n # elif datetime.datetime.now() > timeout:\n # download_logger.debug(\"Download of {} timed out\".format(self.outfile))\n # self.stop()\n # time.sleep(0.1)\n # self._process.stderr.close()\n # time.sleep(0.1)\n else:\n time.sleep(0.2)\n\n except ValueError:\n download_logger.debug('ffmpeg stderr closed unexpectedly')\n\n # Is it possible for the process to end prematurely?\n return self._process.returncode", "def isFinished(self):\n return False" ]
[ "0.7556547", "0.7349174", "0.7095015", "0.70851445", "0.6985076", "0.6930272", "0.68558294", "0.6798407", "0.6784719", "0.6780242", "0.6755652", "0.66265666", "0.6621324", "0.658647", "0.65851134", "0.65569645", "0.6533104", "0.6499808", "0.64819217", "0.6480319", "0.6417932", "0.6417932", "0.6417932", "0.6417932", "0.6415494", "0.64149743", "0.640869", "0.6406878", "0.63671774", "0.6352831", "0.6342143", "0.6339791", "0.6332772", "0.63271713", "0.6301106", "0.62643206", "0.6257073", "0.6239994", "0.6239562", "0.62384576", "0.62384576", "0.62384576", "0.6229733", "0.6207774", "0.62030846", "0.61972743", "0.61489886", "0.61413693", "0.61413693", "0.613656", "0.6134664", "0.6120893", "0.6116992", "0.61065817", "0.61030394", "0.6098713", "0.6091377", "0.6089057", "0.6082875", "0.60745275", "0.60657793", "0.6057038", "0.60502404", "0.6039374", "0.6039355", "0.6037981", "0.60260516", "0.59861845", "0.59861845", "0.59861845", "0.59750473", "0.59708893", "0.59671533", "0.5963156", "0.59465384", "0.59457076", "0.5939634", "0.5937102", "0.59367704", "0.5929113", "0.592599", "0.5917407", "0.5899816", "0.58881676", "0.58879745", "0.5886182", "0.5885726", "0.58803874", "0.5879545", "0.5869106", "0.5860107", "0.58577955", "0.5855886", "0.58553916", "0.58478445", "0.58445615", "0.58405304", "0.58306605", "0.58236754", "0.5822145" ]
0.7235497
2
Logs windows memory stats.
def logMemoryStats():
    class MemoryStatusEx(ctypes.Structure):
        """ MEMORYSTATUSEX """
        kaFields = [
            ( 'dwLength', ctypes.c_ulong ),
            ( 'dwMemoryLoad', ctypes.c_ulong ),
            ( 'ullTotalPhys', ctypes.c_ulonglong ),
            ( 'ullAvailPhys', ctypes.c_ulonglong ),
            ( 'ullTotalPageFile', ctypes.c_ulonglong ),
            ( 'ullAvailPageFile', ctypes.c_ulonglong ),
            ( 'ullTotalVirtual', ctypes.c_ulonglong ),
            ( 'ullAvailVirtual', ctypes.c_ulonglong ),
            ( 'ullAvailExtendedVirtual', ctypes.c_ulonglong ),
        ];
        _fields_ = kaFields; # pylint: disable=invalid-name

        def __init__(self):
            super(MemoryStatusEx, self).__init__();
            self.dwLength = ctypes.sizeof(self);

    try:
        oStats = MemoryStatusEx();
        ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(oStats));
    except:
        reporter.logXcpt();
        return False;

    reporter.log('Memory statistics:');
    for sField, _ in MemoryStatusEx.kaFields:
        reporter.log(' %32s: %s' % (sField, getattr(oStats, sField)));
    return True;
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_memory_stats(location_tag=\"undef\"):\n try:\n import psutil\n p = psutil.Process(os.getpid())\n rm, vm = p.get_memory_info()\n print \"MEM_STAT (%s) rm=%s, vm=%s\" % (location_tag, rm, vm)\n except ImportError:\n print \"psutil module not available\"", "def logmem(cls, tag):\n\n cls.logger.info(\"----%s----\", str(tag))\n mem = psutil.virtual_memory()\n cls.logger.info(\"total:%s M\", mem.total / 1024 / 1024)\n cls.logger.info(\"available:%s M\", mem.available / 1024 / 1024)\n cls.logger.info(\"used:%s M\", mem.used / 1024 / 1024)\n cls.logger.info(\"free:%s M\", mem.free / 1024 / 1024)\n cls.logger.info(\"active:%s M\", mem.active / 1024 / 1024)\n cls.logger.info(\"inactive:%s M\", mem.inactive / 1024 / 1024)\n cls.logger.info(\"buffers:%s M\", mem.buffers / 1024 / 1024)\n cls.logger.info(\"cached:%s M\", mem.cached / 1024 / 1024)\n cls.logger.info(\"shared:%s M\", mem.shared / 1024 / 1024)", "def memory_snapshot(tag, rank):\n GB = 1024 * 1024 * 1024\n MB = 1024 * 1024\n KB = 1024\n\n peak = dgl.partition.get_peak_mem() * KB\n mem = psutil.virtual_memory()\n avail = mem.available / MB\n used = mem.used / MB\n total = mem.total / MB\n\n mem_string = f\"{total:.0f} (MB) total, {peak:.0f} (MB) peak, {used:.0f} (MB) used, {avail:.0f} (MB) avail\"\n logging.debug(f\"[Rank: {rank} MEMORY_SNAPSHOT] {mem_string} - {tag}\")", "def show_process_memory( cls, call_msg = \"\", log_level = None, print_it = False ):\n process = psutil.Process(os.getpid()) # import psutil\n mem = process.memory_info().rss\n # convert to mega and format\n mem_mega = mem/( 1e6 )\n msg = f\"{call_msg}process memory = {mem_mega:10,.2f} mega bytes \"\n if print_it:\n print( msg )\n if not ( log_level is None ):\n cls.__logger.log( log_level, msg )\n msg = f\"{mem_mega:10,.2f} mega bytes \"\n return ( mem, msg )", "def print_memory_diags(disable_print=False):\n process = psutil.Process(os.getpid())\n memory = process.memory_info().rss/1000000000.0\n if not disable_print:\n logging.info('\\tMemory usage: {:.3f} GB'.format(memory))\n return memory", "def _mem_report(tensors: Iterable, mem_type: str) -> None:\n print(f\"Storage on {mem_type}\")\n print(\"-\" * LEN)\n total_numel = 0\n total_mem = 0\n visited_data: List[Any] = []\n for tensor in tensors:\n if tensor.is_sparse:\n continue\n # a data_ptr indicates a memory block allocated\n data_ptr = tensor.storage().data_ptr()\n if data_ptr in visited_data:\n continue\n visited_data.append(data_ptr)\n\n numel = tensor.storage().size()\n total_numel += numel\n element_size = tensor.storage().element_size()\n mem = numel * element_size / 1024 / 1024 # 32bit=4Byte, MByte\n total_mem += mem\n element_type = type(tensor).__name__\n size = tuple(tensor.size())\n\n if print_all:\n print(f\"{element_type}\\t\\t{size}\\t\\t{mem}\")\n print(\"-\" * LEN)\n print(f\"Total Tensors: {total_numel} \\tUsed Memory Space: {total_mem}\")\n print(\"-\" * LEN)", "def reset_memory_statistics(sender, **kwargs): # pylint: disable=unused-argument\n MemoryUsageData.start_counting()", "def record_memory_map(self):\n memory_map = self.get_memory_map()\n self._memory_map_records.append(memory_map)", "def test_memory_pressure(self):\n self.execute_query(self.query)\n # This triggers a full GC as of openjdk 1.8.\n call([\"jmap\", \"-histo:live\", str(self.cluster.catalogd.get_pid())])\n # Sleep for logbufsecs=5 seconds to wait for the log to be flushed. 
Wait 5 more\n # seconds to reduce flakiness.\n time.sleep(10)\n assert self.metadata_cache_string not in self._get_catalog_object()", "def show_mem_usage():\n gl = sys._getframe(1).f_globals\n vars = {}\n for k, v in list(gl.items()):\n # for pandas dataframes\n if hasattr(v, 'memory_usage'):\n mem = v.memory_usage(deep=True)\n if not np.isscalar(mem):\n mem = mem.sum()\n vars.setdefault(id(v), [mem]).append(k)\n # work around for a bug\n elif isinstance(v, pd.Panel):\n v = v.values\n vars.setdefault(id(v), [sys.getsizeof(v)]).append(k)\n total = 0\n for k, (value, *names) in vars.items():\n if value > 1e6:\n print(names, \"%.3fMB\" % (value / 1e6))\n total += value\n print(\"%.3fMB\" % (total / 1e6))", "def getMemory():\n return tracemalloc.take_snapshot()", "def getMemory():\n return tracemalloc.take_snapshot()", "def getMemory():\n return tracemalloc.take_snapshot()", "def getMemory():\n return tracemalloc.take_snapshot()", "def getMemory():\n return tracemalloc.take_snapshot()", "def getMemory():\n return tracemalloc.take_snapshot()", "def getMemory():\n return tracemalloc.take_snapshot()", "def get_memory_info():\n return psutil.virtual_memory()", "def mem_info(self):\n\t\t\tavailable, total = cuda.mem_get_info() #Note: pycuda._driver.LogicError: cuMemGetInfo failed: context is destroyed\n\t\t\tprint(\"Available: %.2f GB\\nTotal: %.2f GB\"%(available/1e9, total/1e9))", "def get_memory_usage(cls):\n\n mem_stats = psutil.virtual_memory()\n\n mem_stats_dict = { StatsKeys.MEMORY :\n {\n StatsKeys.TOTAL : mem_stats.total,\n StatsKeys.AVAILABLE : mem_stats.available,\n StatsKeys.USED : mem_stats.used\n }\n }\n logger.debug(\"Memory stats: {}\".format(mem_stats_dict))\n\n return mem_stats_dict", "def _api_memory_info() -> Dict[str, Any]:\n process = psutil.Process(os.getpid())\n return {k: size(v) for k, v in process.memory_info()._asdict().items()}", "def memory():\n\twith open('/proc/meminfo','r') as mem:\n\t\tret = {}\n\t\ttmp = 0\n\t\tfor i in mem:\n\t\t\tsline = i.split()\n\t\t\tif str(sline[0])=='MemTotal:':\n\t\t\t\tret['total'] = int(sline[1]*1.0e-6)\n\treturn ret", "def show_mem(cmd, cnt, args):\n if cpu is None:\n log(\"Load program first\") \n return\n elif len(cpu.memory) == 0:\n log(\"Load program first\") \n return \n chunk = 0\n chunk_count = len(cpu.memory)\n while chunk < chunk_count: \n chunk_start = cpu.memory[chunk][MEMADDR]\n chunk_end = chunk_start + cpu.memory[chunk][MEMSIZE] \n log(\"{:d} {:#x}..{:#x}\".format(chunk, chunk_start, chunk_end)) \n chunk += 1\n if machine == \"ARM\":\n if len(cpu.high_memory) != 0:\n log(\"High memory\")\n for addr in sorted(cpu.high_memory):\n log(\"{:#x}\".format(addr))", "def check_mem(self, values):\n try:\n virt_mem = psutil.virtual_memory()\n values[keys.KEY_VIRTUAL_MEM_TOTAL] = virt_mem.total\n values[keys.KEY_VIRTUAL_MEM_PERCENT] = virt_mem.percent\n except:\n logging.error(\"Error collecting memory stats.\")", "def get_memory() -> dict:\n import os\n\n import psutil\n\n proc = psutil.Process(os.getpid())\n return proc.memory_info()", "def get_mem_info():\n import psutil\n vm = psutil.virtual_memory()\n return {\n \"memtotal\": vm.total,\n \"memavailable\": vm.available,\n }", "def memory():\n sin = psutil.virtual_memory()\n return round((sin.total / sin.used) / 100, 3)", "def stat_cuda(msg: str) -> None:\n print(f'-- {msg:<35} allocated: %dM, max allocated: %dM, cached: %dM, max cached: %dM' % (\n torch.cuda.memory_allocated() / 1024 / 1024,\n torch.cuda.max_memory_allocated() / 1024 / 1024,\n torch.cuda.memory_cached() / 1024 / 
1024,\n torch.cuda.max_memory_cached() / 1024 / 1024\n ))", "def dumpMemory():\n libxml2mod.xmlDumpMemory()", "def get_memory(isamAppliance, statistics_duration, check_mode=False, force=False):\n return isamAppliance.invoke_get(\n \"Retrieving the Memory Usage Statistics\",\n \"/statistics/systems/memory.json{0}\".format(\n tools.create_query_string(\n timespan=statistics_duration)),requires_model=requires_model)", "def MemoryInfo(cls):\n\t\tres = {}\n\t\tfor line in cat(\"/proc/meminfo\").split(\"\\n\")[:-1]:\n\t\t\tline = RE_SPACES.sub(\" \", line).strip().split(\" \")\n\t\t\tname, value = line[:2]\n\t\t\tres[name.replace(\"(\", \"_\").replace(\")\", \"_\").replace(\":\", \"\")] = int(value)\n\t\treturn res", "def _debugmallocstats(): # real signature unknown; restored from __doc__\n pass", "def ram(log=False):\n gc.collect()\n freeRam = gc.mem_free()\n allocatedRam = gc.mem_alloc()\n totalRam = freeRam+allocatedRam\n percentage = '{0:.2f} %'.format(freeRam/totalRam*100)\n if (log):\n print('■ Micropython RAM')\n print(' Total : {0:.2f} KB'.format(totalRam/1024))\n print(' Free : {0:.2f} KB'.format(freeRam/1024))\n print(' Free % : {0}'.format(percentage))\n print()\n return freeRam", "def log_memory_usage(func):\n delimiter = ';'\n\n def wrapper(*args, **kwargs):\n if settings.IS_ACTIVE_FEATURE_MEMORY_PROFILER:\n django_process = psutil.Process(pid=os.getpid())\n memory_before = django_process.memory_info().vms\n\n result = func(*args, **kwargs)\n\n if settings.IS_ACTIVE_FEATURE_MEMORY_PROFILER:\n increment = django_process.memory_info().vms - memory_before\n\n if increment:\n called_from = inspect.stack()[1]\n callee_module = inspect.getmodule(called_from[0])\n\n # Don't log any params for sensitive vars, the record type should be sufficient.\n params = callee_module.__name__\n if not hasattr(func, 'sensitive_variables'):\n params = \",\".join(kwargs.values())\n\n logger.info(\n msg=f'{delimiter}{callee_module.__name__}'\n f'{delimiter}{params}'\n f'{delimiter}{memory_before}'\n f'{delimiter}{increment}'\n )\n\n return result\n\n return wrapper", "def register_process_statistics():\n if resource is None:\n log.warning(\n 'Unable to import resource module, memory diags not available'\n )\n return\n\n rusage_fields = [\n ('Execution time in user mode (seconds)', 'ru_utime'),\n ('Execution time in kernel mode (seconds)', 'ru_stime'),\n ('Maximum Resident Set Size (KB)', 'ru_maxrss'),\n ('Soft page faults', 'ru_minflt'),\n ('Hard page faults', 'ru_majflt'),\n ('Input events', 'ru_inblock'),\n ('Output events', 'ru_oublock'),\n ('Voluntary context switches', 'ru_nvcsw'),\n ('Involuntary context switches', 'ru_nivcsw'),\n ]\n\n def dump(log):\n process = resource.getrusage(resource.RUSAGE_SELF)\n for name, field in rusage_fields:\n data = getattr(process, field, 'None')\n log.info('%s: %s', name, data)\n\n register_diags('Process Statistics', dump)", "def get_memory(self):\n return self.loss_memory", "def print_current_mem_usage():\n mem = get_current_mem_usage()\n output = \"# Mem usage = {} MiB #\".format(mem)\n print(\"\\n\" + \"-\" * len(output))\n print(output)\n print(\"-\" * len(output) + \"\\n\")", "def print_mem_usage(usage):\n for region in usage.keys():\n used = usage[region][\"used\"]\n free = usage[region][\"free\"]\n usage_msg = \"{region}:\\n used: {used} bytes\\n free: {free} bytes\"\n usage_msg = usage_msg.format(region=region, used=used, free=free)\n print(usage_msg)", "def get_mem():\n return {\n 'MEM': string_chopped_to_float(psutil.virtual_memory(), 'percent=', ', 
used'),\n }", "def _get_mem_info(self):\n memory_usage_pct = None\n try:\n memory_usage = self._get_cgroups_current_memory_usage()\n if self._max_memory_usage and memory_usage:\n memory_usage_pct = round((memory_usage / self._max_memory_usage) * 100, 1)\n except BaseException:\n self._log.warning(f'Unable to determine memory usage', exc_info=True)\n return memory_usage_pct", "def print_numa_stats(numafiles):\n for numafile in numafiles:\n numafile.seek(0)\n node_id = int(numafile.name[numafile.name.find(\"/node/node\")+10:-9])\n ts = int(time.time())\n stats = dict(line.split() for line in numafile.read().splitlines())\n for stat, tag in (# hit: process wanted memory from this node and got it\n (\"numa_hit\", \"hit\"),\n # miss: process wanted another node and got it from\n # this one instead.\n (\"numa_miss\", \"miss\")):\n print (\"sys.numa.zoneallocs %d %s node=%d type=%s\"\n % (ts, stats[stat], node_id, tag))\n # Count this one as a separate metric because we can't sum up hit +\n # miss + foreign, this would result in double-counting of all misses.\n # See `zone_statistics' in the code of the kernel.\n # foreign: process wanted memory from this node but got it from\n # another node. So maybe this node is out of free pages.\n print (\"sys.numa.foreign_allocs %d %s node=%d\"\n % (ts, stats[\"numa_foreign\"], node_id))\n # When is memory allocated to a node that's local or remote to where\n # the process is running.\n for stat, tag in ((\"local_node\", \"local\"),\n (\"other_node\", \"remote\")):\n print (\"sys.numa.allocation %d %s node=%d type=%s\"\n % (ts, stats[stat], node_id, tag))\n # Pages successfully allocated with the interleave policy.\n print (\"sys.numa.interleave %d %s node=%d type=hit\"\n % (ts, stats[\"interleave_hit\"], node_id))", "def get_gpu_memory_map():\n result = subprocess.check_output(\n [\n 'nvidia-smi', '--query-gpu=memory.used',\n '--format=csv,nounits,noheader'\n ], encoding='utf-8')\n # Convert lines into a dictionary\n gpu_memory = [int(x) for x in result.strip().split('\\n')]\n gpu_memory_map = dict(zip(range(len(gpu_memory)), gpu_memory))\n print(\"Current usage: %i of 11178\" % gpu_memory_map[1])", "def monitor(self):\n procdata = self.collect_userprocs_info()\n now = int(time.time())\n #-------------------\n proclist = []\n for name in procdata:\n mem = procdata[name]['rss']\n pcode = self.DB.get_code(name)\n proclist.append((now, pcode, mem))\n self.DB.add_proc_info(proclist)\n #-------------------\n totmem = psutil.virtual_memory()\n self.DB.add_total_mem_info(now, totmem.used, totmem.available, totmem.free)\n #-------------------\n disk = psutil.disk_usage('/')\n dinfo = {\n \"utime\" : now,\n \"total\" : disk.total,\n \"used\" : disk.used,\n \"free\" : disk.free,\n \"percent\" : disk.percent\n }\n self.DB.add_diskuse_info(dinfo)\n #-------------------\n cpu = json.dumps(psutil.cpu_percent(None, True))\n self.DB.add_total_cpu(now, cpu)\n #-------------------\n net = psutil.net_io_counters()\n ninfo = {\n \"utime\" : now,\n \"brecv\" : net.bytes_recv,\n \"bsent\" : net.bytes_sent,\n \"precv\" : net.packets_recv,\n \"psent\" : net.packets_sent,\n \"errin\" : net.errin,\n \"errin\" : net.errout\n }\n self.DB.add_net_info(ninfo)", "def mem_info():\n meminfo = OrderedDict()\n with open('/proc/meminfo') as f:\n for line in f:\n meminfo[line.split(':')[0]] = line.split(':')[1].strip()\n return meminfo", "def process_memory():\n process = psutil.Process()\n return int(convert.bytetomb(process.memory_info().rss))", "def display_memory(self) -> None:\n return 
self.__memory", "def print_performance_info(self):\n pass", "def get_memory():\n with open('/proc/meminfo', 'r') as mem:\n free_memory = 0\n for i in mem:\n sline = i.split()\n if str(sline[0]) in ('MemFree:', 'Buffers:', 'Cached:'):\n free_memory += int(sline[1])\n print(\"____________________ \" + str(free_memory) + \"____________________\")\n return free_memory", "def memory():\n\n mem_info = {}\n\n if platform.linux_distribution()[0]:\n with open('/proc/meminfo') as file:\n c = 0\n for line in file:\n lst = line.split()\n if str(lst[0]) == 'MemTotal:':\n mem_info['total'] = int(lst[1])\n elif str(lst[0]) in ('MemFree:', 'Buffers:', 'Cached:'):\n c += int(lst[1])\n mem_info['free'] = c\n mem_info['used'] = (mem_info['total']) - c\n elif platform.mac_ver()[0]:\n ps = subprocess.Popen(['ps', '-caxm', '-orss,comm'], stdout=subprocess.PIPE).communicate()[0]\n vm = subprocess.Popen(['vm_stat'], stdout=subprocess.PIPE).communicate()[0]\n\n # Iterate processes\n process_lines = ps.split('\\n')\n sep = re.compile('[\\s]+')\n rss_total = 0 # kB\n for row in range(1, len(process_lines)):\n row_text = process_lines[row].strip()\n row_elements = sep.split(row_text)\n try:\n rss = float(row_elements[0]) * 1024\n except:\n rss = 0 # ignore...\n rss_total += rss\n\n # Process vm_stat\n vm_lines = vm.split('\\n')\n sep = re.compile(':[\\s]+')\n vm_stats = {}\n for row in range(1, len(vm_lines) - 2):\n row_text = vm_lines[row].strip()\n row_elements = sep.split(row_text)\n vm_stats[(row_elements[0])] = int(row_elements[1].strip('\\.')) * 4096\n\n mem_info['total'] = rss_total\n mem_info['used'] = vm_stats[\"Pages active\"]\n mem_info['free'] = vm_stats[\"Pages free\"]\n else:\n raise('Unsupported Operating System.\\n')\n exit(1)\n\n return mem_info", "def mem_report(print_all: bool = False) -> None:\n\n def _mem_report(tensors: Iterable, mem_type: str) -> None:\n \"\"\"Print the selected tensors of type\n\n There are two major storage types in our major concern:\n - GPU: tensors transferred to CUDA devices\n - CPU: tensors remaining on the system memory (usually unimportant)\n\n Args:\n - tensors: the tensors of specified type\n - mem_type: 'CPU' or 'GPU' in current implementation \"\"\"\n print(f\"Storage on {mem_type}\")\n print(\"-\" * LEN)\n total_numel = 0\n total_mem = 0\n visited_data: List[Any] = []\n for tensor in tensors:\n if tensor.is_sparse:\n continue\n # a data_ptr indicates a memory block allocated\n data_ptr = tensor.storage().data_ptr()\n if data_ptr in visited_data:\n continue\n visited_data.append(data_ptr)\n\n numel = tensor.storage().size()\n total_numel += numel\n element_size = tensor.storage().element_size()\n mem = numel * element_size / 1024 / 1024 # 32bit=4Byte, MByte\n total_mem += mem\n element_type = type(tensor).__name__\n size = tuple(tensor.size())\n\n if print_all:\n print(f\"{element_type}\\t\\t{size}\\t\\t{mem}\")\n print(\"-\" * LEN)\n print(f\"Total Tensors: {total_numel} \\tUsed Memory Space: {total_mem}\")\n print(\"-\" * LEN)\n\n LEN = 65\n if print_all:\n print(\"=\" * LEN)\n print(\"Element type\\tSize\\t\\t\\tUsed MEM(MBytes)\")\n tensors = []\n for obj in gc.get_objects():\n try:\n if t.is_tensor(obj) or (hasattr(obj, \"data\") and t.is_tensor(obj.data)):\n tensors.append(obj)\n except Exception:\n pass\n cuda_tensors = [tensor for tensor in tensors if tensor.is_cuda]\n host_tensors = [tensor for tensor in tensors if not tensor.is_cuda]\n _mem_report(cuda_tensors, \"GPU\")\n _mem_report(host_tensors, \"CPU\")\n if print_all:\n print(\"=\" * LEN)", "def 
get_memory_info(dut):\n command = \"top -n 1 b | grep 'KiB Mem' \"\n output = st.show(dut, command)\n include_keys = ['total', 'used', 'free', 'buff_cache']\n rv = {each_key: ast.literal_eval(output[0][each_key]) for each_key in output[0] if each_key in include_keys}\n return rv", "def test_instant_memory_statistics(self):\n from supvisors.statistics import instant_memory_statistics\n stats = instant_memory_statistics()\n # test bounds (percent)\n self.assertIs(float, type(stats))\n self.assertGreaterEqual(stats, 0)\n self.assertLessEqual(stats, 100)", "def logStats(self, msg):\n self.logLinesStats.append(msg)", "def handleTelemetry(self):\n\t\tprint(\"*****************handleTelemetry\")\n\t\tself.cpuUtilPct = self.cpuUtilTask.getTelemetryValue() # Get CPU usage performance\n\t\tself.memUtilPct = self.memUtilTask.getTelemetryValue() # Get Memory usage performance\n\t\tsysData = SystemPerformanceData()\n\t\tsysData.setCpuUtilization(self.cpuUtilPct)\n\t\tsysData.setMemoryUtilization(self.memUtilPct)\n\t\tself.dataMessageListener.handleSystemPerformanceMessage(sysData)\n\t\tlogging.info('CPU utilization is %s percent, and memory utilization is %s percent.', str(self.cpuUtilPct), str(self.memUtilPct))\n\t\t# Log out the usage performance", "def memory(kdump_memory):\n config_db = ConfigDBConnector()\n if config_db is not None:\n config_db.connect()\n config_db.mod_entry(\"KDUMP\", \"config\", {\"memory\": kdump_memory})", "def getMemDetail(self):\n mem = {}\n if self.type in ['E', 'T', 'S', 'K', 'A', 'AX', 'W']:\n m = \"The percentage of CP memory utilization:\\s*([\\d\\.]+)%\\s+DP memory utilization:\\s*([\\d\\.]+)%\"\n rt = re.search(m, self.dut.cli(\"show memory detail\"))\n if rt:\n mem = {\"cp\": float(rt.groups()[0]), \"dp\": float(rt.groups()[1])}\n return mem", "def _dump_info(resolution, block_size, pwidth):\n V, H = resolution\n M, N = block_size\n bytes = int(ceil(pwidth / 8))\n mem_bytes = 2 * M * H * bytes\n print(\"Memory requirements:\")\n print(\" {:d} bytes for double buffer\".format(mem_bytes))\n\n return bytes, mem_bytes", "def get_screening_log_basic_stats(screening_log_path: str):\n stats_functions.get_stats(log_path=screening_log_path, log_sheet='Screening_Log', log_type='Screening')", "def output_stats(self):\n elapsed = self.timer.elapsed.total_seconds()\n count = self.copied + self.errored\n total = self.total\n # Time per key in milliseconds\n avg = round(elapsed / count * 1000, 3)\n # Time remaining in seconds\n remaining = 1.0 * elapsed / count * (total - count)\n # Time remaining in minutes\n remaining = round(remaining / 60.0, 1)\n # Time taken in minutes\n elapsed = round(elapsed / 60.0, 1)\n\n self.log.info(f\"{self.prefix}: {avg}ms avg, {elapsed}min passed, \"\n f\"{remaining}min remaining. 
({count:,}/{total:,})\")", "def gather_info_and_display():\n # Obtain total rss displayed in memory.stat for each group,\n # container and service.\n try:\n output_mem = pipe_command(GREP_CMD, AWK_CMD, cwd=MEMPATH)\n LOG.debug(\n 'command: %s\\n%s',\n \"grep -rs total_rss '/sys/fs/cgroup/memory/' \"\n \"| awk '$2>0{print$0}' \",\n output_mem)\n except subprocess.CalledProcessError as error:\n LOG.error('Could not get total_rss memory, error=%s', error)\n return 1\n\n mem_info = get_meminfo()\n pt_groups = gather_groups_memory(output_mem)\n pt_cont = gather_containers_memory(output_mem)\n pt_serv = sys_service_memory()\n\n # Dump the tables out\n print('\\nPer groups memory usage:')\n\n # Get string to be printed and create list of elements separated by \\n\n list_of_table_lines = pt_groups.get_string().split('\\n')\n\n # Use the first line (+---+-- ...) as horizontal rule to insert later\n horizontal_line = list_of_table_lines[0]\n\n # Print the table, except last two lines ( \"Total\" row + final separator).\n print(\"\\n\".join(list_of_table_lines[:-2]))\n # Print separator, and finally the \"Total\" row.\n print(horizontal_line)\n print(\"\\n\".join(list_of_table_lines[-2:]))\n\n pt_namespc = prettytable.PrettyTable(\n ['Namespace',\n 'Resident Set Size (MiB)',\n ], caching=False)\n pt_namespc.align = 'l'\n pt_namespc.align['Resident Set Size (MiB)'] = 'r'\n\n print('\\nPer namespace memory usage:')\n for n_s in MEMORY['namespaces']:\n pt_namespc.add_row(\n [n_s,\n MEMORY['namespaces'][n_s],\n ])\n print(pt_namespc)\n\n print('\\nPer container memory usage:')\n print(pt_cont)\n\n print('\\nPer service memory usage:')\n print(pt_serv)\n\n base_mebib = 0.0\n k8s_system = 0.0\n k8s_addon = 0.0\n platform_memory_percent = 0.0\n\n # Calculate base memory usage (i.e., normal memory, exclude K8S and VMs)\n # e.g., docker, system.slice, user.slice\n for group in MEMORY['cgroups']:\n if group in BASE_GROUPS:\n base_mebib += float(MEMORY['cgroups'][group])\n\n # K8S platform system usage (essential) and addons usage (non-essential)\n for n_s in MEMORY['namespaces']:\n if n_s in K8S_NAMESPACE_SYSTEM:\n k8s_system += MEMORY['namespaces'][n_s]\n elif n_s in K8S_NAMESPACE_ADDON:\n k8s_addon += MEMORY['namespaces'][n_s]\n\n # Calculate platform memory usage\n platform_mebib = base_mebib + k8s_system\n\n anon_mebib = float(mem_to_mebibytes(\n mem_info['Active(anon)'] + mem_info['Inactive(anon)'])) * KBYTE\n avail_mebib = float(mem_to_mebibytes(\n mem_info['MemAvailable'])) * KBYTE\n total_mebib = float(anon_mebib + avail_mebib)\n\n anon_percent = py2_round(100 * anon_mebib / total_mebib, DECIMAL_DIGITS) # pylint: disable=W1619\n\n reserved_mebib = get_platform_reserved_memory()\n # Calculate platform memory in terms of percent reserved\n if reserved_mebib > 0.0:\n platform_memory_percent = py2_round(\n 100 * platform_mebib / reserved_mebib, DECIMAL_DIGITS) # pylint: disable=W1619\n\n pt_platf = prettytable.PrettyTable(\n ['Reserved',\n 'Platform',\n 'Base',\n 'K8s Platform system',\n 'k8s-addon'\n ], caching=False)\n pt_platf.align = 'l'\n\n pt_platf.add_row(\n [reserved_mebib,\n '{} ({}%)'.format(platform_mebib, platform_memory_percent),\n base_mebib,\n k8s_system,\n k8s_addon\n ])\n print('\\nPlatform memory usage in MiB:')\n print(pt_platf)\n\n pt_4k = prettytable.PrettyTable(\n ['Anon',\n 'Cgroup-rss',\n 'Available',\n 'Total'\n ], caching=False)\n pt_4k.align = 'l'\n\n pt_4k.add_row(\n ['{} ({}%)'.format(anon_mebib, anon_percent),\n MEMORY['cgroups']['total_rss'],\n avail_mebib,\n total_mebib\n 
])\n\n print('\\n4K memory usage in MiB:')\n print(pt_4k)\n\n return 0", "def mem(self) -> List[float]:\n return list(map(attrgetter(\"mem\"), self.stats))", "def monitor_memory(df, before=True, description='', round_at=6):\n from psutil import virtual_memory\n step = 'Before' if before else 'After'\n description = '-' if not description else description+' -'\n \n # Convert values to gigs\n convert_to_gig = lambda x: round(x / 1024**3, round_at)\n \n # Get statistics\n df_shape = df.shape\n df_memory = convert_to_gig(df.memory_usage(deep=True).sum())\n df_server_memory_available = convert_to_gig(virtual_memory().available)\n df_server_memory_used = convert_to_gig(virtual_memory().used)\n \n print('{0} {1} dataframe shape: {2}'.format(step, description, df_shape))\n print('{0} {1} dataframe memory size: {2} GBytes'.format(step, description, df_memory))\n print('{0} {1} server available memory: {2} GBytes'.format(step, description, df_server_memory_available))\n print('{0} {1} server used memory: {2} GBytes'.format(step, description, df_server_memory_used))", "def printHeap(self):\n print self.storeHeap.movies", "def get_memory_info(ssh):\r\n cmd04='wmic memorychip get capacity'\r\n retry_number1=3\r\n try:\r\n while True:\r\n if retry_number1 == 0:\r\n logger.writeLog(\"get memory sum size fail\",level='error')\r\n break\r\n stdin,stdout,stderr=ssh.exec_command(cmd04)\r\n data04=stdout.read().decode().strip('Capacity')\r\n print(data04)\r\n if data04 == \"\":\r\n retry_number1 -= 1\r\n logger.writeLog(\"get memory sum size data null\",level='error')\r\n continue\r\n else:\r\n result_list=data04.split()\r\n print(result_list)\r\n memory_size=float(int(result_list[0])+int(result_list[1]))/1024/1024/1024\r\n print(\"mem total Gb: \",memory_size)\r\n logger.writeLog(\"get memory sum size success\",level='info')\r\n # return memory_size\r\n break\r\n except:\r\n logger.writeLog(\"get memory size error\",level='error')\r\n return None\r\n\r\n#6.内存剩余量/Gb\r\n# def get_memory_surplus(ssh):\r\n \"\"\"get memory surplus\"\"\"\r\n cmd05='wmic OS get FreePhysicalMemory'\r\n retry_number2=3\r\n try:\r\n while True:\r\n if retry_number2 == 0:\r\n logger.writeLog(\"get memory surplus fail\",level='error')\r\n break\r\n stdin,stdout,stderr=ssh.exec_command(cmd05)\r\n data05=int(stdout.read().decode().split()[1])\r\n print(data05)\r\n if data05 == \"\":\r\n logger.writeLog(\"get memory surplus data null\",level='error')\r\n retry_number2 -= 1\r\n continue\r\n else:\r\n memory_surplus=round(float(data05)/1024/1024,4)\r\n print(\"mem free Gb: \",memory_surplus)\r\n logger.writeLog(\"get memory surplus data success\",level='info')\r\n # return memory_surplus\r\n break\r\n except:\r\n logger.writeLog(\"get memory surplus error\",level='error')\r\n return None\r\n\r\n#7.内存使用率\r\n# def get_memory_ratio(ssh):\r\n \"\"\"get memory ratio\"\"\"\r\n # memory_size=get_memory_size(ssh)\r\n # memory_surplus=get_memory_surplus(ssh)\r\n if memory_size == \"\" or memory_surplus == \"\":\r\n logger.writeLog(\"memory_szie is null or memory_surplus is null\",level='error')\r\n return None\r\n else:\r\n try:\r\n data06=round(float((memory_size-memory_surplus))/memory_size,4)\r\n print(\"mem use ratio: \",data06)\r\n logger.writeLog(\"get memory ratio success\",level='info')\r\n return (memory_size,memory_surplus,data06)\r\n except:\r\n logger.writeLog(\"get memory ratio error\",level='error')\r\n return None", "def BalloonInstanceMemory(self, instance, mem):\n # Currently chroots don't have memory limits\n pass", "def 
showStatistics(self):\n\n deviceName = self.deviceName()\n\n if deviceName:\n stats = a.sys.net.lnx.device.DeviceUtils.getStatistics(self.name, self._log, deviceName) \n if stats:\n for key in stats:\n print \"%s: %s\" % (key, stats[key])", "def memory(self):\n # Run 'free -m' command and make a list from output.\n mem_data = self.execCMD('free', '-m').split()\n total_mem = int(mem_data[7]) / 1024.\n used_mem = int(mem_data[15]) / 1024.\n # Caculate percentage\n used_mem_percent = int(used_mem / (total_mem / 100))\n\n # Results are in kilobyte.\n return total_mem, used_mem, used_mem_percent", "def get_mem_usage():\n return process.memory_info().rss / 1024.**2", "def read_cbmem_log(host=None):\r\n if host:\r\n ret_out = host.run('cbmem -1').stdout\r\n return ret_out\r\n else:\r\n ret_out = utils.run('cbmem -1').stdout\r\n return ret_out", "def print_allocations(self, ):\n pass", "def get_memory(self, mem_type='usedMemory'):\n pass", "def collect_metrics():\n p = os.path.join(os.sep, \"mnt\", \"glusterfs\")\n mount_stats = os.statvfs(p)\n # block size * total blocks\n total_space = mount_stats.f_blocks * mount_stats.f_bsize\n free_space = mount_stats.f_bfree * mount_stats.f_bsize\n # capsize only operates on i64 values\n used_space = total_space - free_space\n gb_used = used_space / 1024 / 1024 / 1024\n\n # log!(format!(\"Collecting metric gb-used {}\", gb_used), Info)\n add_metric(\"gb-used\", \"{}\".format(gb_used))", "def stats(self):\n pass", "def test_stats(test_microvm_with_api):\n test_microvm = test_microvm_with_api\n test_microvm.spawn()\n test_microvm.basic_config()\n test_microvm.add_net_iface()\n\n # Add a memory balloon with stats enabled.\n test_microvm.api.balloon.put(\n amount_mib=0, deflate_on_oom=True, stats_polling_interval_s=1\n )\n\n # Start the microvm.\n test_microvm.start()\n firecracker_pid = test_microvm.jailer_clone_pid\n\n # Get an initial reading of the stats.\n initial_stats = test_microvm.api.balloon_stats.get().json()\n\n # Dirty 10MB of pages.\n make_guest_dirty_memory(test_microvm.ssh, amount_mib=10)\n time.sleep(1)\n # This call will internally wait for rss to become stable.\n _ = get_stable_rss_mem_by_pid(firecracker_pid)\n\n # Make sure that the stats catch the page faults.\n after_workload_stats = test_microvm.api.balloon_stats.get().json()\n assert initial_stats.get(\"minor_faults\", 0) < after_workload_stats[\"minor_faults\"]\n assert initial_stats.get(\"major_faults\", 0) < after_workload_stats[\"major_faults\"]\n\n # Now inflate the balloon with 10MB of pages.\n test_microvm.api.balloon.patch(amount_mib=10)\n # This call will internally wait for rss to become stable.\n _ = get_stable_rss_mem_by_pid(firecracker_pid)\n\n # Get another reading of the stats after the polling interval has passed.\n inflated_stats = test_microvm.api.balloon_stats.get().json()\n\n # Ensure the stats reflect inflating the balloon.\n assert after_workload_stats[\"free_memory\"] > inflated_stats[\"free_memory\"]\n assert after_workload_stats[\"available_memory\"] > inflated_stats[\"available_memory\"]\n\n # Deflate the balloon.check that the stats show the increase in\n # available memory.\n test_microvm.api.balloon.patch(amount_mib=0)\n # This call will internally wait for rss to become stable.\n _ = get_stable_rss_mem_by_pid(firecracker_pid)\n\n # Get another reading of the stats after the polling interval has passed.\n deflated_stats = test_microvm.api.balloon_stats.get().json()\n\n # Ensure the stats reflect deflating the balloon.\n assert inflated_stats[\"free_memory\"] < 
deflated_stats[\"free_memory\"]\n assert inflated_stats[\"available_memory\"] < deflated_stats[\"available_memory\"]", "def get_mem_info():\n MemInfoEntry = namedtuple('MemInfoEntry', ['value', 'unit'])\n mem_info = {}\n with open('/proc/meminfo') as file:\n for line in file:\n key, value, *unit = line.strip().split()\n mem_info[key.rstrip(':')] = MemInfoEntry(value, unit)\n return mem_info", "def debug(self):\n print(self.memory)\n print('r0 = %s, ip = %s' % (self.r0, self.ip))", "def print_metrics(result):\n logging.log(LOG_LEVEL_OUTPUT_INFO,\n '------------------------------------------------')\n logging.log(LOG_LEVEL_OUTPUT_INFO, ' KEY METRICS: ')\n logging.log(LOG_LEVEL_OUTPUT_INFO,\n '------------------------------------------------')\n logging.log(LOG_LEVEL_OUTPUT_INFO, '* pages_count: %d',\n get_counter_metric(result, 'pages_count'))\n logging.log(LOG_LEVEL_OUTPUT_INFO, '* revisions_count: %d',\n get_counter_metric(result, 'revisions_count'))\n logging.log(LOG_LEVEL_OUTPUT_INFO, '* very_long_page_histories_count: %d',\n get_counter_metric(result, 'very_long_page_histories_count'))\n revisions_per_page_distr = get_distributions_metric(\n result, 'revisions_per_page_distr')\n logging.log(LOG_LEVEL_OUTPUT_INFO, '* revisions_per_page_distr.mean: %d',\n revisions_per_page_distr.mean)\n logging.log(LOG_LEVEL_OUTPUT_INFO, '* revisions_per_page_distr.sum: %d',\n revisions_per_page_distr.sum)\n cumulative_page_rev_size_distr = get_distributions_metric(\n result, 'cumulative_page_rev_size_distr')\n logging.log(LOG_LEVEL_OUTPUT_INFO,\n '* cumulative_page_rev_size_distr.mean: %d',\n cumulative_page_rev_size_distr.mean)\n logging.log(LOG_LEVEL_OUTPUT_INFO, '* cumulative_page_rev_size_distr.sum: %d',\n cumulative_page_rev_size_distr.sum)", "def subcmd_getmemory_main(args, parameter_info):\n \n from get_memory_inventory import get_memory_inventory\n result = get_memory_inventory(parameter_info['ip'], parameter_info['user'], parameter_info['passwd'], parameter_info['sysid'], None)\n \n if result['ret'] is True:\n del result['ret']\n sys.stdout.write(json.dumps(result['entries'], sort_keys=True, indent=2))\n else:\n sys.stderr.write(result['msg'])", "def get_telemetry ():\n telemetry = OrderedDict()\n\n telemetry[\"ip_addr\"] = socket.gethostbyname(socket.gethostname())\n\n telemetry[\"mem_free\"] = psutil.virtual_memory().free\n\n telemetry[\"cpu_num\"] = psutil.NUM_CPUS\n\n x = psutil.cpu_times()\n telemetry[\"cpu_times\"] = OrderedDict([ (\"user\", x.user), (\"system\", x.system), (\"idle\", x.idle) ])\n\n x = psutil.disk_usage(\"/tmp\")\n telemetry[\"disk_usage\"] = OrderedDict([ (\"free\", x.free), (\"percent\", x.percent) ])\n\n x = psutil.disk_io_counters()\n telemetry[\"disk_io\"] = OrderedDict([ (\"read_count\", x.read_count), (\"write_count\", x.write_count), (\"read_bytes\", x.read_bytes), (\"write_bytes\", x.write_bytes), (\"read_time\", x.read_time), (\"write_time\", x.write_time) ])\n\n x = psutil.network_io_counters()\n telemetry[\"network_io\"] = OrderedDict([ (\"bytes_sent\", x.bytes_sent), (\"bytes_recv\", x.bytes_recv), (\"packets_sent\", x.packets_sent), (\"packets_recv\", x.packets_recv), (\"errin\", x.errin), (\"errout\", x.errout), (\"dropin\", x.dropin), (\"dropout\", x.dropout) ])\n\n return telemetry", "def mem_per_proc(self):\n return self._mem_per_proc", "def print_global_statistics(stats):\n\n print('Final Results')\n print('LED: {} WED: {}'.format(stats.global_letter_edit_distance,stats.global_word_edit_distance))", "def getMemory(self):\n return self.memory", "def 
stats_process():\n nonlocal d_stats, b_status\n log = slog()\n d_stats = self.stats_compute()\n if self.toConsole() or self.args['duf'] or self.args['du']:\n self.dp.qprint(d_stats['report'], level = self.debugLevel)\n slog_filter = filters_show()\n log.title_set('Size statistics')\n if self.args['table3D']: log.render3D()\n log('Total size (raw): %d\\n' % d_stats['totalSize'] )\n log('Total size (friendly): {:,}\\n'.format(d_stats['totalSize']) )\n log('Total size (human): %s\\n' % d_stats['totalSize_human'] )\n log('Total files: %s\\n' % d_stats['files'] )\n log('Total dirs: %s\\n' % d_stats['dirs'] )\n log('Total runtime: %5.3f s' % other.toc() )\n b_status = b_status and d_stats['status']\n return {\n 'status': b_status,\n 'filterLog': slog_filter,\n 'bodyLog': log\n }", "def memory(self):\r\n return self._memory", "def report(self):\r\n print(\"\".join(self.memory), self.error, self.steps)", "def get_meminfo():\r\n info = {}\r\n with open('/proc/meminfo') as f:\r\n for line in f:\r\n m = _MEMINFO_RE.match(line)\r\n if m:\r\n if m.group(2):\r\n name = m.group(1) + '_' + m.group(2)[1:-1]\r\n else:\r\n name = m.group(1)\r\n info[name] = int(m.group(3))\r\n return collections.namedtuple('MemInfo', list(info.keys()))(**info)", "def memory_get_usage():\n raise NotImplementedError()", "def test00(self):\n\n # Obtain memory info (only for Linux 2.6.x)\n for line in Path(\"/proc/self/status\").read_text().splitlines():\n if line.startswith(\"VmSize:\"):\n vmsize = int(line.split()[1])\n elif line.startswith(\"VmRSS:\"):\n vmrss = int(line.split()[1])\n elif line.startswith(\"VmData:\"):\n vmdata = int(line.split()[1])\n elif line.startswith(\"VmStk:\"):\n vmstk = int(line.split()[1])\n elif line.startswith(\"VmExe:\"):\n vmexe = int(line.split()[1])\n elif line.startswith(\"VmLib:\"):\n vmlib = int(line.split()[1])\n print(\"\\nWallClock time:\", clock() - self.tref)\n print(\"Memory usage: ******* %s *******\" % self._getName())\n print(f\"VmSize: {vmsize:>7} kB\\tVmRSS: {vmrss:>7} kB\")\n print(f\"VmData: {vmdata:>7} kB\\tVmStk: {vmstk:>7} kB\")\n print(f\"VmExe: {vmexe:>7} kB\\tVmLib: {vmlib:>7} kB\")", "def test_memoryleak(self):\n N = 1000\n if logger.getEffectiveLevel() <= logging.INFO:\n logger.debug(\"Testing for memory leak\")\n for i in range(N):\n _img = fabio.open(self.mar)\n print(\"Reading #%s/%s\" % (i, N))", "def get_memory_usage():\n\n memory_usage = {'total' : 0, 'used' : 0}\n meminfo = subprocess.Popen(['free', '-m'], shell=False, stdout=subprocess.PIPE)\n meminfo.stdout.readline()\n total_used = meminfo.stdout.readline()\n memory_usage['total'] = total_used.split()[1]\n memory_usage['used'] = total_used.split()[2]\n return memory_usage", "def _logging_smm(self, stats_obs, stats_sim):\n fname = 'monitoring.estimagic.smm.info'\n if self.num_evals == 1 and os.path.exists(fname):\n os.unlink(fname)\n\n with open(fname, 'a+') as outfile:\n\n fmt_ = '\\n\\n{:>8}{:>15}\\n\\n'\n outfile.write(fmt_.format('EVALUATION', self.num_evals))\n\n fmt_ = '{:>8}' + '{:>15}' * 4 + '\\n\\n'\n info = ['Moment', 'Observed', 'Simulated', 'Difference', 'Weight']\n outfile.write(fmt_.format(*info))\n\n for i, moment in enumerate(stats_obs):\n\n stat_obs, stat_sim = stats_obs[i], stats_sim[i]\n info = [i, stat_obs, stat_sim, abs(stat_obs - stat_sim), self.weighing_matrix[i, i]]\n\n fmt_ = '{:>8}' + '{:15.5f}' * 4 + '\\n'\n outfile.write(fmt_.format(*info))", "def printMachineStatOut():\n print(\"---------------MACHINES STATS --------------------------\\n\", file=out_file)\n for machine in 
machines_list:\n cur_job_list = machine.retrieveJobsList()\n print(\"machine number \", machine.number, \"assigned jobs [number,length,type]:\", file=out_file)\n l = []\n for job_number, job in cur_job_list.items():\n l.append(job)\n print(\"\".join(str(l)), file=out_file)\n\n print(\"Assigned types: \", machine.getTypes(), file=out_file)\n print(\"Types histogram: \", machine.types, \"Sum of each type: \", machine.types_sums, \"Makespan : \", machine.span,\n file=out_file)\n print(\"\\n\", file=out_file)\n print(\"Max makespan is : \", makeSpan(), file=out_file)", "def warn_replay_buffer_size(*, item: SampleBatchType, num_items: int) -> None:\n if log_once(\"replay_buffer_size\"):\n item_size = item.size_bytes()\n psutil_mem = psutil.virtual_memory()\n total_gb = psutil_mem.total / 1e9\n mem_size = num_items * item_size / 1e9\n msg = (\"Estimated max memory usage for replay buffer is {} GB \"\n \"({} batches of size {}, {} bytes each), \"\n \"available system memory is {} GB\".format(\n mem_size, num_items, item.count, item_size, total_gb))\n if mem_size > total_gb:\n raise ValueError(msg)\n elif mem_size > 0.2 * total_gb:\n logger.warning(msg)\n else:\n logger.info(msg)", "def GraphMemVsSize(data, args, cmd):\n p = data[args][cmd]\n vers = sorted(p)\n sizes = sorted(p[vers[0]])\n for ver in vers:\n mems = [p[ver][size][1] for size in sizes]\n plt.plot(sizes, mems, label=ver)\n if cmd == 'delta':\n mult=10240\n else:\n mult=32\n ax = plt.gca()\n ax.yaxis.set_major_locator(MultipleLocator(mult))\n ax.set_xlim(left=0, right=1024)\n #plt.xscale('log')\n #plt.yscale('log')\n saveplt('data/mem-size-%s-%s.svg' % (args,cmd), '%s memory vs filesize for %s' % (cmd, args),\n 'filesize', 'KB', sizeticks)", "def test_mem_available():\n result = _run_metric('mem_available')\n assert result.exit_code == 0", "def _memory(self):\n memory = {}\n memory_used = cpmCPUMemoryUsed\n varbinds = self._snmp_connection.bulk_walk(memory_used)\n for varbind in varbinds:\n # grab the last element of the index to use as the memory_id\n if self._cisco_model in self._n3k_models:\n memory_id = self._process_mib_indices_table[int(varbind.index.split('.')[-1])]\n else:\n memory_id = int(varbind.index.split('.')[-1])\n memory[memory_id] = {u'memory_used': int(varbind.value)}\n\n memory_free = cpmCPUMemoryFree\n varbinds = self._snmp_connection.bulk_walk(memory_free)\n for varbind in varbinds:\n # grab the last element of the index to use as the memory_id\n if self._cisco_model in self._n3k_models:\n memory_id = self._process_mib_indices_table[int(varbind.index.split('.')[-1])]\n else:\n memory_id = int(varbind.index.split('.')[-1])\n memory[memory_id][u'memory_free'] = int(varbind.value)\n memory[memory_id][u'memory_total'] = memory[memory_id][u'memory_used'] + int(varbind.value)\n\n for memory_id in list(memory.keys()):\n if memory_id in self._module_numbers:\n if int(self._module_numbers[memory_id]) in self._entity_physical_names:\n memory[memory_id][u'memory_type'] = u\"Module {} ({})\".format(self._module_numbers[memory_id],\n self._entity_physical_names[\n int(self._module_numbers[memory_id])])\n else:\n memory[memory_id][u'memory_type'] = u\"Module {}\".format(self._module_numbers[memory_id])\n\n if not len(memory):\n self._logger.warn(\n u'Failed to get memory enrichments on device \"%s\" with model \"%s\"' %\n (self._device_fqdn, self._cisco_model))\n\n return memory", "def get_meminfo():\n\n mem_info = {}\n re_keyval = re.compile(r'^\\s*(\\S+)\\s*[=:]\\s*(\\d+)')\n try:\n with open(MEMINFO, 'r') as mem_file:\n for 
line in mem_file:\n match = re_keyval.search(line)\n if match:\n keyfile = match.group(1)\n val = match.group(2)\n mem_info[keyfile] = int(val)\n except IOError as err:\n LOG.error('%s: Cannot read meminfo, error=%s',\n 'platform memory usage', err)\n return mem_info\n\n return mem_info", "def get_gpu_memory_map():\n result = subprocess.check_output(\n [\n 'nvidia-smi', '--query-gpu=memory.used',\n '--format=csv,nounits,noheader'\n ])#, encoding='utf-8')\n # Convert lines into a dictionary\n gpu_memory = [int(x) for x in result.strip().split('\\n')]\n gpu_memory_map = dict(zip(range(len(gpu_memory)), gpu_memory))\n return gpu_memory_map", "def get_gpu_memory_map():\n\tresult = subprocess.check_output(\n\t\t[\n\t\t\t'nvidia-smi', '--query-gpu=memory.free',\n\t\t\t'--format=csv,nounits,noheader'\n\t\t])\n\t# Convert lines into a dictionary\n\tresult=result.decode('utf-8')\n\tprint(result)\n\tgpu_memory = [int(x) for x in result.strip().split('\\n')]\n\tgpu_memory_map = dict(zip(range(len(gpu_memory)), gpu_memory))\n\treturn gpu_memory_map", "def __repr__(self):\n return \"This {} has {} GB of memory\".format(\n self.name,\n self.memory_in_gb\n )" ]
[ "0.70418596", "0.6867534", "0.643289", "0.6419121", "0.6389766", "0.6379068", "0.59949344", "0.5943068", "0.5942947", "0.5925752", "0.5925409", "0.5925409", "0.5925409", "0.5925409", "0.5925409", "0.5925409", "0.5925409", "0.59162056", "0.58963734", "0.5882967", "0.5870274", "0.5868042", "0.58618057", "0.58380634", "0.5837955", "0.5830046", "0.5812539", "0.5805553", "0.5799975", "0.578249", "0.576636", "0.5727731", "0.5715636", "0.56513697", "0.5629845", "0.560449", "0.5579249", "0.5558015", "0.5553862", "0.5531634", "0.55079323", "0.54978627", "0.5493497", "0.54787254", "0.54605037", "0.54577076", "0.5428062", "0.5417826", "0.5413298", "0.5404939", "0.54007924", "0.54000276", "0.53870004", "0.5369451", "0.53559893", "0.5355849", "0.5346946", "0.5336895", "0.5331194", "0.53292954", "0.5328432", "0.53101474", "0.5305196", "0.5279564", "0.5278824", "0.5270628", "0.5255561", "0.5253127", "0.5252915", "0.5251369", "0.5242203", "0.5238992", "0.5238232", "0.5238011", "0.5235328", "0.5227483", "0.52246803", "0.5217155", "0.5211287", "0.52085835", "0.52016914", "0.5201502", "0.51918894", "0.51893806", "0.5188403", "0.5185868", "0.5179354", "0.51734", "0.5168819", "0.5161632", "0.5161338", "0.5160269", "0.5155455", "0.5140247", "0.5137055", "0.51332486", "0.5131368", "0.5116009", "0.5114475", "0.51128876" ]
0.798583
0
Calls HeapValidate(GetProcessHeap(), 0, NULL);
def checkProcessHeap():
    # Get the process heap.
    try:
        hHeap = ctypes.windll.kernel32.GetProcessHeap();
    except:
        reporter.logXcpt();
        return False;

    # Check it.
    try:
        fIsOkay = ctypes.windll.kernel32.HeapValidate(hHeap, 0, None);
    except:
        reporter.logXcpt();
        return False;

    if fIsOkay == 0:
        reporter.log('HeapValidate failed!');

        # Try trigger a dump using c:\utils\procdump64.exe.
        from common import utils;

        iPid = os.getpid();
        asArgs = [ 'e:\\utils\\procdump64.exe', '-ma', '%s' % (iPid,), 'c:\\CrashDumps\\python.exe-%u-heap.dmp' % (iPid,)];
        if utils.getHostArch() != 'amd64':
            asArgs[0] = 'c:\\utils\\procdump.exe'
        reporter.log('Trying to dump this process using: %s' % (asArgs,));
        utils.processCall(asArgs);

        # Generate a crash exception.
        ctypes.windll.msvcrt.strcpy(None, None, 1024);

    return True;
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def precheck(self):\n if self.__memory_size is None:\n self.logger.exception(\"[Memory] Please set memory size.\")\n raise ArgsNotCorrect(\"Please set memory size.\")", "def test_func_heap(self):\n cmd = \"deref $_heap()\"\n target = _target(\"heap\")\n self.assertFailIfInactiveSession(gdb_run_cmd(cmd, target=target))\n res = gdb_run_silent_cmd(cmd, target=target)\n self.assertNoException(res)\n if is_64b():\n self.assertIn(\"+0x0048:\", res)\n else:\n self.assertIn(\"+0x0024:\", res)\n\n cmd = \"deref $_heap(0x10+0x10)\"\n res = gdb_run_silent_cmd(cmd, target=target)\n self.assertNoException(res)\n if is_64b():\n self.assertIn(\"+0x0048:\", res)\n else:\n self.assertIn(\"+0x0024:\", res)", "def test_validate_factorial_heap_pq(self):\n from ch04.factorial_heap import PQ, validate\n\n end = 10000\n pq = PQ(end)\n for i in range(end):\n pq.enqueue(i, i)\n validate(pq)\n\n last = end-1\n while pq:\n self.assertEqual(last, pq.dequeue())\n last -= 1\n validate(pq)", "def _checkAvailableMemory():\n #execute free -m to get output in MB\n logging.debug(\"checking total memory\")\n cmd = [\n basedefs.EXEC_FREE, \"-m\"\n ]\n output, rc = utils.execCmd(cmdList=cmd, failOnError=True, msg=output_messages.ERR_EXP_FREE_MEM)\n\n #itterate over output and look for the line: \"Mem: 1 something\"\n #and extract 1 from it (1 is an example to the free memory)\n availableMemory = 0\n for line in output.split(\"\\n\"):\n result = re.match(\"Mem:\\s+(\\d+)\\s+.+\", line)\n if result:\n logging.debug(\"Found a match, amount of memory: %s\" % result.group(1))\n availableMemory = result.group(1)\n\n #compare found memory to restrictions\n availableMemory = int(availableMemory)\n #multiplying CONST_MIN_MEMORY by 0.95 to have tolerance of 5%\n if availableMemory < (basedefs.CONST_MIN_MEMORY_MB * 0.95):\n logging.error(\"Availble memory (%s) is lower then the minimum requirments (%s)\" % (availableMemory, basedefs.CONST_MIN_MEMORY_MB))\n raise Exception(output_messages.ERR_EXP_NOT_EMOUGH_MEMORY)\n\n if availableMemory < basedefs.CONST_WARN_MEMORY_MB:\n logging.warn(\"There is less then %s available memory \" % basedefs.CONST_WARN_MEMORY_MB)\n controller.MESSAGES.append(output_messages.WARN_LOW_MEMORY)", "def test_pop(self):\n self.assertRaises(EmptyHeapException, self.minheap.pop)\n self.minheap.heap = [0, 1, 4, 7, 9]\n assert self.minheap.pop() == 1\n assert self.minheap.heap == [0, 4, 9, 7]", "def sanity_check_process(self):\n assert_equals(self.proc.returncode, None)\n time.sleep(1)", "def check_mem(self, values):\n try:\n virt_mem = psutil.virtual_memory()\n values[keys.KEY_VIRTUAL_MEM_TOTAL] = virt_mem.total\n values[keys.KEY_VIRTUAL_MEM_PERCENT] = virt_mem.percent\n except:\n logging.error(\"Error collecting memory stats.\")", "def test_static_is_heap(self):\n good = [4, 4, 8, 9, 4, 12, 9, 11, 13]\n bad = [1,2,3,114,5,6,7,8,9,10]\n\n self.assertTrue(Heap.is_heap(good), 'should hold the heap property')\n self.assertFalse(Heap.is_heap(bad), 'should not hold the heap property')", "def CalcNewErrorMeasures(self):\n for p in self.Active[:self.N_Active]:\n if self.Errors[p] < 0.0:\n #print self.CalcErrorMeasure(p), p\n self.Errors[p] = self.CalcErrorMeasure(p)\n # Add new values to the heap\n self.Active[:self.heap_length+1],dummy= maxheap.heap_insert(self.Errors[:self.N_Idx], \n p, self.Active[:self.heap_length+1],\n self.heap_length)\n self.heap_length +=1\n \n if self.heap_length != self.N_Active:\n raise ValueError", "def hxlvalidate():\n run_script(hxlvalidate_main)", "def __validate():\n # TODO: implement", 
"def minHeap(self):\n for pos in range(self.size // 2, 0, -1):\n self.minHeapify(pos)", "def CleanUp(self):\n if self.process != 0 and self.mem_address != 0:\n # free up the memory we allocated\n #win32api.SetLastError(0)\n self.CheckGuardSignature()\n\n ret = win32functions.VirtualFreeEx(\n c_void_p(self.process),\n c_void_p(self.mem_address),\n win32structures.ULONG_PTR(0),\n wintypes.DWORD(win32defines.MEM_RELEASE))\n if ret == 0:\n print('Error: CleanUp: VirtualFreeEx() returned zero for address ', hex(self.mem_address))\n last_error = win32api.GetLastError()\n print('LastError = ', last_error, ': ', win32api.FormatMessage(last_error).rstrip())\n sys.stdout.flush()\n self._CloseHandle()\n raise WinError()\n self.mem_address = 0\n self._CloseHandle()\n else:\n pass # ActionLogger().log('\\nWARNING: Cannot call VirtualFreeEx! process_id == 0.')", "def heapleak():\n for i in range(16):\n evl('{}'.format(i))\n\n # Trigger heap info leak\n evl('h=0+0')\n return readintvar('h') & 0xfffffffffffff000", "def validate(self):\n errors = []\n app = errors.append\n\n if not self.hint_cores >= self.mpi_procs * self.omp_threads >= self.min_cores:\n app(\"self.hint_cores >= mpi_procs * omp_threads >= self.min_cores not satisfied\")\n\n if self.omp_threads > self.hw.cores_per_node:\n app(\"omp_threads > hw.cores_per_node\")\n\n if self.mem_per_proc > self.hw.mem_per_node:\n app(\"mem_mb >= self.hw.mem_per_node\")\n\n if not self.max_mem_per_proc >= self.mem_per_proc >= self.min_mem_per_proc:\n app(\"self.max_mem_per_proc >= mem_mb >= self.min_mem_per_proc not satisfied\")\n\n if self.priority <= 0:\n app(\"priority must be > 0\")\n\n if not (1 <= self.min_cores <= self.hw.num_cores >= self.hint_cores):\n app(\"1 <= min_cores <= hardware num_cores >= hint_cores not satisfied\")\n\n if errors:\n raise self.Error(str(self) + \"\\n\".join(errors))", "def bad_cgroup_processes_check():\n return CGCheck([], bad_cgroup_processes)", "def sanity_check(self, test_vec_handle):\n self.vec_space.sanity_check(test_vec_handle)", "def __init__(self, heap_used=None, heap_committed=None, heap_max=None, non_heap_used=None, non_heap_committed=None, non_heap_max=None, direct_count=None, direct_used=None, direct_max=None, mapped_count=None, mapped_used=None, mapped_max=None, memory_segments_available=None, memory_segments_total=None, garbage_collectors=None): # noqa: E501 # noqa: E501\n self._heap_used = None\n self._heap_committed = None\n self._heap_max = None\n self._non_heap_used = None\n self._non_heap_committed = None\n self._non_heap_max = None\n self._direct_count = None\n self._direct_used = None\n self._direct_max = None\n self._mapped_count = None\n self._mapped_used = None\n self._mapped_max = None\n self._memory_segments_available = None\n self._memory_segments_total = None\n self._garbage_collectors = None\n self.discriminator = None\n if heap_used is not None:\n self.heap_used = heap_used\n if heap_committed is not None:\n self.heap_committed = heap_committed\n if heap_max is not None:\n self.heap_max = heap_max\n if non_heap_used is not None:\n self.non_heap_used = non_heap_used\n if non_heap_committed is not None:\n self.non_heap_committed = non_heap_committed\n if non_heap_max is not None:\n self.non_heap_max = non_heap_max\n if direct_count is not None:\n self.direct_count = direct_count\n if direct_used is not None:\n self.direct_used = direct_used\n if direct_max is not None:\n self.direct_max = direct_max\n if mapped_count is not None:\n self.mapped_count = mapped_count\n if mapped_used is not None:\n 
self.mapped_used = mapped_used\n if mapped_max is not None:\n self.mapped_max = mapped_max\n if memory_segments_available is not None:\n self.memory_segments_available = memory_segments_available\n if memory_segments_total is not None:\n self.memory_segments_total = memory_segments_total\n if garbage_collectors is not None:\n self.garbage_collectors = garbage_collectors", "def is_in_heap(self, address):\n return self.is_address_of_type(address, MemoryType.MajorHeap, MemoryType.MinorHeap)", "def min_heap(self): \n \n for pos in range(self.size//2, 0, -1): \n self.min_heapify(pos)", "def __init__(self):\r\n self.maxHeap = []\r\n self.minHeap = []", "def __init__(self):\n self.max_heap = MaxHeap()\n self.min_heap = MinHeap()", "def test_free_space_rejects_file_arguments():\n result = _run_metric('free_space', '/etc/hosts')\n # 2 is the exit code for a UsageError, which includes bad parameters.\n assert result.exit_code == 2\n # Is this too fragile?\n assert 'Invalid value' in result.output", "def modifyHeapSizeProperties(self):\n pass", "def validate():", "def _sanity_check_m2ee_stats(m2ee_stats):\n for memory_type, memory_value in m2ee_stats[\"memory\"].items():\n if not isinstance(memory_value, int):\n # Memorypools are here and are stored as a dict\n continue\n\n if memory_value < 0:\n # memory value can be zero, but not negative\n logging.error(\n \"Memory stats with non-logical values: %s\",\n m2ee_stats[\"memory\"],\n )\n raise RuntimeError(\n \"Memory statistics have non-logical values. This will \"\n \"cause incorrect data in your application's metrics. \"\n \"Please contact support!\"\n )", "def validate(self, tracked_pids, test_case=stubTestcase, debug=False):\n \n out = os.popen('dmesg -c -s %d' % LOG_BUF_LEN)\n dmesg_lines = out.readlines()\n out.close()\n \n allocations = []\n memory_allocated = False\n \n if debug:\n f = open('mm_debug.txt', 'w+')\n f.write('All KMALLOC/KFREE messages:\\n\\n')\n f.write(''.join(dmesg_lines))\n f.write('\\nTracked pids: %s\\nOnly relevant KMALLOC/KFREE messages:\\n' % repr(tracked_pids))\n \n for line in dmesg_lines:\n re_result = re.search(r'.*?(KMALLOC|KFREE) (\\d*) (\\w*)', line)\n if not re_result:\n continue\n \n action = re_result.group(1)\n pid = int(re_result.group(2))\n address = re_result.group(3)\n \n if pid not in tracked_pids:\n continue\n \n f.write(line)\n\n f.write('\\nProcessing KMALLOC/KFREE messages:\\n')\n \n try:\n for line in dmesg_lines:\n re_result = re.search(r'.*?(KMALLOC|KFREE) (\\d*) (\\w*)', line)\n if not re_result:\n continue\n \n action = re_result.group(1)\n pid = int(re_result.group(2))\n address = re_result.group(3)\n \n if pid not in tracked_pids:\n continue\n \n if debug:\n f.write(line)\n \n if action == 'KMALLOC':\n memory_allocated = True\n if address in allocations:\n test_case.fail('Same address, %s, allocated twice without release.' % address)\n break\n allocations.append(address)\n \n if action == 'KFREE':\n if address not in allocations:\n test_case.fail('Freeing a non allocated address, %s.' 
% address)\n break\n allocations.remove(address)\n else:\n test_case.assert_(memory_allocated, 'No memory allocated during execution.') \n test_case.assert_(not allocations, 'Failed to free some of the allocated memory, left %d:\\n%s' % (len(allocations), '\\n'.join(allocations)))\n finally:\n if debug:\n f.close()", "def test_prevent_wrong_memory(self):\n self.assertRaises(cinv.host.Error, self.wrong_memory)", "def checkmem(self,file_,line_): # 3\n res = self.__obj.checkmemtask(file_,line_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def __init__(self):\n self.heapList = [0]\n self.currentSize = 0", "def test_BLINK_LAUNCH_PROCESS(self):\n self.verify_references_to_prerequisites(processes.BLINK_LAUNCH_PROCESS)", "def pageFault(proc):\n\n global pfList\n pfList.append([proc, 1])", "def check(self, runtime):", "def auditmemallocfail(self) :\n\t\ttry :\n\t\t\treturn self._auditmemallocfail\n\t\texcept Exception as e:\n\t\t\traise e", "def __init__(self):\n self.heap = []", "def __init__(self):\n self.heap = []", "def __init__(self):\n self.heap = []", "def main():\n heap = MinHeap()\n for i in range(10):\n heap.add(i)\n print(heap.peek())\n for i in range(4):\n heap.poll()\n print(heap.peek())", "def __init__(self):\n self.heap = [None]", "def validateProcess(process):\n \n schedule=process.schedule_()\n paths=process.paths_()\n endpaths=process.endpaths_()\n \n # check output mods are in paths and have appropriate settings\n for outputModName in process.outputModules_().keys():\n outputMod = getattr(process, outputModName)\n if not hasattr(outputMod, 'dataset'):\n msg = \"Process contains output module without dataset PSET: %s \\n\" % outputModName\n msg += \" You need to add this PSET to this module to set dataTier and filterName\\n\"\n raise RuntimeError(msg)\n ds=getattr(outputMod,'dataset')\n if not hasattr(ds, \"dataTier\"):\n msg = \"Process contains output module without dataTier parameter: %s \\n\" % outputModName\n msg += \" You need to add an untracked parameter to the dataset PSET of this module to set dataTier\\n\"\n raise RuntimeError(msg)\n\n # check module in path or whatever (not sure of exact syntax for endpath)\n omRun=False\n\n if schedule==None:\n for path in paths:\n if outputModName in getattr(process,path).moduleNames():\n omRun=True\n for path in endpaths:\n if outputModName in getattr(process,path).moduleNames():\n omRun=True\n else:\n for path in schedule:\n if outputModName in path.moduleNames():\n omRun=True\n if omRun==False:\n msg = \"Output Module %s not in endPath\" % outputModName\n raise RuntimeError(msg)", "def __init__(self):\n self.min_heap = []\n self.max_heap = []\n self.size_max, self.size_min = 0, 0", "def __init__(self):\n self.heap1 = []\n self.heap2 = []\n self.size = 0", "def __init__(self):\n self.__max_heap = []\n self.__min_heap = []", "def test_deleter(self):\n self.rebuild_all()\n l = len(self.real_heap)\n #print \"Before :\",l\n for member_remove in xrange(0,l):\n self.rebuild_all()\n \n l = len(self.real_heap)\n #print \"Aftre :\",l\n #print \"To delete :\",self.real_heap\n #print \"member to delete :\",member_remove\n self.real_heap.new_delete(member_remove)\n assert self.is_heap_valid(self.real_heap) == True\n\n #check if it is there ?\n tmp_max = max(self.real_heap)\n real_max = self.real_heap.extract_max()\n assert tmp_max == real_max\n assert self.is_heap_valid(self.real_heap) == True\n\n if member_remove%10==0:\n print \"Deletion of %d/%d is completed\"%(member_remove,l)", "def 
testZeroSize(self):\n hd = HeapDict(size=0)\n hd.push('a', 1)\n hd.push('b', 1)\n self.assertEqual(hd.get_result(), {'a': [], 'b': []})", "def _validate(self):\n pass", "def __init__(self, heap=[]):\n\n # logger_cagada.debug(\"pero si el orig heap %s\" % heap)\n heapq.heapify(heap)\n # logger_cagada.debug(\"a cihnga el heap %s\" % heap)\n self.heap = heap\n self.entry_finder = dict({i[-1]: i for i in heap})\n # logger_cagada.debug(\"el finder es %s\" % self.entry_finder)\n self.REMOVED = sys.maxsize", "def empty(heap):\n return size(heap) == 0", "def _validate_hyperparameters(self):\n\n if (self.reg_gamma < 0) or (self.reg_gamma > 1):\n raise ValueError(\"reg_gamma must be >= 0 and <1, got %s.\" % self.reg_gamma)\n \n if self.xmin > self.xmax:\n raise ValueError(\"xmin must be <= xmax, got %s and %s.\" % (self.xmin, self.xmax))", "def test_free_space_without_arguments():\n result = _run_metric('free_space')\n assert result.exit_code == 0\n assert '%' in result.output", "def check_if_sufficient_memory():\n percent_memory = psutil.virtual_memory().percent\n if percent_memory > 75:\n raise ValueError('Please use a device with more CPU ram or a smaller dataset')", "def __init__(self):\n # min heap for right part, max heap for left part\n self.minHeap_right = []\n self.maxHeap_left = []\n self.tot_num = 0", "def __init__(self):\n super(_SerializedEventHeap, self).__init__()\n self._heap = []\n self.data_size = 0", "def validate(self):\n if not self.keys:\n raise ValueError(\"Virtual host missing keys\")\n for i in self.keys:\n i.validate()", "def min_system_resources(node):\n\n min_sys_res = True\n\n # CPUs\n if \"layout\" in node[\"cpu\"]:\n total_cpus = len(node[\"cpu\"][\"layout\"])\n if total_cpus < 2:\n print(\n \"\\nThere is only {} CPU(s) available on this system. \"\n \"This is not enough to run VPP.\".format(total_cpus)\n )\n min_sys_res = False\n\n # System Memory\n if (\n \"free\" in node[\"hugepages\"]\n and \"memfree\" in node[\"hugepages\"]\n and \"size\" in node[\"hugepages\"]\n ):\n free = node[\"hugepages\"][\"free\"]\n memfree = float(node[\"hugepages\"][\"memfree\"].split(\" \")[0])\n hugesize = float(node[\"hugepages\"][\"size\"].split(\" \")[0])\n\n memhugepages = MIN_TOTAL_HUGE_PAGES * hugesize\n percentmemhugepages = (memhugepages / memfree) * 100\n if free is \"0\" and percentmemhugepages > MAX_PERCENT_FOR_HUGE_PAGES:\n print(\n \"\\nThe System has only {} of free memory. 
You will not \"\n \"be able to allocate enough Huge Pages for VPP.\".format(\n int(memfree)\n )\n )\n min_sys_res = False\n\n return min_sys_res", "def validate(self):\n AcceleratorType.validate(self.accelerator_type)\n gcp.validate_machine_configuration(self.cpu_cores,\n self.memory,\n self.accelerator_type,\n self.accelerator_count)", "def test_free_space_rejects_nonexistent_paths():\n totally_made_up_path = \"/cwmon/{0}\".format(uuid.uuid4())\n result = _run_metric('free_space', totally_made_up_path)\n # 2 is the exit code for a UsageError, which includes bad parameters.\n assert result.exit_code == 2\n # Is this too fragile?\n assert 'Invalid value' in result.output", "def test_heap_sort(self):\n integers = heap_sort(self.actual)\n self.assertEqual(self.expected, integers)", "def is_valid_data(self, address):\n return self.is_address_of_type(address,\n MemoryType.MajorHeap, MemoryType.MinorHeap,\n MemoryType.StaticData, MemoryType.Stack,\n MemoryType.Finalisers)", "def __init__(self):\n self.minheap = []\n self.maxheap = []\n self.len_min = self.len_max = 0", "def heapify(x):\n pass", "def _check(self):\n assert self._leaves, (\n 'Need to validate AssetAllocation before using it.')", "def __init__(self):\n # Initialize a new binary min heap to store the items\n self.heap = MinHeap()", "def test_mem_available():\n result = _run_metric('mem_available')\n assert result.exit_code == 0", "def check_page_faults(con, host, warning, critical,perf_data):\n warning = warning or 10\n critical = critical or 30\n data=get_server_status(con)\n\n try:\n page_faults=float(data['extra_info']['page_faults']) \n except:\n # page_faults unsupported on the underlaying system\n return exit_with_general_critical(\"page_faults unsupported on the underlaying system\")\n \n err,delta=maintain_delta([page_faults],host,\"page_faults\")\n if err==0:\n page_faults_ps=delta[1]/delta[0]\n message = \"Page faults : %.2f ps\" % page_faults_ps\n message+=performance_data(perf_data,[(\"%.2f\" %page_faults_ps,\"page_faults_ps\",warning,critical)])\n return check_levels(page_faults_ps,warning,critical,message)\n else:\n return exit_with_general_warning(\"problem reading data from temp file\")", "def test_noMemoryFromAccept(self):\n return self._acceptFailureTest(ENOMEM)", "def __init__(self):\n self.min_heap = []\n self.max_heap = []", "def process_memory():\n process = psutil.Process()\n return int(convert.bytetomb(process.memory_info().rss))", "def validate(self):\n if not self.os_repos:\n raise ValueError(\"No OS repository available for OS {}\".format(\n self.operating_system.name))\n if not self.template:\n raise ValueError(\"No autoinstallation template specified\")\n if not self.installer_template:\n raise ValueError(\"No installer command line template specified\")\n if not self.system_profile._gateway:\n raise ValueError(\"No gateway interface present\")\n\n self.system_profile.hypervisor.validate()\n\n for iface in self.system_profile.ifaces:\n iface.validate()\n\n # verify gateway interface has IP address and gateways\n if not self.system_profile.list_gateway_networks():\n raise ValueError(\n \"Gateway interface {} has no IP address\"\n \" or gateway route\".format(\n self.system_profile._gateway.os_device_name\n ))\n\n # verify that total partition size is not bigger than disk size\n failing_volume_ids = []\n for volume in [volume for volume in self.system_profile.volumes\n if isinstance(volume, (self.DasdVolume,\n self.ZfcpVolume))]:\n total_part_size = sum(\n [partition.size for partition in 
volume.partitions])\n if total_part_size > volume.size:\n failing_volume_ids.append(str(volume))\n\n if failing_volume_ids:\n raise ValueError(\n \"Partitioning exceeds volume size for volumes {}\".format(\n failing_volume_ids))", "def __init__(self):\n self.max_heap = list()\n self.min_heap = list()", "def _cli_validate(self, settings, remaining_argv):\n return None", "def heapify(self):\n heapify(self._heap)", "def test_PSA_ONLY_PROCESS(self):\n self.verify_references_to_prerequisites(processes.PSA_ONLY_PROCESS)", "def _check_virtualbox():\n # Adapted from https://github.com/giampaolo/psutil/issues/132#issuecomment-44017679\n # to avoid race conditions\n for proc in psutil.process_iter():\n try:\n if proc.name() == 'VBoxHeadless':\n raise CommandError('S2E uses KVM to build images. VirtualBox '\n 'is currently running, which is not '\n 'compatible with KVM. Please close all '\n 'VirtualBox VMs and try again.')\n except NoSuchProcess:\n pass", "def validate(self):", "def validate(self):", "def validate(self):\n # should this just be folded into the constructor for ProgramNode?\n for func in self.functions:\n func.validate()\n self.validated = True", "def check_core_allocations(host, cpu_counts):\n total_platform_cores = 0\n total_vswitch_cores = 0\n total_shared_cores = 0\n total_isolated_cores = 0\n for s in range(0, len(host.nodes)):\n available_cores = len(host.cpu_lists[s])\n platform_cores = cpu_counts[s][constants.PLATFORM_FUNCTION]\n vswitch_cores = cpu_counts[s][constants.VSWITCH_FUNCTION]\n shared_cores = cpu_counts[s][constants.SHARED_FUNCTION]\n isolated_cores = cpu_counts[s][constants.ISOLATED_FUNCTION]\n requested_cores = \\\n platform_cores + vswitch_cores + shared_cores + isolated_cores\n if requested_cores > available_cores:\n raise wsme.exc.ClientSideError(\n \"More total logical cores requested than present on Processor \"\n \"%s (%s cores).\" % (s, available_cores))\n total_platform_cores += platform_cores\n total_vswitch_cores += vswitch_cores\n total_shared_cores += shared_cores\n total_isolated_cores += isolated_cores\n\n # Validate Platform cores\n if ((constants.CONTROLLER in host.subfunctions) and\n (constants.WORKER in host.subfunctions)):\n if total_platform_cores < 2:\n raise wsme.exc.ClientSideError(\"%s must have at least two cores.\" %\n constants.PLATFORM_FUNCTION)\n elif total_platform_cores == 0:\n raise wsme.exc.ClientSideError(\"%s must have at least one core.\" %\n constants.PLATFORM_FUNCTION)\n for s in range(1, len(host.nodes)):\n if cpu_counts[s][constants.PLATFORM_FUNCTION] > 0:\n raise wsme.exc.ClientSideError(\n \"%s cores can only be allocated on Processor 0\" %\n constants.PLATFORM_FUNCTION)\n\n # Validate shared cores\n for s in range(0, len(host.nodes)):\n shared_cores = cpu_counts[s][constants.SHARED_FUNCTION]\n if host.hyperthreading:\n shared_cores /= 2\n if shared_cores > 1:\n raise wsme.exc.ClientSideError(\n '%s cores are limited to 1 per processor.'\n % constants.SHARED_FUNCTION)\n\n # Validate vswitch cores\n if total_vswitch_cores != 0:\n vswitch_type = cutils.get_vswitch_type(pecan.request.dbapi)\n if constants.VSWITCH_TYPE_NONE == vswitch_type:\n raise wsme.exc.ClientSideError(\n ('vSwitch cpus can only be used with a vswitch_type '\n 'specified.'))\n\n vswitch_physical_cores = total_vswitch_cores\n if host.hyperthreading:\n vswitch_physical_cores /= 2\n if vswitch_physical_cores > VSWITCH_MAX_CORES:\n raise wsme.exc.ClientSideError(\n \"The %s function can only be assigned up to %s cores.\" %\n (constants.VSWITCH_FUNCTION.lower(), 
VSWITCH_MAX_CORES))\n\n # Validate Isolated cores\n # We can allocate platform cores on numa 0, otherwise all isolated\n # cores must in a contiguous block after the platform cores.\n if total_isolated_cores > 0:\n if total_vswitch_cores != 0 or total_shared_cores != 0:\n raise wsme.exc.ClientSideError(\n \"%s cores can only be configured with %s and %s core types.\" %\n (constants.ISOLATED_FUNCTION, constants.PLATFORM_FUNCTION,\n constants.APPLICATION_FUNCTION))\n has_application_cpus = False\n for s in range(0, len(host.nodes)):\n numa_counts = cpu_counts[s]\n isolated_cores_requested = \\\n numa_counts[constants.ISOLATED_FUNCTION]\n if has_application_cpus and isolated_cores_requested:\n raise wsme.exc.ClientSideError(\n \"%s and %s cpus must be contiguous\" %\n (constants.PLATFORM_FUNCTION, constants.ISOLATED_FUNCTION))\n platform_cores_requested = \\\n numa_counts[constants.PLATFORM_FUNCTION]\n available_cores = len(host.cpu_lists[s])\n\n if platform_cores_requested + isolated_cores_requested \\\n != available_cores:\n has_application_cpus = True\n\n reserved_for_applications = len(host.cpus) - total_platform_cores - \\\n total_vswitch_cores\n if reserved_for_applications <= 0:\n raise wsme.exc.ClientSideError(\n \"There must be at least one unused core for %s.\" %\n constants.APPLICATION_FUNCTION)", "def _bids_validate():\n vadlidator_args = ['--config.error=41']\n exe = os.getenv('VALIDATOR_EXECUTABLE', 'bids-validator')\n\n if platform.system() == 'Windows':\n shell = True\n else:\n shell = False\n\n bids_validator_exe = [exe, *vadlidator_args]\n\n def _validate(bids_root):\n cmd = [*bids_validator_exe, bids_root]\n run_subprocess(cmd, shell=shell)\n\n return _validate", "def validate(self):\n pass", "def validate(self):\n pass", "def validate(self):\n pass", "def validate(self):\n pass", "def validate(self):\n pass", "def validate(self):\n pass", "def validate(self):\n pass", "def validate(self):\n pass", "def allocatememory(self):\n pass", "def check_process_full(self) -> None:\n if len(self.process_queue) >= self.max_processes:\n task_name, sp = self.process_queue.pop()\n sp.wait()", "def validate(self):\n ...", "def _CheckStatusWorkerProcess(self, pid):\n # TODO: Refactor this method, simplify and separate concerns (monitoring\n # vs management).\n self._RaiseIfNotRegistered(pid)\n\n process = self._processes_per_pid[pid]\n\n process_status = self._QueryProcessStatus(process)\n if process_status is None:\n process_is_alive = False\n else:\n process_is_alive = True\n\n process_information = self._process_information_per_pid[pid]\n used_memory = process_information.GetUsedMemory() or 0\n\n if self._worker_memory_limit and used_memory > self._worker_memory_limit:\n logger.warning((\n 'Process: {0:s} (PID: {1:d}) killed because it exceeded the '\n 'memory limit: {2:d}.').format(\n process.name, pid, self._worker_memory_limit))\n self._KillProcess(pid)\n\n if isinstance(process_status, dict):\n self._rpc_errors_per_pid[pid] = 0\n status_indicator = process_status.get('processing_status', None)\n\n else:\n rpc_errors = self._rpc_errors_per_pid.get(pid, 0) + 1\n self._rpc_errors_per_pid[pid] = rpc_errors\n\n if rpc_errors > self._MAXIMUM_RPC_ERRORS:\n process_is_alive = False\n\n if process_is_alive:\n rpc_port = process.rpc_port.value\n logger.warning((\n 'Unable to retrieve process: {0:s} (PID: {1:d}) status via '\n 'RPC socket: http://localhost:{2:d}').format(\n process.name, pid, rpc_port))\n\n processing_status_string = 'RPC error'\n status_indicator = 
definitions.STATUS_INDICATOR_RUNNING\n else:\n processing_status_string = 'killed'\n status_indicator = definitions.STATUS_INDICATOR_KILLED\n\n process_status = {\n 'processing_status': processing_status_string}\n\n self._UpdateProcessingStatus(pid, process_status, used_memory)\n\n # _UpdateProcessingStatus can also change the status of the worker,\n # So refresh the status if applicable.\n for worker_status in self._processing_status.workers_status:\n if worker_status.pid == pid:\n status_indicator = worker_status.status\n break\n\n if status_indicator in definitions.ERROR_STATUS_INDICATORS:\n logger.error((\n 'Process {0:s} (PID: {1:d}) is not functioning correctly. '\n 'Status code: {2!s}.').format(process.name, pid, status_indicator))\n\n self._TerminateProcessByPid(pid)\n\n replacement_process = None\n replacement_process_name = 'Worker_{0:02d}'.format(\n self._last_worker_number)\n for replacement_process_attempt in range(\n self._MAXIMUM_REPLACEMENT_RETRIES):\n logger.info((\n 'Attempt: {0:d} to start replacement worker process for '\n '{1:s}').format(replacement_process_attempt + 1, process.name))\n\n replacement_process = self._StartWorkerProcess(replacement_process_name)\n if replacement_process:\n break\n\n time.sleep(self._REPLACEMENT_WORKER_RETRY_DELAY)\n\n if not replacement_process:\n logger.error(\n 'Unable to create replacement worker process for: {0:s}'.format(\n process.name))", "def testNumberOfEvents(self):\n event_heap = psort.PsortEventHeap()\n self.assertEqual(event_heap.number_of_events, 0)", "def validate(self):\r\n if self._validated_binaries:\r\n return\r\n\r\n with self._valid_executable('java') as java:\r\n if self._minimum_version:\r\n version = self._get_version(java)\r\n if version < self._minimum_version:\r\n raise self.Error('The java distribution at %s is too old; expecting at least %s and'\r\n ' got %s' % (java, self._minimum_version, version))\r\n\r\n try:\r\n self._validated_executable('javac') # Calling purely for the check and cache side effects\r\n self._is_jdk = True\r\n except self.Error:\r\n if self._jdk:\r\n raise", "def _check_validity(self):\n pass", "def logMemoryStats():\n class MemoryStatusEx(ctypes.Structure):\n \"\"\" MEMORYSTATUSEX \"\"\"\n kaFields = [\n ( 'dwLength', ctypes.c_ulong ),\n ( 'dwMemoryLoad', ctypes.c_ulong ),\n ( 'ullTotalPhys', ctypes.c_ulonglong ),\n ( 'ullAvailPhys', ctypes.c_ulonglong ),\n ( 'ullTotalPageFile', ctypes.c_ulonglong ),\n ( 'ullAvailPageFile', ctypes.c_ulonglong ),\n ( 'ullTotalVirtual', ctypes.c_ulonglong ),\n ( 'ullAvailVirtual', ctypes.c_ulonglong ),\n ( 'ullAvailExtendedVirtual', ctypes.c_ulonglong ),\n ];\n _fields_ = kaFields; # pylint: disable=invalid-name\n\n def __init__(self):\n super(MemoryStatusEx, self).__init__();\n self.dwLength = ctypes.sizeof(self);\n\n try:\n oStats = MemoryStatusEx();\n ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(oStats));\n except:\n reporter.logXcpt();\n return False;\n\n reporter.log('Memory statistics:');\n for sField, _ in MemoryStatusEx.kaFields:\n reporter.log(' %32s: %s' % (sField, getattr(oStats, sField)));\n return True;", "def _heapify(self):\n for _ in range(len(self.elements)):\n for i in range(len(self.elements)-1, 0, -1):\n parentPosition = (i-1)/2 # defaults to int i.e. 7/2=3, and 6/2=3\n if parentPosition < 0:\n parentPosition = 0\n \n # change this condition to '>' if coding for max-heap. 
This is for min-heap.\n if self.elements[i] < self.elements[parentPosition]:\n self.elements[i], self.elements[parentPosition] = self.elements[parentPosition], self.elements[i]", "def __init__(self):\n self.minheap = []\n self.maxheap = []", "def heappop(heap):\n pass", "def __verify_arguments(self):\n if len(self.__pointer_data) == 0:\n raise ValueError(\n \"Input data is empty (size: '%d').\" % len(self.__pointer_data)\n )\n\n if self.__number_clusters <= 0:\n raise ValueError(\n \"Amount of cluster (current value: '%d') for allocation should be greater than 0.\"\n % self.__number_clusters\n )\n\n if self.__numlocal < 0:\n raise ValueError(\n \"Local minima (current value: '%d') should be greater or equal to 0.\"\n % self.__numlocal\n )\n\n if self.__maxneighbor < 0:\n raise ValueError(\n \"Maximum number of neighbors (current value: '%d') should be greater or \"\n \"equal to 0.\" % self.__maxneighbor\n )", "def __init__(self):\n self.maxHeap = []\n self.minHead = []" ]
[ "0.5956563", "0.5820966", "0.5391256", "0.5279991", "0.52643716", "0.52414745", "0.5221388", "0.51908994", "0.5157347", "0.51529896", "0.5116927", "0.50405", "0.50393564", "0.5023326", "0.5012351", "0.5010514", "0.5008411", "0.50031483", "0.4997377", "0.49946463", "0.49777916", "0.4977714", "0.49690604", "0.4964627", "0.49629122", "0.4962077", "0.49487498", "0.49274477", "0.49074703", "0.49005723", "0.48901573", "0.4883832", "0.4881872", "0.488184", "0.48535037", "0.48535037", "0.48535037", "0.48520812", "0.48506558", "0.4844745", "0.484116", "0.483577", "0.4823727", "0.48191074", "0.48163524", "0.48103058", "0.4796781", "0.4765035", "0.47633338", "0.47624037", "0.4761818", "0.4748889", "0.47487128", "0.47448125", "0.47185794", "0.47123098", "0.47075853", "0.4702733", "0.47025707", "0.46970353", "0.46922418", "0.46824062", "0.46809623", "0.46565735", "0.46473718", "0.46375215", "0.4630388", "0.46086916", "0.46037015", "0.45944452", "0.45866084", "0.4584853", "0.45832267", "0.4582966", "0.45813087", "0.45813087", "0.45782375", "0.4574009", "0.4573327", "0.45733112", "0.45733112", "0.45733112", "0.45733112", "0.45733112", "0.45733112", "0.45733112", "0.45733112", "0.45699567", "0.45647055", "0.45634583", "0.45590857", "0.45582193", "0.45479873", "0.45406556", "0.45332405", "0.45303985", "0.45258096", "0.4524257", "0.45208877", "0.45176828" ]
0.76484793
0
Gets the process tree using Inductive Miner DirectlyFollows
def apply(log, parameters=None):
    if parameters is None:
        parameters = {}
    decreasingFactor = parameters[
        "decreasingFactor"] if "decreasingFactor" in parameters else constants.DEFAULT_DEC_FACTOR
    activity_key = parameters[pm4_constants.PARAMETER_CONSTANT_ACTIVITY_KEY] if pm4_constants.PARAMETER_CONSTANT_ACTIVITY_KEY in parameters else xes.DEFAULT_NAME_KEY
    log = attributes_filter.filter_log_on_max_no_activities(log, max_no_activities=constants.MAX_NO_ACTIVITIES,
                                                            parameters=parameters)
    filtered_log = auto_filter.apply_auto_filter(log, parameters=parameters)
    activities_count = attributes_filter.get_attribute_values(filtered_log, activity_key)
    activities = list(activities_count.keys())
    start_activities = list(start_activities_filter.get_start_activities(filtered_log, parameters=parameters).keys())
    end_activities = list(end_activities_filter.get_end_activities(filtered_log, parameters=parameters).keys())
    dfg = dfg_factory.apply(filtered_log, parameters=parameters)
    dfg = clean_dfg_based_on_noise_thresh(dfg, activities, decreasingFactor * constants.DEFAULT_DFG_CLEAN_MULTIPLIER,
                                          parameters=parameters)
    tree = inductive_miner.apply_tree_dfg(dfg, parameters=parameters, activities=activities,
                                          start_activities=start_activities, end_activities=end_activities)
    parameters["format"] = "svg"
    gviz = pt_vis_factory.apply(tree, parameters=parameters)
    gviz_base64 = base64.b64encode(str(gviz).encode('utf-8'))
    return get_base64_from_gviz(gviz), None, "", "xes", activities, start_activities, end_activities, gviz_base64, [], "tree", "freq", None, "", activity_key
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _prog_nodes(self):\n\n self.arbor._grow_tree(self)\n my_node = self\n while my_node is not None:\n yield my_node\n ancestors = list(my_node.ancestors)\n if ancestors:\n my_node = my_node.arbor.selector(ancestors)\n else:\n my_node = None", "def part1(input_lines):\n # This is a DAG problem. We need to form a dependency graph.\n tower = get_tower(input_lines)\n return find_root(tower)", "def extract_process_tree(\n procs: pd.DataFrame,\n schema: \"ProcSchema\", # type: ignore # noqa: F821\n debug: bool = False,\n) -> pd.DataFrame:\n # Clean data\n procs_cln, schema = _clean_proc_data(procs, schema)\n\n # Merge parent-child\n merged_procs = _merge_parent_by_time(procs_cln, schema)\n if debug:\n _check_merge_status(procs_cln, merged_procs, schema)\n\n # extract inferred parents\n merged_procs_par = _extract_inferred_parents(merged_procs, schema)\n if debug:\n _check_inferred_parents(merged_procs, merged_procs_par)\n\n # Create Process and parent Keys\n _assign_proc_key(\n merged_procs_par,\n Col.proc_key,\n Col.new_process_lc,\n schema.process_id,\n schema.time_stamp,\n )\n _assign_proc_key(\n merged_procs_par,\n Col.parent_key,\n Col.parent_proc_lc,\n schema.parent_id,\n Col.timestamp_orig_par,\n )\n return merged_procs_par", "def _subtree_preorder(self, p):\n yield p # visit p first before visiting its subtrees\n for c in self.children(p):\n for pos in self._subtree_preorder(c):\n yield pos", "def Trees_preOrder_traversal():\n # Python2 ported to Python3 via 2to3-3.7\n # URL:https://www.hackerrank.com/challenges/tree-preorder-traversal/problem\n def preOrder(root):\n # inorder: left root right\n # preorder: root, left, right 5,3,1,4,7,6,8\n # postorder: left,right, root\n # 5\n # 3 7\n # 1 4 6 8\n out = []\n to_proccess = [root]\n while to_proccess:\n node = to_proccess.pop()\n out.append(node.info)\n for child in [node.right, node.left]:\n if child:\n to_proccess.append(child)\n print(\" \".join(map(str, out)))\n\n def preOrder_recursive(root):\n def _preOrder(node):\n out = []\n out.append(node.info)\n for child in [node.left, node.right]:\n if child:\n out.extend(_preOrder(child))\n return out\n print(\" \".join(map(str, _preOrder(root))))", "def _subtree_preorder(self, p):\n yield p # visit p before its subtrees\n for c in self.children(p): # for each child c\n for other in self._subtree_preorder(c): # do preorder of c's subtree\n yield other # yielding each to our caller", "def tree(ctx):\n hokusai.print_command_tree(ctx.find_root().command)", "def program(self, p):\n return Tree('program', self.flatten([p[0], self.assemble_pre(p), p[-1], self.assemble_post(p)]))", "def _subtree_preorder(self, p):\n yield p # visit p before its subtrees\n for c in self.children(p): # for each child c\n for other in self._subtree_preorder(c): # do preorder of c's subtree\n yield other # yielding each to our caller", "def preorder(self):\n if not self.is_empty():\n for p in self._subtree_preorder(self.root()): # start recursion\n yield p", "def tree(self):\n # type: () -> Optional[Module]\n return self._tree", "def preorder(self):\n if not self.is_empty():\n for p in self._subtree_preorder(self.root()): # start recursion\n yield p", "def get_move(self, state):\n # this method should only be called when self is real root.,so that's here where we can should use mutiprocess\n if self._root.is_leaf(): # no expanded children yet\n action_probs, _ = self._policy(state)\n self._root.expand(action_probs)\n\n the_children = self._root._children\n i = 0\n sorted_children = sorted(the_children.items(), 
key=lambda act_node: act_node[1].get_value(self._c_puct))\n for child_node in sorted_children:\n i += 1\n child_tree = MCTS(policy_value_fn,root=child_node[1])\n state_copy = copy.deepcopy(state)\n state_copy.do_move(child_node[0])\n visits_count = 0\n for j in range(0,relu(1200-i*20),10): # at least run one time\n child_tree._playout(copy.deepcopy(state_copy))\n visits_count += 1\n self._root.update(-child_tree.get_root_node().last_leafvalue,visits_count=visits_count) # update real root\n child_tree.get_root_node().set_parent(self._root) # to link the sub tree\n\n '''\n for n in range(self._n_playout):\n # get top n (assumed to be 6) nodes from children\n # step1 let all children of root have chance to run in parallel\n # adjust the round count of children by value\n if n%6 == 0:\n the_children = self._root._children\n top_n = sorted(the_children.items(),key=lambda act_node: act_node[1].get_value(self._c_puct))[:6]\n for child_node in top_n:\n # child_tree = MCTS(policy_value_fn,copy.deepcopy(child_node)) # use copy because we will use it in multiprocess\n child_tree = MCTS(policy_value_fn,\n child_node) \n state_copy = copy.deepcopy(state)\n state_copy.do_move(child_node[0])\n child_tree._playout(state_copy)\n self._root.update(-child_tree.get_root_node().last_leafvalue) # update real root\n child_tree.get_root_node().set_parent(self._root) # to link the sub tree\n # self._root.get_children()[child_node[0]] = child_tree.get_root_node() # copy sub tree\n '''\n\n '''\n return max(self._root._children.items(),\n # key=lambda act_node: act_node[1].get_visits())[0]\n key=lambda act_node: act_node[1].get_value(self._c_puct))[0]\n '''\n\n for n in range(300):\n state_copy = copy.deepcopy(state)\n self._playout(state_copy)\n return max(self._root._children.items(),\n key=lambda act_node: act_node[1].get_value(self._c_puct))[0]", "def preorder(self):\n return (node for node in self.get_preorder(self.root))", "def preorder(self):\n\n traversal = []\n self.preorder_helper(self.root, traversal)\n return traversal", "def traverse_graph_start(graph):\n\n def traverse(graph, node):\n\n children = [int(c) for c in graph[node][\"children\"]]\n tagged_children = []\n for child in children:\n ellipsed_parents = [int(p) for p in graph[child][\"ellipsed_parents\"]]\n # if the child is explicit\n if node not in ellipsed_parents:\n if graph[child][\"terminal\"] == \"yes\":\n tagged_children.append(ParentedTree(graph[child][\"tag\"], [graph[child][\"text\"]]))\n else:\n tagged_children.append(traverse(graph, child))\n # if the child is ellipsed\n else:\n ellipsis_tag = get_ellipsis_tag_from_graph(graph, child)\n tagged_children.append(ParentedTree(ellipsis_tag, []))\n \n tree = ParentedTree(graph[node][\"tag\"], tagged_children)\n\n return tree\n \n tree = traverse(graph, 0)\n positions = [pos for pos in tree.treepositions() if pos not in tree.treepositions(\"leaves\")]\n rev_positions = [pos for pos in reversed(positions)]\n for pos_i, pos in enumerate(rev_positions):\n # append starting_node tag to the previous node\n if tree[pos].label().startswith(\"start\"):\n prev_pos_i = pos_i + 1\n prev_pos = rev_positions[prev_pos_i]\n tree[prev_pos].set_label(tree[prev_pos].label() + tree[pos].label())\n del tree[pos]\n\n return tree", "def inference_graph(self, input_data):\n return self.inference_ops.tree_predictions(\n input_data, self.variables.tree, self.variables.tree_thresholds,\n self.variables.node_sums,\n valid_leaf_threshold=self.params.valid_leaf_threshold)", "def preorder(self):\n if not 
self.is_empty():\n for p in self._subtree_preorder(self.root()):\n yield p", "def preorder(self):\n if not self.is_empty():\n for p in self._subtree_preorder(self.root()):\n yield p", "def execute(self):\n if len(self._tree) > 0:\n return self._tour(self._tree.root(),0,[]) # start the recursion", "def root_node(self):\n return self.process_tree", "def _minlex_postorder_traversal(self, root):\n\n # We compute a dictionary mapping from internal node ID to min leaf ID\n # under the node, using a first postorder traversal\n min_leaf = {}\n for u in self.nodes(root, order=\"postorder\"):\n if self.is_leaf(u):\n min_leaf[u] = u\n else:\n min_leaf[u] = min(min_leaf[v] for v in self.children(u))\n\n stack = []\n\n def push(nodes):\n stack.extend(sorted(nodes, key=lambda u: min_leaf[u], reverse=True))\n\n # The postorder traversal isn't robust to using virtual_root directly\n # as a node because we depend on tree.parent() returning the last\n # node we visiting on the path from \"root\". So, we treat this as a\n # special case.\n is_virtual_root = root == self.virtual_root\n roots = self.roots if root == -1 or is_virtual_root else [root]\n\n push(roots)\n parent = NULL\n while len(stack) > 0:\n v = stack[-1]\n children = [] if v == parent else self.children(v)\n if len(children) > 0:\n # The first time visiting a node, we push onto the stack its children\n # in order of reverse min leaf ID under each child. This guarantees\n # that the earlier children visited have smaller min leaf ID,\n # which is equivalent to the minlex condition.\n push(children)\n else:\n # The second time visiting a node, we pop and yield it, and\n # we update the parent variable\n parent = self.get_parent(v)\n yield stack.pop()\n if is_virtual_root:\n yield self.virtual_root", "def Visit(self, node, parent, is_group):\n command = cli_tree.Command(node, parent, include_hidden_flags=False)\n return command", "def get_predeccessor(self, root):\n # Take a right traversal and go left\n # As much as you can. 
Last node will be successor\n root = root.left\n while root.right:\n root = root.right\n return root", "def preorder_visit(t: Tree, act: Callable[[Tree], Any]) -> None:\n act(t)\n for child in t.children:\n preorder_visit(child, act)", "def run_process(self, process, piece='a', number=1, comment='', force_refresh=True):\n branch = self.get_piece(piece)\n node = self._insert_node(process, piece, number, branch, comment)\n if force_refresh: # workaround to force the root node to update\n self.refresh_tree()\n return node", "def filetree(self) -> P:\n ...", "def pslist(self) -> Generator[dict, None, None]:\n\n # Function to switch fields to represent a parent\n def _convert_to_parent_fields(process: dict) -> dict:\n output = {}\n for left, right in [\n (FieldNames.PROCESS_IMAGE, FieldNames.PARENT_PROCESS_IMAGE),\n (FieldNames.PROCESS_ID, FieldNames.PARENT_PROCESS_ID),\n (FieldNames.COMMAND_LINE, FieldNames.PARENT_COMMAND_LINE),\n (FieldNames.PROCESS_IMAGE_PATH, FieldNames.PARENT_PROCESS_IMAGE_PATH),\n ]:\n output[right] = process[left]\n\n return output\n\n # Use the pstree dict output to get a mapping from pid -> proc\n procs = self.session.plugins.pstree()._make_process_dict()\n\n parent_procs: Dict[int, dict] = {}\n\n # Add the system idle process\n parent_procs[0] = {\n FieldNames.PARENT_PROCESS_ID: 0,\n FieldNames.PARENT_COMMAND_LINE: \"\",\n FieldNames.PARENT_PROCESS_IMAGE: \"System Idle Process\",\n FieldNames.PARENT_PROCESS_IMAGE_PATH: \"\\\\\",\n }\n\n for proc in procs.values():\n\n parent_pid = proc.InheritedFromUniqueProcessId\n\n # Get the current processes info\n command_line = str(proc.Peb.ProcessParameters.CommandLine)\n image_path = str(proc.Peb.ProcessParameters.ImagePathName)\n\n if int(proc.pid) == 4:\n process_image = \"SYSTEM\"\n process_image_path = \"\\\\\"\n else:\n process_image, process_image_path = split_path(image_path)\n\n current_proc = {\n FieldNames.EVENT_TYPE: EventTypes.PROCESS_LAUNCHED,\n FieldNames.PROCESS_ID: int(proc.pid),\n FieldNames.COMMAND_LINE: command_line,\n FieldNames.PROCESS_IMAGE: process_image,\n FieldNames.PROCESS_IMAGE_PATH: process_image_path,\n }\n\n # Keep track of the processes.\n self.processes[int(proc.pid)] = current_proc\n\n current_as_parent = _convert_to_parent_fields(current_proc)\n parent_procs[int(proc.pid)] = current_as_parent\n\n # Parse the parent process\n if parent_pid not in parent_procs:\n\n # Do we the _EPROCESS for this process?\n if int(parent_pid) in procs:\n parent = procs[int(parent_pid)]\n parent_image_path = parent.Peb.ProcessParameters.ImagePathName\n\n parent_process_image, parent_process_image_path = split_path(\n str(parent_image_path)\n )\n\n parent_proc = {\n FieldNames.PARENT_PROCESS_ID: int(parent.pid),\n FieldNames.PARENT_COMMAND_LINE: parent.Peb.ProcessParameters.CommandLine,\n FieldNames.PARENT_PROCESS_IMAGE: parent_process_image,\n FieldNames.PARENT_PROCESS_IMAGE_PATH: parent_process_image_path,\n }\n\n # If not, make a dummy one with the PID\n else:\n parent_proc = {\n FieldNames.PARENT_PROCESS_ID: int(parent_pid),\n FieldNames.PARENT_COMMAND_LINE: \"\",\n FieldNames.PARENT_PROCESS_IMAGE: \"\",\n FieldNames.PARENT_PROCESS_IMAGE_PATH: \"\",\n }\n\n parent_procs[int(parent_pid)] = parent_proc\n\n yield {**current_proc, **parent_procs[int(parent_pid)]}", "def pstree(request):\n import subprocess\n import urllib\n import re\n\n if request.REQUEST.get('sleep'):\n sleep = int(request.REQUEST.get('sleep'))\n time.sleep(sleep)\n\n # Call ps\n p = subprocess.Popen(args=[\"ps\", \"-axwwo\", 
\"user,pid,ppid,pgid,cputime,command\"], stdout=subprocess.PIPE)\n\n children = {}\n first = True\n if \"subtree\" in request.GET:\n subtree = long(request.GET.get(\"subtree\"))\n else:\n subtree = None\n subtree_top = None\n\n # Parse in the data\n for row in p.stdout:\n if first:\n # skip header line\n first = False\n continue\n data = user, pid, ppid, pgid, cputime, command = re.split(\"\\s+\", row.rstrip(), 5)\n ps = PsLine(*data)\n if ps.pid == subtree:\n subtree_top = ps\n if ps.ppid in children:\n children[ps.ppid].append(ps)\n else:\n children[ps.ppid] = [ps]\n\n # Utility method to create the tree\n def fill(root, current_path):\n root.path = current_path + str(root.pid)\n root.children = children.get(root.pid, [])\n for child in root.children:\n fill(child, root.path + \"/\")\n\n\n # Start with init and create the tree\n assert len(children[0]) == 1\n top = children[0][0]\n fill(top, \"/\")\n tops = [top]\n\n # If we're only interested in a subtree, pick that out explicitly\n if subtree_top:\n tops = subtree_top.children\n\n # Methods to manipulate the extant paths list; used by the template.\n def add(p):\n paths = list(request.GET.getlist(\"paths\")) # make a copy\n paths.append(p)\n query = [urllib.urlencode([(\"paths\", x)]) for x in paths]\n if subtree:\n query.append('subtree=' + str(subtree))\n return request.path + \"?\" + \"&\".join(query)\n def remove(p):\n paths = list(request.GET.getlist(\"paths\")) # make a copy\n paths.remove(p)\n query = [urllib.urlencode([(\"paths\", x)]) for x in paths]\n if subtree:\n query.append('subtree=' + str(subtree))\n return request.path + \"?\" + \"&\".join(query)\n\n paths = request.GET.getlist(\"paths\")\n return render(\"html-table.treeview.ajax.mako\", request, dict(\n tops=tops, show_all=request.GET.get(\"show_all\"), \n open_paths=paths, request_path=request.path,\n add=add, remove=remove, depth=request.GET.get('depth', 0)))", "def inference_graph(self, input_data):\n probabilities = []\n for i in range(self.params.num_trees):\n with tf.device(self.device_assigner.get_device(i)):\n tree_data = input_data\n if self.params.bagged_features:\n tree_data = self._bag_features(i, input_data)\n probabilities.append(self.trees[i].inference_graph(tree_data))\n with tf.device(self.device_assigner.get_device(0)):\n all_predict = tf.pack(probabilities)\n return tf.reduce_sum(all_predict, 0) / self.params.num_trees", "def preorder(self, u=NULL):\n return self._ll_tree.get_preorder(u)", "def preorder_print(self, start, traversal):\n return traversal", "def _subtree_first_position(self, p):\n walk = p\n while self.left(walk) is not None:\n walk = self.left(walk) # keep walking left\n return walk", "def ascend(self):\n node = self.parent\n while node:\n yield node\n node = node.parent", "def getNextNodeUsingTotalStepsToTravel(kGoalState):\n \n global fringe\n global solutions\n\n\n\n minNode = None\n minCost = 99999999999\n minNodeIndex = -1\n\n \n pnode = None\n pcost = None\n\n if len(solutions)>0 and solutions[0] != None:\n pnode = solutions[0];\n pcost = getTotalStepsToReachGoalState(pnode,kGoalState)\n # print pnode, pcost\n # raw_input()\n \n\n\n\n for idx,node in enumerate(fringe):\n #get the heu. 
function values\n g_value =getTotalStepsToReachGoalState(node,kGoalState)\n \n\n if g_value < minCost:\n minNode = node\n minNodeIndex = idx\n minCost = g_value\n\n\n fringe.pop(minNodeIndex)\n c = getTotalStepsToReachGoalState(minNode,kGoalState)\n if pnode != None:\n if c > pcost:\n minNode = None\n \n return minNode", "def pre_order_traversal(self):\n\n elements = []\n\n ##visit base node\n elements.append(self.data)\n\n ##visit left tree\n if self.left:\n elements += self.left.pre_order_traversal()\n\n #visit right tree\n if self.right:\n elements += self.right.pre_order_traversal()\n\n return elements", "def xincludeProcessTree(self):\n ret = libxml2mod.xmlXIncludeProcessTree(self._o)\n return ret", "def preorder_iterator(node):\n yield node\n for child in node.children:\n yield from preorder_iterator(child)", "def __call__(self, node):\n if not node.children: return;\n if len(node.children) <= 2: return;\n if self.IsGoodTriple(node.children): return;\n if len(node.children) >= 8: raise ValueError(\"Too long to decompose\");\n children = map(lambda x : [self.GetLabel(x)], node.children);\n #print \"Guessing %s\" % children;\n print node.ToPrettyString();\n res = self.path_finder.FindPath(children, self.GetLabel(node));\n if len(res) != 0:\n print res[0];\n tnodes, count = self.Transform(res[0][1], node, 0);\n node.children = tnodes.children;\n else:\n raise ValueError(\"Find no production chains to decompose for %s\" % children);\n print node.ToPrettyString();", "def predecessor_tree(g, pred_map):\n\n _check_prop_scalar(pred_map, \"pred_map\")\n pg = Graph()\n libgraph_tool_generation.predecessor_graph(g._Graph__graph,\n pg._Graph__graph,\n _prop(\"v\", g, pred_map))\n return pg", "def tree(ctx):\n root_cmd = _build_command_tree(ctx.find_root().command)\n _print_tree(root_cmd)", "def inorder_iterative(root: Optional[Node]):\n stack: list = []\n\n while root or stack:\n while root:\n stack.append(root)\n root = root.left\n\n root = stack.pop()\n\n if root:\n print(root.data, end=\" \")\n root = root.right\n print()", "def pre_order_helper(self, node, alist=[], verbose=False):\n if node:\n if verbose:\n print(node.data)\n alist.append(node)\n pre_order_helper(node.left, alist, verbose)\n pre_order_helper(node.right, alist, verbose)", "def execute(self):\n if self.backend is not None:\n self._process_node(self.ast)\n return self.backend.get_output()\n else:\n raise UnrollerError(\"backend not attached\")", "def execute(self):\n if len(self._tree) > 0:\n return self._tour(self._tree.root(), 0, [])", "def execute(self):\n if len(self._tree) > 0:\n return self._tour(self._tree.root(), 0, [])", "def pre_order_traversal(self) -> Queue:\n q = Queue() # Initializing queue\n if self.root is None: # If tree is empty\n return q\n\n self.pre_order_helper(self.root, q)\n return q", "def execute(self):\n if len(self._tree) > 0:\n return self.tour(self._tree.root(), 0, [])", "def pre_order_traversal(self) -> Queue:\n # initialize\n q = Queue()\n\n #binary search tree == empty\n if self.root is None:\n return q\n\n #recursive helper return Queue\n self.pre_order_helper(self.root, q)\n return q", "def breadth_first_traverse(self) -> Generator:\n assist_queue = deque()\n assist_queue.append(self.root_node)\n while assist_queue:\n current_node = assist_queue.popleft()\n yield current_node\n if current_node.children:\n for child in current_node.children:\n assist_queue.append(child)", "def _operation_tree(self):\n\n # initial state\n i = 0\n level = 0\n stack = []\n current = None\n\n def 
_create_operation(args):\n profile_stats = None\n name = args[0].strip()\n args.pop(0)\n if len(args) > 0 and \"Records produced\" in args[-1]:\n records_produced = int(\n re.search(\"Records produced: (\\\\d+)\", args[-1]).group(1)\n )\n execution_time = float(\n re.search(\"Execution time: (\\\\d+.\\\\d+) ms\", args[-1]).group(1)\n )\n profile_stats = ProfileStats(records_produced, execution_time)\n args.pop(-1)\n return Operation(\n name, None if len(args) == 0 else args[0].strip(), profile_stats\n )\n\n # iterate plan operations\n while i < len(self.plan):\n current_op = self.plan[i]\n op_level = current_op.count(\" \")\n if op_level == level:\n # if the operation level equal to the current level\n # set the current operation and move next\n child = _create_operation(current_op.split(\"|\"))\n if current:\n current = stack.pop()\n current.append_child(child)\n current = child\n i += 1\n elif op_level == level + 1:\n # if the operation is child of the current operation\n # add it as child and set as current operation\n child = _create_operation(current_op.split(\"|\"))\n current.append_child(child)\n stack.append(current)\n current = child\n level += 1\n i += 1\n elif op_level < level:\n # if the operation is not child of current operation\n # go back to it's parent operation\n levels_back = level - op_level + 1\n for _ in range(levels_back):\n current = stack.pop()\n level -= levels_back\n else:\n raise Exception(\"corrupted plan\")\n return stack[0]", "def pre_order(self):\n stack = []\n node = self\n while stack or node:\n if node:\n yield node.val\n stack.append(node)\n node = node.left\n else:\n node = stack.pop()\n node = node.right", "def depthOrBreadthFirstSearch(problem, container):\n firstNode = (problem.getStartState(), None, 0, None)#state, action to reach, incremental cost, parent node\n container.push(firstNode)\n visitedStates = []\n while (not container.isEmpty()):\n if problem.getNodesExpandedNum() > MAX_NODES_TO_EXPLORE:\n return None\n curNode = container.pop()\n if (problem.isGoalState(curNode[0])):\n return getStatePathFromNode(curNode, problem)\n for successor in problem.getSuccessors(curNode[0]):\n if not successor[0] in visitedStates:\n successorNode = (successor[0], successor[1], successor[2], curNode)\n visitedStates.append(successor[0])\n container.push(successorNode)\n return None", "def depthFirstSearch(problem):\r\n\t\"*** YOUR CODE HERE ***\"\r\n\r\n\tfrontera = util.Stack()\r\n\testadoInicial= problem.getStartState()\r\n\tfrontera.push((estadoInicial, [],0))\r\n\tvisitados=[]\r\n\tvisitados.append(estadoInicial)\r\n\r\n\twhile not(frontera.isEmpty()):\r\n\t\t(estado, camino, costo) =frontera.pop()\r\n\t\tif(problem.isGoalState(estado)):\r\n\t\t\tbreak\r\n\r\n\t\tsucesores=problem.getSuccessors(estado)\r\n\t\tfor sucesor in sucesores:\r\n\t\t\tif sucesor[0] not in visitados:\r\n\t\t\t\tfrontera.push((sucesor[0], camino + [sucesor[1]], costo + sucesor[2]))\r\n\t\t\t\tvisitados.append(sucesor[0])\r\n\tprint ('Cantidad de nodos en memoria: {}').format(len(frontera.list))\r\n\treturn camino", "def depthFirstSearch(problem):\n #\"*** YOUR CODE HERE ***\"\n\n \"\"\"\n Pseudocode:\n function G RAPH-S EARCH ( problem) returns a solution, or failure\n initialize the frontier using the initial state of problem\n initialize the explored set to be empty\n loop do\n if the frontier is empty then return failure\n choose a leaf node and remove it from the frontier\n if the node contains a goal state then return the corresponding solution\n add the node to the explored 
set\n expand the chosen node, adding the resulting nodes to the frontier\n only if not in the frontier or explored set\n\n \"\"\"\n frontier = util.Stack()\n #print 'Create frontier'\n initial_node = node(problem.getStartState(), 0, [], 0)#(state,depth,path_actions,path_cost)\n frontier.push(initial_node)\n #print 'Push ',repr(initial_node.state)\n frontierSet = set([initial_node.state])\n explored = set() #initialize the explored set to be empty\n\n while True:\n if frontier.isEmpty() == True: raise Exception, \"The frontier was emptied\"#if the frontier is empty then return failure\n currNode = frontier.pop()#HERE1\n frontierSet.remove(currNode.state)\n #print 'Remove',repr(currNode.state)\n #print 'State: ' + repr(currNode.state) + '. Depth: ' + repr(currNode.depth) + '. Path Cost: ' + repr(currNode.path_cost) + '. Path Actions: ' + repr(currNode.path_actions) + '.\\n'\n if problem.isGoalState(currNode.state) == True:\n print 'Goal reached!'\n return currNode.path_actions\n explored.add(currNode.state)\n for succ in problem.getSuccessors(currNode.state):\n #print 'Succ: ',repr(succ[0])\n succNode = node(succ[0], currNode.depth + 1, currNode.path_actions + [succ[1],], currNode.path_cost + succ[2])\n if (succNode.state not in explored):\n # Si hacemos estas verificaciones entonces cuando se encuentra que un estado que se quiere expandir ya esta en la frontera\n # eliminamos ese estado de la frontera y lo expandimos ahora. Osea, damos prioridad a los nodos nuevos\n if(succNode.state in frontierSet):\n # Recurso'i:\n for frontierNode in frontier.list:\n if frontierNode.state == succNode.state:\n frontier.list.remove(frontierNode)\n frontierSet.remove(frontierNode.state)\n # if ((succNode.state not in explored) and (succNode.state not in frontierSet)): \n # Alternativa segun el libro. 
Lo que se hace es que se da prioridad a los nodos viejos.\n\n # Aca no verificaba si ya esta en la frontera porque alteraba el orden en el que se visitan los nodos.\n # Por ejemplo cuando esta pendiente (se genero pero no se expandio) un hijo con un estado,\n # pero en un nivel mas profundo se vuelve a generar el mismo estado y se tiene que expandir.\n # Si seguimos el DFS creo que tendriamos que expandir ese nodo ahi y no en la primera llamada donde quedo pendiente.\n \n frontier.push(succNode)\n #print 'Push ',repr(succNode.state)\n frontierSet.add(succNode.state)\n\n #util.raiseNotDefined()", "def inorder(self):\n if not self.is_empty():\n for p in self._subtree_inorder(self.root()):\n yield p", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n frontier = util.Stack()\n start_node = problem.getStartState()\n\n if problem.isGoalState(start_node):\n return ['Stop']\n frontier.push((start_node,[]))\n explored = set()\n while True:\n if frontier.isEmpty():\n return []\n node = frontier.pop()\n explored.add(node[0])\n for successor in problem.getSuccessors(node[0]):\n nextState, action, cost = successor\n if nextState in explored or nextState in [f[0] for f in frontier.list]:\n continue\n actions = node[1][:]\n actions.append(action)\n new_node = (nextState, actions)\n if problem.isGoalState(new_node[0]):\n return new_node[1]\n frontier.push(new_node)\n #print frontier.list\n return []", "def _for_process_and_descendants(function, proc):\n return (function(proc) +\n sum(function(child)\n for child in proc.get_children(recursive=True)))", "def recursive():\n with Local() as tun:\n tun.call(recursive)", "def pre_order_traversal(self, cur_node=None):\n if cur_node is None:\n cur_node = self.root\n if cur_node is None:\n return\n visited = []\n visited.append(cur_node)\n\n while len(visited) > 0:\n cur_node = visited.pop()\n yield cur_node.data\n if cur_node.right:\n visited.append(cur_node.right)\n if cur_node.left:\n visited.append(cur_node.left)", "def step(tree):\n if type(tree) == list and type(tree[0]) == tuple:#This basically looks for any applications it can do directly. These applications are the ones where the function is already defined through abstraction. 
That's why it checks whether the first element of the list (for application) is an abstraction\n func = tree[0]#The whole function with parameter and body\n name = func[0][1]#Only the parameter\n func = func[1]#Only the body\n arg = tree[1]\n nfunc = replace(name, arg, func)#The replacement of all occurences of the parameter in the body with the argument\n return nfunc\n elif type(tree) == list:\n return [step(tree[0]), step(tree[1])]#recursive checking, again\n elif type(tree) == tuple:\n return (tree[0], step(tree[1]))\n else:\n return tree", "def depth_first_traversal_iterative(self, start):\n try:\n res = []\n stack = Stack([start])\n track = set()\n while stack.top:\n cur_node = stack.pop()\n if cur_node not in track:\n res.append(cur_node)\n track.add(cur_node)\n for child in reversed(self.node_dict[cur_node]):\n stack.push(child)\n except KeyError:\n raise KeyError(str(start) + ' not in graph')\n return res", "def iterativeDeepeningSearch(problem):\n from util import Stack\n \n for max_depth in range(0, 10000000):\n # print max_depth\n st = Stack()\n mapper = {}\n mapper[(problem.getStartState(), 0)] = None #map of (childpos, depth): (parentpos, direction, depth)\n st.push((problem.getStartState(), 0)) # stack of ((x,y) , depth)\n\n while not(st.isEmpty()):\n vertex = st.pop() #( (x,y) , depth )\n depth = vertex[1]\n\n if (problem.isGoalState(vertex[0])):\n c = vertex\n l = []\n while mapper[c] != None:\n tup = mapper[c]\n l.append(tup[1])\n c = tup[0], tup[2]\n l.reverse()\n print \"max_depth: \", max_depth\n print l\n return l\n\n else:\n n_depth = depth + 1 # new depth\n if n_depth < max_depth:\n neigh = problem.getSuccessors(vertex[0])\n # neigh.reverse()\n for child in neigh:\n if (child[0], n_depth) not in mapper:\n st.push((child[0], n_depth))\n mapper[(child[0], n_depth)] = (vertex[0], child[1], depth)", "def _subtree_first_position(self, p):\n \"\"\"will be used by before()\"\"\"\n walk = p\n #recursivly walking to the left child until the left subtree has no child\n while self.left(walk) is not None:\n walk = self.left(walk)\n return walk", "def _subtree_inorder(self, p):\n if self.left(p) is not None: # if left child exists, traverse its subtree\n for other in self._subtree_inorder(self.left(p)):\n yield other\n yield p # visit p between its subtrees\n if self.right(p) is not None: # if right child exists, traverse its subtree\n for other in self._subtree_inorder(self.right(p)):\n yield other", "def preorder_visit(t: Tree, act: Callable[[Tree], None]) -> None:\n if t.value is None:\n pass\n else:\n act(t)\n for subtree in t.children:\n preorder_visit(subtree, act)", "def search(G):\n visited = set()\n \n for v in range(len(G)):\n if v not in visited:\n yield v,v,forward\n visited.add(v)\n stack = [(v,iter(G[v]))]\n while stack:\n parent,children = stack[-1]\n try:\n child = next(children)\n if child in visited:\n yield parent,child,nontree\n else:\n yield parent,child,forward\n visited.add(child)\n stack.append((child,iter(G[child])))\n except StopIteration:\n stack.pop()\n if stack:\n yield stack[-1][0],parent,reverse\n yield v,v,reverse", "def in_order_traversal(self):\n root = self.root\n self.traverse = self.in_order_traversal_node(root)\n return self.traverse", "def __(self):\n self.__pepth__ = self.pdepth(True)\n return self", "def process_tree(tree):\n c = circuit()\n l = line()\n names = {}\n procedures = []\n for lst in tree.children:\n print(lst)\n if type(lst[0]) is str:\n names[lst[0]] = lst[1]\n else:\n procedures.append(lst)\n print(names)\n 
#print(procedures)\n\n for proc in procedures:\n\n proc_elements_names = proc[0]\n proc_name = proc[1]\n\n #print(proc_elements_names)\n #print(proc_name)\n\n if proc_name == \"set_mode\":\n mode_name = proc_elements_names[0]\n if mode_name != \"draw-mode\": \n c.set_mode(mode_name)\n elif mode_name == \"draw-mode\":\n l1 = line()\n # draw mode is different from other modes\n for element in names:\n e = CompleteElement(element)\n e.set_other_attrs(names[element])\n e.process_other_attrs()\n l1.addElement(e)\n c.connectInSeries(l1)\n c.set_mode(\"draw-mode\")\n \n \n if proc_name == \"series\":\n l1 = line()\n for element in proc_elements_names:\n l1.addElement(names[element])\n l = l1\n c.connectInSeries(l)\n #raise SyntaxError(\"Alias {0} referrenced before assignment\".format(item[0]))\n\n elif proc_name == \"parallel\":\n l1 = line()\n for element in proc_elements_names:\n l1.addElement(names[element])\n c.connectInParallel(l1)\n l1 = line()\n\n\n elif proc_name == \"add_parallel\":\n new_element = proc_elements_names[1]\n old_element = proc_elements_names[0]\n l1 = line()\n l1.addElement(names[new_element])\n c.connection.append(l1)\n\n\n elif proc_name == \"add_series\":\n new_element = proc_elements_names[1]\n old_element = proc_elements_names[0]\n for ln in c.connection:\n for e in ln.elements:\n if names[old_element] == e:\n ln.addElement(names[new_element])\n\n\n c.evaluate(\"output.png\")\n #print(c)", "def inorderTraversalIterative(self, root: TreeNode) -> List[int]:\r\n stack = list()\r\n inorder_traversal = list()\r\n node = root\r\n while node or len(stack):\r\n if node:\r\n stack.append(node)\r\n node = node.left\r\n else:\r\n node = stack.pop()\r\n inorder_traversal.append(node.val)\r\n node = node.right\r\n return inorder_traversal", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n \n from game import Directions\n visited = set() # unique elements\n state = problem.getStartState()\n #returns starting agent's position\n waiting_list = util.Stack()\n # LIFO\n # last in first out\n # parents = collections.defaultdict(collections.UserDict)\n parents = {}\n #dictionary\n sequence = []\n #LIFO\n for action in problem.getSuccessors(state):\n # in order to push full-state values\n waiting_list.push(action)\n # enumarating tuple\n\n while not waiting_list.isEmpty():\n state = waiting_list.pop()\n \n visited.add(state[0])\n # node is visited and we wont visit those nodes\n \n for substate in problem.getSuccessors(state[0]):\n # take a look to successors of current node\n \n if substate[0] not in visited:\n # if not in visited \n # saving parents\n parents[substate[0]]={'parent':state} \n # generate new node\n waiting_list.push(substate)\n # push to stack\n if problem.isGoalState(substate[0]): \n target_state = substate \n #finding wayback\n\n\n while target_state[0] in parents.keys():\n temp=parents[target_state[0]]['parent']\n sequence.append(target_state[1])\n target_state = temp\n sequence.append(target_state[1])\n return sequence[::-1]", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n \n util.raiseNotDefined()", "def tree(node):\n subtrees = []\n for arg in node.args:\n subtrees.append(tree(arg))\n s = print_node(node)+pprint_nodes(subtrees)\n return s", "def tree(self):\r\n return self._tree", "def solve(self):\n self.left -= len(self.nodes)\n \n def depths(x,depth = 0):\n depth+=1\n for y in self.graph[x]:\n if y in self.nodes:\n self.nodes.remove(y)\n depth = depths(y,depth)\n return depth\n \n while len(self.nodes):\n x = self.nodes.pop()\n 
self.firstGen.append(depths(x))\n #print self.graph\n #print self.nodes\n #print self.firstGen", "def _buildtree(self):\n self.pricetree = np.zeros((self.steps+1,self.steps+1))\n self.pricetree[0][0] = self.p\n for j in range(self.steps):\n for i in range(j+1):\n self.pricetree[j+1][i+1] = self.pricetree[j][i]*self.down\n self.pricetree[j+1][0] = self.pricetree[j][0]*self.up", "def get_original_tree(self, tree):\n if not tree:\n return\n tree = copy.deepcopy(tree)\n PCFG.__revert_step_4(tree.root)\n PCFG.__revert_step_2(tree.root)\n # Get rid of step 1, namely get rid of S_0 -> S\n new_root = tree.root.children[0]\n new_tree = ParseTree(new_root, tree.probability)\n return new_tree", "def task1(graph, n):\r\n alreadyProcessed = set()\r\n B = [j for j in range(1,n+1)]\r\n position = {B[i]:i for i in range(len(B))}\r\n leftNeighbors = {}\r\n parent = {}\r\n \r\n for v in B:\r\n # nodes processed before the current that have an edge in common are left neighbors\r\n leftNeighbors[v] = set(graph._graph[v]) & alreadyProcessed\r\n alreadyProcessed.add(v)\r\n if leftNeighbors[v]:\r\n # the parent is the closest left neighbor \r\n parent[v] = B[max([position[w] for w in leftNeighbors[v]])]\r\n # if this node's neighbors (other then the parent itself) are not a subset of the parent's neighbors \r\n # it means that it's not a lexOrder\r\n if not leftNeighbors[v] - {parent[v]} <= leftNeighbors[parent[v]]:\r\n return []\r\n return B", "def job_tree(self):\n\n # 1. Enforce depth of 1 for steps\n def depth_one(steps):\n depth_one = []\n for step in steps:\n if type(step) is list:\n if type(step[0]) is list:\n depth_one.append(step[0])\n else:\n depth_one.append(step)\n else:\n depth_one.append([step])\n return depth_one\n\n # 2. Convert steps to list of node objects (0,1,2,3...)\n def assign_nodes(steps):\n nodes = [i for i in range(len(steps))]\n objects = list(\n set([elem for sublist in steps for elem in sublist]))\n\n # checks for multiple src and dst objects -- added when looking for\n # mutiples\n split_objects = []\n for obj in objects:\n if len(obj) > 1:\n new_objs = obj.split(\", \")\n split_objects.extend(new_objs)\n else:\n split_objects.append(obj)\n objects = split_objects\n del(split_objects)\n\n # populate with leafless trees (Node objects, no edges)\n for node in nodes:\n nodes[node] = Node(str(node))\n\n # search for leafy trees\n for obj in objects:\n\n # accounts for multiple drc/dst objects\n leaves = []\n for i, sublist in enumerate(steps):\n for string in sublist:\n if string.count(',') > 0:\n if obj in string:\n leaves.append(i)\n else:\n if obj in sublist:\n leaves.append(i)\n leaves = sorted(list(set(leaves)))\n\n if len(leaves) > 1:\n viable_edges = []\n\n # compute cross-product\n for leaf1 in leaves:\n for leaf2 in leaves:\n if str(leaf1) != str(leaf2) and sorted((leaf1, leaf2)) not in viable_edges:\n viable_edges.append(sorted((leaf1, leaf2)))\n\n # form edge networks\n for edge in viable_edges:\n n1, n2 = nodes[edge[0]], nodes[edge[1]]\n n1.add_edge(n2)\n n2.add_edge(n1)\n nodes[int(n1.name)], nodes[int(n2.name)] = n1, n2\n return nodes\n\n # 3. Determine number of trees and regroup by connected nodes\n def connected_nodes(nodes):\n proto_trees = []\n nodes = set(nodes)\n\n while nodes:\n n = nodes.pop()\n group = {n}\n queue = [n]\n while queue:\n n = queue.pop(0)\n neighbors = n.edges\n neighbors.difference_update(group)\n nodes.difference_update(neighbors)\n group.update(neighbors)\n queue.extend(neighbors)\n proto_trees.append(group)\n return proto_trees\n\n # 4. 
Convert nodes to nested dictionary of parent-children relations\n # i.e. adding depth -- also deals with tree-node sorting and path\n # optimization\n def build_tree_dict(trees, steps):\n # node sorting in trees\n sorted_trees = []\n for tree in trees:\n sorted_trees.append(\n sorted(tree, key=lambda x: int(x.name)))\n\n # retrieve values of the nodes (the protocol's containers)\n # for each tree ... may want to use dictionary eventually\n all_values = []\n for tree in sorted_trees:\n values = [steps[int(node.name)] for node in tree]\n all_values.append(values)\n\n # create relational tuples:\n all_digs = []\n singles = []\n dst_potentials = []\n for tree_idx in range(len(sorted_trees)):\n edge_flag = False\n tree_digs = []\n for node_idx in range(len(sorted_trees[tree_idx])):\n\n # digs: directed graph vectors\n digs = []\n dst_nodes = []\n node_values = all_values[tree_idx][node_idx]\n src_node = str(sorted_trees[tree_idx][node_idx].name)\n\n # ACTION ON MULTIPLE OBJECTS (E.G. TRANSFER FROM SRC -> DST\n # WELLS)\n # Outcome space: {1-1, 1-many, many-1, many-many}\n if len(node_values) == 2:\n # single destination (x-1)\n if node_values[1].count(\",\") == 0:\n dst_nodes = [i for i, sublist in enumerate(\n steps) if node_values[1] == sublist[0]]\n # multiple destinations (x-many)\n elif node_values[1].count(\",\") > 0:\n dst_nodes = []\n for dst in node_values[1].replace(\", \", \"\"):\n for i, sublist in enumerate(steps):\n if i not in dst_nodes and dst == sublist[0]:\n dst_nodes.append(i)\n\n # ACTION ON A SINGLE OBJECT\n elif len(node_values) == 1:\n dst_nodes = [i for i, sublist in enumerate(\n steps) if node_values[0] == sublist[0]]\n\n # Constructing tuples in (child, parent) format\n for dst_node in dst_nodes:\n dig = (int(dst_node), int(src_node))\n digs.append(dig)\n\n # else: an edge-case for dictionaries constructed with no edges\n # initiates tree separation via flag\n if digs != []:\n edge_flag = False\n tree_digs.append(digs)\n else:\n edge_flag = True\n digs = [(int(src_node), int(src_node))]\n tree_digs.append(digs)\n\n # digraph cycle detection: avoids cycles by overlooking set\n # repeats\n true_tree_digs = []\n for digs in tree_digs:\n for dig in digs:\n if tuple(sorted(dig, reverse=True)) not in true_tree_digs:\n true_tree_digs.append(\n tuple(sorted(dig, reverse=True)))\n\n # edge-case for dictionaries constructed with no edges\n if true_tree_digs != [] and edge_flag == False:\n all_digs.append(true_tree_digs)\n elif edge_flag == True:\n all_digs.extend(tree_digs)\n\n # Enforces forest ordering\n all_digs = sorted(all_digs, key=lambda x: x[0])\n\n # job tree traversal to find all paths:\n forest = []\n for digs_set in all_digs:\n\n # pass 1: initialize nodes dictionary\n nodes = OrderedDict()\n for tup in digs_set:\n id, parent_id = tup\n # ensure all nodes accounted for\n nodes[id] = OrderedDict({'id': id})\n nodes[parent_id] = OrderedDict({'id': parent_id})\n\n # pass 2: create trees and parent-child relations\n for tup in digs_set:\n id, parent_id = tup\n node = nodes[id]\n # links node to its parent\n if id != parent_id:\n # add new_node as child to parent\n parent = nodes[parent_id]\n if not 'children' in parent:\n # ensure parent has a 'children' field\n parent['children'] = []\n children = parent['children']\n children.append(node)\n\n desired_tree_idx = sorted(list(nodes.keys()))[0]\n forest.append(nodes[desired_tree_idx])\n return forest\n\n # 5. 
Convert dictionary-stored nodes to unflattened, nested list of\n # parent-children relations\n def dict_to_list(forest):\n forest_list = []\n for tree in forest:\n tString = str(json.dumps(tree))\n tString = tString.replace('\"id\": ', \"\").replace('\"children\": ', \"\").replace(\n '[{', \"[\").replace('}]', \"]\").replace('{', \"[\").replace('}', \"]\")\n\n # find largest repeated branch (if applicable)\n # maybe think about using prefix trees or SIMD extensions for better\n # efficiency\n x, y, length, match = 0, 0, 0, ''\n for y in range(len(tString)):\n for x in range(len(tString)):\n substring = tString[y:x]\n if len(list(re.finditer(re.escape(substring), tString))) > 1 and len(substring) > length:\n match = substring\n length = len(substring)\n\n # checking for legitimate branch repeat\n if \"[\" in match and \"]\" in match:\n hits = []\n index = 0\n if len(tString) > 3:\n while index < len(tString):\n index = tString.find(str(match), index)\n if index == -1:\n break\n hits.append(index)\n index += len(match)\n\n # find all locations of repeated branch and remove\n if len(hits) > 1:\n for start_loc in hits[1:]:\n tString = tString[:start_loc] + \\\n tString[start_loc:].replace(match, \"]\", 1)\n\n # increment all numbers in string to match the protocol\n newString = \"\"\n numString = \"\"\n for el in tString:\n if el.isdigit(): # build number\n numString += el\n else:\n if numString != \"\": # convert it to int and reinstantaite numString\n numString = str(int(numString) + 1)\n newString += numString\n newString += el\n numString = \"\"\n tString = newString\n del newString\n\n forest_list.append(ast.literal_eval(tString))\n return forest_list\n\n # 6. Print job tree(s)\n def print_tree(lst, level=0):\n print(' ' * (level - 1) + '+---' * (level > 0) + str(lst[0]))\n for l in lst[1:]:\n if type(l) is list:\n print_tree(l, level + 1)\n else:\n print(' ' * level + '+---' + l)\n\n # 1\n steps = depth_one(self.object_list)\n # 2\n nodes = assign_nodes(steps)\n # 3\n proto_forest = connected_nodes(nodes)\n # 4\n forest = build_tree_dict(proto_forest, steps)\n # 5\n self.forest_list = dict_to_list(forest)\n # 6\n print(\"\\n\" + \"A suggested Job Tree based on container dependency: \\n\")\n for tree_list in self.forest_list:\n print_tree(tree_list)", "def depth_first_iterate(execution_graph, starting_node, direction,\n through_flows=True, through_retries=True,\n through_tasks=True):\n initial_nodes_iter, connected_to_functors = _extract_connectors(\n execution_graph, starting_node, direction,\n through_flows=through_flows, through_retries=through_retries,\n through_tasks=through_tasks)\n stack = list(initial_nodes_iter)\n while stack:\n node = stack.pop()\n node_attrs = execution_graph.nodes[node]\n if not node_attrs.get('noop'):\n yield node\n try:\n node_kind = node_attrs['kind']\n connected_to_functor = connected_to_functors[node_kind]\n except KeyError:\n pass\n else:\n stack.extend(connected_to_functor(node))", "def reconstruct_preorder(preorder):\n\tdef reconstruct_preorder_helper(preorder_iter):\n\t\tsubtree_key = next(preorder_iter, None)\n\t\tif subtree_key is None:\n\t\t\treturn None\n\t\treturn BinaryTreeNode(\n\t\t\tsubtree_key,\n\t\t\treconstruct_preorder_helper(preorder_iter),\n\t\t\treconstruct_preorder_helper(preorder_iter))\n\treturn reconstruct_preorder_helper(iter(preorder))", "def preorder(self,node):\n if node is not None:\n print node.value,\n self.preorder(node.left)\n self.preorder(node.right)", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n 
util.raiseNotDefined()", "def traverse_graph_start_without_pos(graph):\n\n def traverse(graph, node):\n\n children = [int(c) for c in graph[node][\"children\"]]\n tagged_children = []\n for child in children:\n ellipsed_parents = [int(p) for p in graph[child][\"ellipsed_parents\"]]\n # if the child is explicit\n if node not in ellipsed_parents:\n if graph[child][\"terminal\"] == \"yes\":\n tagged_children.append(ParentedTree(graph[child][\"tag\"], [graph[child][\"text\"]]))\n else:\n tagged_children.append(traverse(graph, child))\n # if the child is ellipsed\n else:\n ellipsis_tag = get_ellipsis_tag_from_graph(graph, child)\n tagged_children.append(ParentedTree(ellipsis_tag, []))\n \n tree = ParentedTree(graph[node][\"tag\"], tagged_children)\n\n return tree\n \n tree = traverse(graph, 0)\n positions = [pos for pos in tree.treepositions() if pos not in tree.treepositions(\"leaves\")]\n rev_positions = [pos for pos in reversed(positions)]\n for pos_i, pos in enumerate(rev_positions):\n # append starting_node tag to the previous non-terminal node\n if tree[pos].label().startswith(\"start\"):\n prev_pos_i = pos_i + 1\n while tree[rev_positions[prev_pos_i]].height() == 2:\n prev_pos_i += 1\n prev_pos = rev_positions[prev_pos_i]\n tree[prev_pos].set_label(tree[prev_pos].label() + tree[pos].label())\n del tree[pos]\n\n return tree", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n \"\"\"\n ALGORITH FOR DFS\n \n function graph-search(problem, fringe) retuen a sloution or failure\n \n closed <-- an empty set\n fringe <-- insert (make-node (initial-state [problem]), fringe)\n \n loop do :\n if fringe is empty then return failure\n node <-- Remove-front (fringe)\n if goal-test (problem, state[node]) then return node\n if state[node] is not in closed then \n add STATE[node] to closed\n for child-node in EXPAND(STATE[node],problem) do\n fringe <-- Insert (child-node, fringe)\n end\n end\n \"\"\"\n\n templist=[]\n explored = set()\n fringe = util.Stack()\n #print \"the stat node is : \", problem.getStartState()\n\n fringe.push((problem.getStartState(),templist))\n while (not fringe.isEmpty()):\n (currentNode,currDir) = fringe.pop()\n # print \"Pacman is currently at : \", currentNode\n if problem.isGoalState(currentNode):\n # print \" Goal State Found : \", currentNode\n pathToGoal = currDir\n break\n if not (currentNode in explored):\n # print \"Adding current node to explored\"\n explored.add(currentNode)\n for childNode in problem.getSuccessors(currentNode):\n # print \"child node : \", childNode , \" is added \"\n fringe.push((childNode[0],currDir+[childNode[1]]))\n\n return pathToGoal", "def insert_process_before(self, process, uuid,\n comment='', force_refresh=True):\n target = self.get_node(uuid)\n if target == self.root_node:\n raise Exception('Error: Cannot insert before the root node.')\n\n parent = target.parent\n children = list(target.get_siblings(include_self=True))\n node = ProcessNode.objects.create(process=process, piece=parent.piece,\n comment=comment, parent_id=parent.id)\n for child in children:\n child.parent = node\n child.save()\n\n if force_refresh:\n self.refresh_tree()\n return node", "def pre_traversal(self):\n if self.root is None:\n return None\n else:\n node_stack = list()\n output_list = list()\n node = self.root\n while node is not None or len(node_stack):\n # if node is None which means it comes from a leaf-node' right,\n # pop the stack and get it's right node.\n # continue the circulating like this\n if node is None:\n node = node_stack.pop().right\n 
continue\n # save the front node and go next when left node exists\n while node.left is not None:\n node_stack.append(node)\n output_list.append(node.get_element())\n node = node.left\n output_list.append(node.get_element())\n node = node.right\n return output_list", "def each_step(graph):\n\n steps = graph.topological_sort()\n steps.reverse()\n\n for step in steps:\n deps = graph.downstream(step.name)\n yield (step, deps)", "def traverse(self):\n return self.root.traverse()", "def tree():\n nobv.visual_tree()", "def _create_global_step(self, graph):\n return _create_global_step(graph)", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n # util.raiseNotDefined()\n\n # print(\"Start:\", problem.getStartState())\n # print(\"Is the start a goal?\", problem.isGoalState(problem.getStartState()))\n # print(\"Start's successors:\", problem.getSuccessors(problem.getStartState()))\n\n # Initialize a frontier, and push the initial state into the frontier\n frontier = util.Stack()\n frontier.push([(problem.getStartState(), 'move', 0)])\n # Initialize a explored set to store the visited nodes\n exploredSet = set()\n\n # Check the content of frontier\n while not frontier.isEmpty():\n stateList = list()\n stateList = frontier.pop()\n # print (stateList)\n # What we focus on is the next state, not the (previous state + next state), so we should take the last element\n nextState = stateList[len(stateList) - 1]\n # Check the current state is goal or not\n if problem.isGoalState(nextState[0]):\n # Initial a path, which is the way to the goal state\n path = list()\n for eachMove in stateList:\n path.append(eachMove[1])\n # If the initial state is the goal state, there's no need to explore other nodes, so that's called special condition\n if len(path) == 1:\n return path[0]\n # This is the normal condition, we should convey the path except the first one, because we haven't define what's \"move\"\n else:\n return path[1:]\n # If this is a state which we don't visit, add it to the explored set(this is called GSA)\n if not nextState[0] in exploredSet:\n exploredSet.add(nextState[0])\n # Give me your child nodes\n for childState in problem.getSuccessors(nextState[0]):\n nextStateList = stateList[:]\n # we focus on the path, so we have to record the every move from the initial state to the current one\n nextStateList.append(childState)\n frontier.push(nextStateList)\n\n # Or maybe there's no way to the goal state\n else:\n return \"There's no way.\"", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n fringeList = util.Stack()\n print \"fringeList\",fringeList\n closedList = {str(problem.getStartState()): ([])} #Hash Map to maintain state to path\n print \"closed list:\", closedList\n isGoalStateArrived = False\n\n # Push start state into fringeList\n fringeList.push((problem.getStartState()))\n\n while not isGoalStateArrived and not fringeList.isEmpty():\n currentNode = fringeList.pop()\n print \"currentNode\",currentNode\n currentNodePath = closedList[str(currentNode)]\n print \"currentNodepath:\",currentNodePath\n # Explore children\n childrenOfCurrentNode = problem.getSuccessors(currentNode)\n print \"childrenOfCurrentNode:\",childrenOfCurrentNode\n for childNode in childrenOfCurrentNode:\n if str(childNode[0]) not in closedList:\n path = copy.copy(currentNodePath)\n path.append(childNode[1])\n print \"child [0] %s, child [1] %s\", childNode[0],childNode[1]\n print \"path \", path\n fringeList.push(childNode[0])\n closedList[str(childNode[0])] = path # Put parent node in closed List\n 
if problem.isGoalState(childNode[0]):\n isGoalStateArrived = True\n goalState = childNode[0]\n break\n\n if isGoalStateArrived:\n #print closedList[str(problem.getStartState())]\n return closedList[str(goalState)]\n \"util.raiseNotDefined()\"", "def tree_search(problem, frontier):\n compteur = 0\n stop = False\n frontier.append(Node(problem.initial))\n while frontier and not stop:\n compteur+=1\n node = frontier.pop()\n if problem.goal_test(node.state):\n return node\n if(compteur <= limit):\n frontier.extend(node.expand(problem))\n else:\n stop = True\n \n return None", "def process_struct(self):\n offset = 0\n current_node = None\n while offset < self.length:\n cmd, next_offset = self.get_command(offset)\n # Process Commands\n if cmd == self.CMD_Node_Start:\n # Create new node and return next offset\n next_offset, node = self.new_node(next_offset)\n # Is there a root node? if not make this node root\n if not self.root:\n self.root = node\n node.phandles = {} # Add dict of phandles to root node.\n # If we are the root node, do not add link to self\n if current_node:\n current_node.add_child(node)\n node.set_parent(current_node)\n # Make our self be the current node\n current_node = node\n elif cmd == self.CMD_Node_End:\n if not current_node.is_root():\n current_node = current_node.get_parent()\n elif cmd == self.CMD_Property:\n next_offset = self.new_property(current_node, next_offset, self.properties)\n elif cmd == self.CMD_Stream_End:\n self.root.process_deferreds()\n self.do_process_passes()\n break\n offset = next_offset", "def main() -> int:\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument(\n \"--version\", \"-V\", action=\"store_true\", help=\"print program version\"\n )\n parser.add_argument(\n \"--verbose\", \"-v\", help=\"-v for info, -vv for debug\", action=\"count\", default=0\n )\n parser.add_argument(\n \"--danglers-first\",\n action=\"store_true\",\n help=\"list all independent repositories first\",\n )\n parser.add_argument(\n \"--danglers-only\",\n action=\"store_true\",\n help=\"only list independent repositories\",\n )\n parser.add_argument(\n \"--no-danglers\",\n action=\"store_true\",\n help=\"drop all independent repositories from graph\",\n )\n parser.add_argument(\n \"--graph\",\n \"-g\",\n action=\"store_true\",\n help=\"return dot graph of build dependencies\",\n )\n parser.add_argument(\n \"--reverse\",\n \"-r\",\n action=\"store_true\",\n help=\"reverse output, only works with --graph, shows parallel build stream\",\n )\n parser.add_argument(\n \"directories\", type=Path, nargs=\"*\", help=\"directories to graph\"\n )\n args = parser.parse_args()\n\n if args.version:\n print(\"controlgraph {}\".format(__version__))\n return 0\n\n # set up logging\n logging.basicConfig(\n format=\"[%(levelname)s] %(message)s\", level=10 * (3 - min(args.verbose, 2))\n )\n\n if not args.directories:\n args.directories = [p for p in Path.cwd().iterdir() if p.is_dir()]\n\n # Get graph and print\n dep_graph = graph(parse_all_controlfiles(args.directories))\n if args.no_danglers:\n isolates = list(nx.isolates(dep_graph))\n dep_graph.remove_nodes_from(isolates)\n\n if args.graph:\n if args.reverse:\n dep_graph = dep_graph.reverse()\n nx.nx_pydot.write_dot(dep_graph, sys.stdout)\n else:\n if args.danglers_first:\n # Put all isolated nodes first in list\n isolates = list(nx.isolates(dep_graph))\n dep_graph.remove_nodes_from(isolates)\n build_order = list(nx.dfs_postorder_nodes(dep_graph))\n print(\" \".join(isolates + build_order))\n elif args.danglers_only:\n 
print(\" \".join(list(nx.isolates(dep_graph))))\n else:\n print(\" \".join(list(nx.dfs_postorder_nodes(dep_graph))))\n\n return 0", "def treePolicy(node):\n while not node.getState().checkTerminal():\n if node.checkFullyExpanded():\n node = findBestChild(node, True)\n else:\n return expandNode(node)\n return node", "def HierarchyIterator(obj):\n while obj:\n yield obj\n for opChild in SplineInputGeneratorHelper.HierarchyIterator(obj.GetDown()):\n yield opChild\n obj = obj.GetNext()", "def traverse_preorder(self, root):\n if root is not None:\n print(root.data)\n self.traverse_preorder(root.left)\n self.traverse_preorder(root.right)", "def preorderTraversal(self, root: TreeNode) -> List[int]:\n def preorder(root,seq):\n if root is None:\n return seq\n seq.append(root.val)\n preorder(root.left,seq)\n preorder(root.right,seq)\n return seq\n \n prelist= []\n return preorder(root,prelist)" ]
[ "0.58344984", "0.5687344", "0.5568197", "0.5373498", "0.536982", "0.5367479", "0.5362519", "0.53463984", "0.5334048", "0.53192425", "0.5254958", "0.5220183", "0.5207111", "0.5187977", "0.5185732", "0.5169036", "0.5158708", "0.5143826", "0.5143826", "0.5135224", "0.5132148", "0.5126587", "0.5118868", "0.50852746", "0.5077901", "0.5061578", "0.5049207", "0.5037477", "0.5035192", "0.5020215", "0.5017411", "0.50103873", "0.5004039", "0.49980214", "0.4978178", "0.49619672", "0.49604002", "0.49572602", "0.49520054", "0.49502933", "0.49499676", "0.49323398", "0.49311364", "0.48913074", "0.48786107", "0.48786107", "0.48751625", "0.48707485", "0.48698914", "0.48670882", "0.48670104", "0.486034", "0.48541787", "0.48493865", "0.48484927", "0.4835003", "0.48331115", "0.48323867", "0.48291615", "0.48259002", "0.48231345", "0.48228607", "0.48195153", "0.4814814", "0.48120558", "0.48100936", "0.48070776", "0.48058185", "0.48040685", "0.47990662", "0.47970408", "0.4796865", "0.4791851", "0.47902837", "0.47846624", "0.4778931", "0.47696438", "0.47653434", "0.4763501", "0.4756638", "0.4755469", "0.4754987", "0.47525102", "0.47336924", "0.4732818", "0.473244", "0.47321558", "0.47260767", "0.4725078", "0.47249776", "0.47213373", "0.47168306", "0.47155264", "0.47100428", "0.47077078", "0.4691905", "0.46915713", "0.468658", "0.46845144", "0.46840906", "0.4682343" ]
0.0
-1
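The negatives collected above keep circling one pattern: a depth-first traversal driven by an explicit stack (often called frontier) together with a membership set so that already-seen states are not expanded twice. A minimal, self-contained sketch of that pattern follows; the graph shape and all names are illustrative assumptions, not code taken from any of the quoted projects.

def depth_first_order(graph, start):
    """Iterative DFS over an adjacency-dict graph; returns the visit order."""
    frontier = [start]      # explicit stack of states still to expand
    explored = set()        # states that have already been expanded
    order = []
    while frontier:
        state = frontier.pop()
        if state in explored:
            continue
        explored.add(state)
        order.append(state)
        # push children in reverse so the first child is expanded first
        for child in reversed(graph.get(state, [])):
            if child not in explored:
                frontier.append(child)
    return order

print(depth_first_order({"A": ["B", "C"], "B": ["D"], "C": [], "D": []}, "A"))  # ['A', 'B', 'D', 'C']

Keeping a separate membership set (the frontierSet in the first negative) makes the containment test O(1); scanning the stack itself, as in nextState in [f[0] for f in frontier.list], is linear in the frontier size.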
Runs the component. The Annual Total Savings, Annual Costs, Annual Net Benefit, NPV Benefits, NPV Costs, NPV Net Benefits, Benefit Cost Ratio, Levelized Cost of Energy, and Internal Rate of Return will all be calculated. There must be a known Heat Recovery project for this component to run.
def run (self, scalers = {'capital costs':1.0}): self.was_run = True self.reason = "OK" tag = self.cd['file id'].split('+') if len(tag) > 1 and tag[1] != 'transmission': self.was_run = False self.reason = "Not a transmission project." return if not self.cd["model electricity"]: self.was_run = False self.reason = "Electricity must be modeled to analyze "+\ "transmission. It was not for this community." return if np.isnan(float(self.comp_specs['distance to community'])): self.was_run = False self.reason = ("There are no communities within 30 miles with" " lower cost of electricity.") return self.calc_average_load() try: self.get_intertie_values() except ValueError: self.was_run = False self.reason = ("Could not find data on community to intertie to.") return self.calc_pre_intertie_generation() self.calc_intertie_offset_generation() if self.cd["model heating fuel"]: # change these below self.calc_lost_heat_recovery() # see NOTE* #~ return if self.cd["model financial"]: # AnnualSavings functions (don't need to write) self.get_diesel_prices() # change these below self.calc_capital_costs() self.calc_annual_electric_savings() self.calc_annual_heating_savings() # AnnualSavings functions (don't need to write) self.calc_annual_total_savings() self.calc_annual_costs(self.cd['interest rate'], scalers['capital costs']) self.calc_annual_net_benefit() self.calc_npv(self.cd['discount rate'], self.cd["current year"]) #~ print self.benefit_cost_ratio self.calc_levelized_costs(self.proposed_generation_cost)
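The run method above delegates the money math to AnnualSavings-style helpers (calc_annual_costs, calc_npv, calc_levelized_costs, benefit_cost_ratio) whose bodies are not part of this record. As a rough orientation for the terms the query lists, here is a minimal sketch of how NPV benefits, NPV costs, and a benefit-cost ratio can be derived from yearly streams; every name, the year-zero discounting convention, and the toy numbers are assumptions, not the component's actual implementation.

import numpy as np

def npv(rate, cashflows):
    """Discount a stream of yearly cashflows (year 0 first) at the given rate."""
    years = np.arange(len(cashflows))
    return float(np.sum(np.asarray(cashflows, dtype=float) / (1.0 + rate) ** years))

def benefit_cost_ratio(rate, annual_benefits, annual_costs):
    npv_benefits = npv(rate, annual_benefits)
    npv_costs = npv(rate, annual_costs)
    return npv_benefits / npv_costs if npv_costs else float("inf")

benefits = [0, 12000, 12000, 12000]   # annual total savings, starting the year after construction
costs = [50000, 1500, 1500, 1500]     # capital cost up front, then O&M
print(round(benefit_cost_ratio(0.03, benefits, costs), 2))  # about 0.63

With those made-up numbers the discounted benefits do not cover the discounted costs, which is exactly the situation the benefit-cost ratio is meant to flag.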
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run (self, scalers = {'capital costs':1.0}):\n\n self.was_run = True\n self.reason = \"OK\"\n tag = self.cd['file id'].split('+')\n if len(tag) > 1 and tag[1] != 'wind':\n self.was_run = False\n self.reason = \"Not a Wind project\"\n return\n\n try:\n #~ self.generation = self.forecast.get_generation(self.start_year)\n self.calc_average_load()\n self.calc_generation_wind_proposed()\n except AttributeError:\n self.diagnostics.add_warning(self.component_name,\n \"could not be run\")\n self.was_run = False\n self.reason = (\"Could not Calculate average load or \"\n \"proposed generation\")\n return\n\n\n\n\n #~ #~ print self.comp_specs['wind class']\n # ??? some kind of failure message\n if self.average_load is None or \\\n (self.average_load > self.comp_specs['average load limit'] and \\\n self.load_offset_proposed > 0):\n #~ float(self.comp_specs['wind class']) > \\\n #~ self.comp_specs['minimum wind class'] and \\\n\n # if the average load is greater that the lower limit run this component\n # else skip\n\n self.calc_transmission_losses()\n self.calc_excess_energy()\n self.calc_net_generation_wind()\n self.calc_electric_diesel_reduction()\n self.calc_diesel_equiv_captured()\n self.calc_loss_heat_recovery()\n self.calc_reduction_diesel_used()\n\n\n if self.cd[\"model financial\"]:\n # AnnualSavings functions (don't need to write)\n self.get_diesel_prices()\n\n # change these below\n self.calc_capital_costs()\n self.calc_maintenance_cost()\n self.calc_annual_electric_savings()\n self.calc_annual_heating_savings()\n\n # AnnualSavings functions (don't need to write)\n self.calc_annual_total_savings()\n self.calc_annual_costs(self.cd['interest rate'],\n scalers['capital costs'])\n self.calc_annual_net_benefit()\n self.calc_npv(self.cd['discount rate'], self.cd[\"current year\"])\n #~ print self.benefit_cost_ratio\n self.calc_levelized_costs(self.maintenance_cost)\n else:\n #~ print \"wind project not feasible\"\n self.was_run = False\n if self.load_offset_proposed <= 0:\n self.reason = \"Proposed load offset less than 0\"\n else:\n self.reason = \\\n \"Average load too small for viable wind generation.\"\n self.diagnostics.add_note(self.component_name,\n \"communities average load is not large enough to consider project\")\n #~ print self.benefit_cost_ratio", "def calc_annual_electric_savings (self):\n costs = self.comp_specs['diesel generator o&m']\n\n for kW in costs.keys():\n try:\n if self.average_load < int(kW):\n maintenance = self.comp_specs['diesel generator o&m'][kW]\n break\n except ValueError:\n maintenance = self.comp_specs['diesel generator o&m'][kW]\n\n self.baseline_generation_cost = maintenance + \\\n (self.pre_intertie_generation_fuel_used * self.diesel_prices)\n\n maintenance = self.capital_costs * \\\n (self.comp_specs['percent o&m'] / 100.0)\n self.proposed_generation_cost = maintenance + \\\n self.intertie_offset_generation_fuel_used * \\\n self.intertie_diesel_prices\n self.annual_electric_savings = self.baseline_generation_cost -\\\n self.proposed_generation_cost\n #~ print len(self.annual_electric_savings)\n #~ print 'self.annual_electric_savings',self.annual_electric_savings", "def calc_annual_electric_savings (self):\n price = self.diesel_prices\n #TODO add rural v non rural\n self.base_generation_cost = self.electric_diesel_reduction * price\n\n\n self.proposed_generation_cost = self.maintenance_cost\n\n self.annual_electric_savings = self.base_generation_cost - \\\n self.proposed_generation_cost\n #~ print 
'self.annual_electric_savings',self.annual_electric_savings", "def run (self, scalers = {'capital costs':1.0}):\n self.was_run = True\n self.reason = \"OK\"\n\n tag = self.cd['file id'].split('+')\n if len(tag) > 1 and tag[1] != 'biomass_pellet':\n self.was_run = False\n self.reason = (\"Not a biomass pellet project.\")\n return\n\n if not self.cd[\"on road system\"]:\n self.diagnostics.add_warning(self.component_name,\n \"not on road system\")\n self.max_boiler_output = 0\n self.heat_displaced_sqft = 0\n self.biomass_fuel_consumed = 0\n self.fuel_price_per_unit = 0\n self.heat_diesel_displaced = 0\n self.reason = \\\n \"Not on road or marine highway system, so it is assumed that\" +\\\n \" pellets cannot be delivered cost effectively.\"\n return\n\n if np.isnan(float(self.comp_specs['peak month % of total'])):\n self.diagnostics.add_warning(self.component_name,\n \"bad config value for 'peak month % of total'\")\n self.max_boiler_output = 0\n self.heat_displaced_sqft = 0\n self.biomass_fuel_consumed = 0\n self.fuel_price_per_unit = 0\n self.heat_diesel_displaced = 0\n self.reason = \"bad config value for 'peak month % of total'\"\n return\n\n if self.cd[\"model heating fuel\"]:\n self.calc_heat_displaced_sqft()\n self.calc_energy_output()\n efficiency = self.comp_specs[\"pellet efficiency\"]\n self.calc_max_boiler_output(efficiency)\n factor = self.comp_specs['capacity factor']\n self.calc_biomass_fuel_consumed(factor)\n self.calc_diesel_displaced()\n\n\n if self.cd[\"model financial\"]:\n self.get_diesel_prices()\n\n self.calc_capital_costs()\n self.calc_maintenance_cost()\n\n\n self.fuel_price_per_unit = self.cd['pellet price']\n\n self.calc_proposed_biomass_cost(self.fuel_price_per_unit)\n self.calc_displaced_heating_oil_price()\n\n\n self.calc_annual_electric_savings()\n self.calc_annual_heating_savings()\n\n self.calc_annual_total_savings()\n self.calc_annual_costs(self.cd['interest rate'],\n scalers['capital costs'])\n self.calc_annual_net_benefit()\n self.calc_npv(self.cd['discount rate'], self.cd[\"current year\"])\n\n fuel_cost = self.biomass_fuel_consumed * self.fuel_price_per_unit\n self.calc_levelized_costs(self.maintenance_cost + fuel_cost)", "def run_module(self):\n try:\n if self.in_distributed_mode:\n self.output_dict['insurance_usd'] = 0\n self.output_dict['construction_permitting_usd'] = 0\n self.output_dict['project_management_usd'] = 0\n self.output_dict['bonding_usd'] = 0\n self.output_dict['markup_contingency_usd'] = 0\n self.output_dict['engineering_usd'] = 0\n self.output_dict['site_facility_usd'] = 0\n self.output_dict['total_management_cost'] = self.input_dict['override_total_management_cost']\n\n else:\n self.output_dict['insurance_usd'] = self.insurance()\n self.output_dict['construction_permitting_usd'] = self.construction_permitting()\n self.output_dict['project_management_usd'] = self.project_management()\n self.output_dict['bonding_usd'] = self.bonding()\n self.output_dict['markup_contingency_usd'] = self.markup_contingency()\n self.output_dict['engineering_usd'] = self.engineering_foundations_collection_sys()\n self.output_dict['site_facility_usd'] = self.site_facility()\n self.output_dict['total_management_cost'] = self.total_management_cost()\n self.output_dict['management_cost_csv'] = self.outputs_for_detailed_tab()\n self.output_dict['mangement_module_type_operation'] = self.outputs_for_module_type_operation()\n return 0, 0 # module ran successfully\n except Exception as error:\n traceback.print_exc()\n print(f\"Fail {self.project_name} 
ManagementCost\")\n return 1, error # module did not run successfully", "def do_work(self) -> None:\n self._get_btc_eur_15min()\n print(\n f\"1 BTC = {self.btc_eur_15min} EUR\"\n f\"\\t\\t(15min delayed market price)\"\n )\n\n self._get_eur_gbp_last_month()\n print(\n f\"1 EUR = {self.eur_gbp_last_month} GBP\"\n f\"\\t(last month average rate)\"\n )\n\n self._get_btc_gbp_15min()\n print(\n f\"1 BTC = {self.btc_gbp_15min:.6f} GBP\"\n f\"\\t(BTC 15min delayed market price; GBP latest daily average rate)\"\n )", "def compute(self):\r\n #obtain and validate the inputs\r\n startBalance = self.amount.getNumber()\r\n rate = self.rate.getNumber() / 100\r\n years = self.period.getNumber()\r\n if startBalance == 0 or rate == 0 or years == 0:\r\n return\r\n \r\n #Set the header for the table\r\n result = \"%4s%18s%10s%16s\\n\" % (\"Year\", \"Starting Balance\", \"Interest\", \"Ending Balance\")\r\n\r\n #Compute and apend the results for each year\r\n totalInterest = 0.0\r\n for year in range(1, years + 1):\r\n interest = startBalance * rate\r\n endBalance = startBalance + interest\r\n result += \"%4d%18.2f%10.2f%16.2f\\n\" % (year, startBalance, interest, endBalance)\r\n startBalance = endBalance\r\n totalInterest += interest\r\n\r\n #Append the totals for the period\r\n result += \"Ending balance: $%0.2f\\n\" % endBalance\r\n result += \"Total interest earned: $%0.2f\\n\" % totalInterest\r\n\r\n #Output the results while preserving read-only status\r\n self.outputArea[\"state\"] = \"normal\"\r\n self.outputArea.setText(result)\r\n self.outputArea[\"state\"] = \"disabled\"", "def compute (self):\r\n #obtain and validate the inputs\r\n startBalance = self.amount.getNumber()\r\n rate = self.rate.getNumber()/100\r\n years = self.period.getNumber()\r\n if startBalance == 0 or rate == 0 or years == 0:\r\n return\r\n #set the header for the table\r\n result = \"%4s%18s%10s%16s\\n\" % (\"Year\", \"Starting Balance\", \"Interest\", \"Ending Balance\")\r\n #Compute and append the results for each year\r\n totalInterest = 0.0\r\n for year in range (1, years + 1):\r\n interest = startBalance * rate\r\n endBalance = startBalance + interest\r\n result += \"%4d%18.2f%10.2f%16.2f\\n\" % (year, startBalance, interest, endBalance)\r\n #the ending balance for year 1 wil lbe the starting balance for year 2 and so on\r\n startBalance = endBalance\r\n totalInterest += interest\r\n #Append the totals for the entire period - final output for the whole thing\r\n result += \"Ending Balance: $%0.2f\\n\" % endBalance\r\n result += \"Total interest earned: $%0.2f\\n\" % totalInterest\r\n #Output the result while preserving read-only status\r\n self.outputArea[\"state\"] = \"normal\"\r\n self.outputArea.setText(result)\r\n self.outputArea[\"state\"] = \"disabled\"", "def calc_monthly_cash(self):\n # shortcut to self\n s = self\n\n # Start the DataFrames, base and w/ heat pump\n # Each starts with just an index column with the month\n # Make shortcut variables as well.\n s.df_mo_dol_base = dfb = s.df_mo_en_base[[]].copy()\n s.df_mo_dol_hp = dfh = s.df_mo_en_base[[]].copy()\n\n # Determine the base electric use by month. Approach is different \n # if there is electric heat.\n is_electric_heat = (s.exist_heat_fuel_id == constants.ELECTRIC_ID)\n if not is_electric_heat:\n # Fuel-based space heat.\n # The User supplied a January and a May kWh usage value that should\n # be used for the base case (no heat pump) total electricity use.\n # But, need to come up with a kWh value for every month. 
Do that by\n # adjusting the kWh pattern available for this city.\n #\n # Determine the multiplier to adjust to the pattern to the actual.\n pat_use = np.array(s.city.avg_elec_usage)\n mult = (s.elec_use_jan - s.elec_use_may) / (pat_use[0] - pat_use[4])\n pat_use = mult * pat_use\n pat_use += s.elec_use_jan - pat_use[0]\n\n # The electricity use in the base case\n dfb['elec_kwh'] = pat_use\n\n # rough estimate of a base demand: not super critical, as the demand rate \n # structure does not have blocks. Assume a load factor of 0.4\n dfb['elec_kw'] = dfb.elec_kwh / (DAYS_IN_MONTH * 24.0) / 0.4\n\n else:\n # Electric Heat Case\n # No Jan and May values are provided. Instead we have possibly some\n # DHW, clothes drying, and cooking. Plus, we have base lights/other appliances.\n # And finally we have the Elecric heat making up the base electric usage.\n\n # First, DHW, Clothes Drying and Cooking. Assume flat use through year.\n # This is a numpy array because DAYS_IN_MONTH is an array.\n elec_kwh = s.fuel_other_uses / 8760.0 * DAYS_IN_MONTH * 24.0\n\n # Now lights and other misc. appliances. Some monthly variation, given\n # by LIGHTS_OTHER_PAT.\n elec_kwh += s.lights_other_elec / 8760.0 * LIGHTS_OTHER_PAT * DAYS_IN_MONTH * 24.0\n\n # For the peak demand of those two categories of use, just assume 40% load factor.\n elec_kw = elec_kwh / (DAYS_IN_MONTH * 24.0) / 0.4\n\n # Now add in space heating kWh and kW\n elec_kwh += s.df_mo_en_base.total_kwh.values\n elec_kw += s.df_mo_en_base.total_kw.values\n\n # store results\n dfb['elec_kwh'] = elec_kwh\n dfb['elec_kw'] = elec_kw\n\n # Make an object to calculate electric utility costs\n elec_cost_calc = ElecCostCalc(s.utility, sales_tax=s.sales_tax, pce_limit=s.pce_limit)\n # cost function that will be applied to each row of the cost DataFrame\n cost_func = lambda r: elec_cost_calc.monthly_cost(r.elec_kwh, r.elec_kw)\n\n dfb['elec_dol'] = dfb.apply(cost_func, axis=1)\n\n if not is_electric_heat:\n # Now fuel use by month. Remember that the home heat model only looked at\n # space heating, so we need to add in the fuel use from the other end uses\n # that use this fuel.\n dfb['secondary_fuel_units'] = s.df_mo_en_base.secondary_fuel_units + \\\n s.fuel_other_uses / 12.0\n dfb['secondary_fuel_dol'] = dfb.secondary_fuel_units * s.exist_unit_fuel_cost * (1. + s.sales_tax)\n else:\n # Electric Heat, so no secondary fuel\n dfb['secondary_fuel_units'] = 0.0\n dfb['secondary_fuel_dol'] = 0.0\n\n # Total Electric + space heat\n dfb['total_dol'] = dfb.elec_dol + dfb.secondary_fuel_dol\n\n # Now with the heat pump\n # determine extra kWh used in the heat pump scenario. Note, this will\n # be negative numbers if the base case used electric heat.\n extra_kwh = (s.df_mo_en_hp.total_kwh - s.df_mo_en_base.total_kwh).values\n dfh['elec_kwh'] = dfb['elec_kwh'] + extra_kwh\n extra_kw = (s.df_mo_en_hp.total_kw - s.df_mo_en_base.total_kw).values\n dfh['elec_kw'] = dfb['elec_kw'] + extra_kw\n dfh['elec_dol'] = dfh.apply(cost_func, axis=1)\n\n # Now fuel, including other end uses using the heating fuel\n if not is_electric_heat:\n dfh['secondary_fuel_units'] = s.df_mo_en_hp.secondary_fuel_units + \\\n s.fuel_other_uses / 12.0\n dfh['secondary_fuel_dol'] = dfh.secondary_fuel_units * s.exist_unit_fuel_cost * (1. 
+ s.sales_tax)\n else:\n # Electric Heat, so no secondary fuel\n dfh['secondary_fuel_units'] = 0.0\n dfh['secondary_fuel_dol'] = 0.0\n\n # Total Electric + space heat\n dfh['total_dol'] = dfh.elec_dol + dfh.secondary_fuel_dol", "def Calculate(WA_HOME_folder, Basin, P_Product, ET_Product, LAI_Product, NDM_Product, Startdate, Enddate, Simulation): \n ######################### Import WA modules ###################################\n \n from wa.General import raster_conversions as RC\n from wa.General import data_conversions as DC\n import wa.Functions.Two as Two\n import wa.Functions.Start as Start\n import wa.Generator.Sheet2 as Generate\n \n ######################### Set General Parameters ##############################\n\n # Get environmental variable for the Home folder\n if WA_HOME_folder == '':\n WA_env_paths = os.environ[\"WA_HOME\"].split(';')\n Dir_Home = WA_env_paths[0]\n else:\n Dir_Home = WA_HOME_folder\n \n # Create the Basin folder\n Dir_Basin = os.path.join(Dir_Home, Basin)\n if not os.path.exists(Dir_Basin):\n os.makedirs(Dir_Basin)\t\n\n # Get the boundaries of the basin based on the shapefile of the watershed\n # Boundaries, Shape_file_name_shp = Start.Boundaries.Determine(Basin)\n Boundaries, Example_dataset = Start.Boundaries.Determine_LU_Based(Basin, Dir_Home)\n \n ############################# Download Data ###################################\n\n # Set the NPP and GPP data for the whole year\n StartYear = Startdate[:4]\n EndYear = Enddate[:4]\n StartdateNDM = '%d-01-01' %int(StartYear)\n EnddateNDM = '%d-12-31' %int(EndYear)\n \n # Download data\n Data_Path_P = Start.Download_Data.Precipitation(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate, Enddate, P_Product, Daily = 'y') \n Data_Path_ET = Start.Download_Data.Evapotranspiration(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate, Enddate, ET_Product)\n Data_Path_LAI = Start.Download_Data.LAI(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate, Enddate, LAI_Product) \n \n if NDM_Product == 'MOD17':\n Data_Path_NPP = Start.Download_Data.NPP(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], StartdateNDM, EnddateNDM, NDM_Product) \n Data_Path_GPP = Start.Download_Data.GPP(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], StartdateNDM, EnddateNDM, NDM_Product) \n\n Data_Path_P_Daily = os.path.join(Data_Path_P, 'Daily')\n Data_Path_P_Monthly = os.path.join(Data_Path_P, 'Monthly')\n \n ########################### Create input data #################################\n\n # Create Rainy Days based on daily CHIRPS\n Data_Path_RD = Two.Rainy_Days.Calc_Rainy_Days(Dir_Basin, Data_Path_P_Daily, Startdate, Enddate)\n\n # Create monthly LAI\n Dir_path_LAI = os.path.join(Dir_Basin, Data_Path_LAI)\n Start.Eightdaily_to_monthly_state.Nearest_Interpolate(Dir_path_LAI, Startdate, Enddate)\n\n # Create NDM based on MOD17\n if NDM_Product == 'MOD17':\n \n # Create monthly GPP \n Dir_path_GPP = os.path.join(Dir_Basin, Data_Path_GPP)\n Start.Eightdaily_to_monthly_state.Nearest_Interpolate(Dir_path_GPP, StartdateNDM, EnddateNDM)\n Data_Path_NDM = Two.Calc_NDM.NPP_GPP_Based(Dir_Basin, Data_Path_GPP, Data_Path_NPP, Startdate, Enddate)\n\n ###################### Save Data as netCDF files ##############################\n \n #___________________________________Land 
Use_______________________________\n\n # Get the data of LU and save as nc, This dataset is also used as reference for others\n LUdest = gdal.Open(Example_dataset) \n DataCube_LU = LUdest.GetRasterBand(1).ReadAsArray()\n\n Name_NC_LU = DC.Create_NC_name('LU', Simulation, Dir_Basin, 2)\n if not os.path.exists(Name_NC_LU):\n DC.Save_as_NC(Name_NC_LU, DataCube_LU, 'LU', Example_dataset)\n\n LUdest = None\n del DataCube_LU\n\n #______________________________Precipitation_______________________________\n\n # Define info for the nc files\n info = ['monthly','mm', ''.join([Startdate[5:7], Startdate[0:4]]) , ''.join([Enddate[5:7], Enddate[0:4]])]\n\n # Precipitation data\n Name_NC_P = DC.Create_NC_name('Prec', Simulation, Dir_Basin, 2, info)\n if not os.path.exists(Name_NC_P):\n\t\n # Get the data of Precipitation and save as nc\n DataCube_Prec = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_P_Monthly, Startdate, Enddate, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_P, DataCube_Prec, 'Prec', Example_dataset, Startdate, Enddate, 'monthly', 0.01)\n del DataCube_Prec\n\n #_______________________________Evaporation________________________________\n\n # Evapotranspiration data\n Name_NC_ET = DC.Create_NC_name('ET', Simulation, Dir_Basin, 2, info)\n if not os.path.exists(Name_NC_ET):\n\n # Get the data of Evaporation and save as nc\n DataCube_ET = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_ET, Startdate, Enddate, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_ET, DataCube_ET, 'ET', Example_dataset, Startdate, Enddate, 'monthly', 0.01)\n del DataCube_ET\n\n #___________________________Normalized Dry Matter__________________________\n\n # Define info for the nc files\n info = ['monthly','kg_ha-1', ''.join([Startdate[5:7], Startdate[0:4]]) , ''.join([Enddate[5:7], Enddate[0:4]])]\n\n Name_NC_NDM = DC.Create_NC_name('NDM', Simulation, Dir_Basin, 2, info)\n if not os.path.exists(Name_NC_NDM):\n\n # Get the data of Evaporation and save as nc\n DataCube_NDM = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_NDM, Startdate, Enddate, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_NDM, DataCube_NDM, 'NDM', Example_dataset, Startdate, Enddate, 'monthly', 100)\n del DataCube_NDM\n\n #_______________________________Rainy Days_________________________________\n\n # Define info for the nc files\n info = ['monthly','days', ''.join([Startdate[5:7], Startdate[0:4]]) , ''.join([Enddate[5:7], Enddate[0:4]])]\n\n Name_NC_RD = DC.Create_NC_name('RD', Simulation, Dir_Basin, 2, info)\n if not os.path.exists(Name_NC_RD):\n\n # Get the data of Evaporation and save as nc\n DataCube_RD = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_RD, Startdate, Enddate, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_RD, DataCube_RD, 'RD', Example_dataset, Startdate, Enddate, 'monthly', 100)\n del DataCube_RD\n\n #_______________________________Leaf Area Index____________________________\n\n # Define info for the nc files\n info = ['monthly','m2-m-2', ''.join([Startdate[5:7], Startdate[0:4]]) , ''.join([Enddate[5:7], Enddate[0:4]])]\n\n Name_NC_LAI = DC.Create_NC_name('LAI', Simulation, Dir_Basin, 2, info)\n if not os.path.exists(Name_NC_LAI):\n\n # Get the data of Evaporation and save as nc\n DataCube_LAI = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_LAI, Startdate, Enddate, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_LAI, DataCube_LAI, 'LAI', Example_dataset, Startdate, Enddate, 'monthly', 1)\n del DataCube_LAI\n\n ####################### 
Calculations Sheet 2 ##############################\n \n DataCube_I, DataCube_T, DataCube_E = Two.SplitET.ITE(Dir_Basin, Name_NC_ET, Name_NC_LAI, Name_NC_P, Name_NC_RD, Name_NC_NDM, Name_NC_LU, Startdate, Enddate, Simulation)\n \n ############################ Create CSV 2 ################################# \n\n Dir_Basin_CSV = Generate.CSV.Create(Dir_Basin, Simulation, Basin, Startdate, Enddate, Name_NC_LU, DataCube_I, DataCube_T, DataCube_E, Example_dataset)\n\n ############################ Create Sheet 2 ############################### \n\n Generate.PDF.Create(Dir_Basin, Basin, Simulation, Dir_Basin_CSV)\n\n return()", "def report_total_usage(self):\n work_time = 0\n if self.type == 'normal':\n work_time = self.fwk.fwk_global_time - self.start_exec_time\n elif self.type == 'sandia_work':\n self.total_time += self.fwk.fwk_global_time - self.start_exec_time\n self.total_usage = self.total_time * self.nproc\n if self.state == \"running\":\n # update total work done\n self.sim.completed_work += self.fwk.fwk_global_time - self.start_exec_time\n elif self.state == \"failed\":\n # add this work to the work to be redone\n self.sim.rework_todo += self.fwk.fwk_global_time - self.start_exec_time\n self.state = \"not_ready\"\n self.num_faults += 1\n elif self.type == 'sandia_rework':\n self.total_rework_time += self.fwk.fwk_global_time - self.start_exec_time\n self.total_rework_usage = self.total_rework_time * self.nproc\n if self.state == \"running\":\n # update total work done\n self.sim.next_ckpt = self.sim.ckpt_interval - (self.fwk.fwk_global_time - self.start_exec_time)\n self.sim.rework_todo -= self.fwk.fwk_global_time - self.start_exec_time\n elif self.state == \"failed\":\n # add this work to the work to be redone\n self.state = \"not_ready\"\n self.num_faults += 1\n elif self.type == 'sandia_ckpt':\n self.total_ckpt_time += self.fwk.fwk_global_time - self.start_exec_time\n self.total_ckpt_usage = self.total_ckpt_time * self.nproc\n if self.state == \"running\":\n # update last ckpt\n self.sim.last_ckpt = self.sim.completed_work\n elif self.state == \"failed\":\n # add work to rework\n self.sim.rework_todo += self.sim.next_ckpt\n self.state = \"not_ready\"\n self.num_faults += 1\n elif self.type == 'sandia_restart':\n print(\"time spent in rework\", self.fwk.fwk_global_time - self.start_exec_time)\n self.total_restart_time += self.fwk.fwk_global_time - self.start_exec_time\n self.total_restart_usage = self.total_restart_time * self.nproc\n #if self.state == \"running\":\n # nothing to do?\n # pass\n if self.state == \"failed\":\n # gotta try again\n self.state = \"ready\"\n self.num_faults += 1\n else:\n print(\"problems updating state in report_total_usage\")\n raise\n if self.type == 'normal':\n if self.sim.state == 'rework':\n self.total_rework_time += work_time\n self.total_rework_usage = self.total_rework_time * self.nproc\n else: # sim.state == 'work'\n if self.retry:\n self.total_rework_time += work_time\n self.total_rework_usage = self.total_rework_time * self.nproc\n else:\n self.total_time += work_time\n self.total_usage = self.total_time * self.nproc", "def calc_annual_heating_savings (self):\n price = (self.diesel_prices + self.cd['heating fuel premium'])\n\n #~ self.base_heating_cost =\n\n #~ self.proposed_heating_cost =\n\n\n\n\n self.annual_heating_savings = self.reduction_diesel_used * price\n #~ print 'self.annual_heating_savings',self.annual_heating_savings", "def execute(self):\n \n # initialize input parameters\n self.hubHt = self.hub_height\n self.ratedPower = 
self.machine_rating\n self.maxTipSpd = self.max_tip_speed\n self.rotorDiam = self.rotor_diameter\n self.maxCp = self.max_power_coefficient\n self.maxTipSpdRatio = self.opt_tsr\n self.cutInWS = self.cut_in_wind_speed\n self.cutOutWS = self.cut_out_wind_speed\n self.altitude = self.altitude\n\n if self.air_density == 0.0: \n # Compute air density \n ssl_pa = 101300 # std sea-level pressure in Pa\n gas_const = 287.15 # gas constant for air in J/kg/K\n gravity = 9.80665 # standard gravity in m/sec/sec\n lapse_rate = 0.0065 # temp lapse rate in K/m\n ssl_temp = 288.15 # std sea-level temp in K\n \n air_density = (ssl_pa * (1-((lapse_rate*(self.altitude + self.hubHt))/ssl_temp))**(gravity/(lapse_rate*gas_const))) / \\\n (gas_const*(ssl_temp-lapse_rate*(self.altitude + self.hubHt)))\n else:\n \t\tair_density = self.air_density\n\n # determine power curve inputs\n self.reg2pt5slope = 0.05\n \n #self.max_efficiency = self.drivetrain.getMaxEfficiency()\n self.ratedHubPower = self.ratedPower / self.max_efficiency # RatedHubPower\n\n self.omegaM = self.maxTipSpd/(self.rotorDiam/2.) # Omega M - rated rotor speed\n omega0 = self.omegaM/(1+self.reg2pt5slope) # Omega 0 - rotor speed at which region 2 hits zero torque\n Tm = self.ratedHubPower*1000/self.omegaM # Tm - rated torque\n\n # compute rated rotor speed\n self.ratedRPM = (30./pi) * self.omegaM\n \n # compute variable-speed torque constant k\n kTorque = (air_density*pi*self.rotorDiam**5*self.maxCp)/(64*self.maxTipSpdRatio**3) # k\n \n b = -Tm/(self.omegaM-omega0) # b - quadratic formula values to determine omegaT\n c = (Tm*omega0)/(self.omegaM-omega0) # c\n \n # omegaT is rotor speed at which regions 2 and 2.5 intersect\n # add check for feasibility of omegaT calculation 09/20/2012\n omegaTflag = True\n if (b**2-4*kTorque*c) > 0:\n omegaT = -(b/(2*kTorque))-(np.sqrt(b**2-4*kTorque*c)/(2*kTorque)) # Omega T\n #print [kTorque, b, c, omegaT]\n \n windOmegaT = (omegaT*self.rotorDiam)/(2*self.maxTipSpdRatio) # Wind at omegaT (M25)\n pwrOmegaT = kTorque*omegaT**3/1000 # Power at ometaT (M26)\n\n else:\n omegaTflag = False\n windOmegaT = self.ratedRPM\n pwrOmegaT = self.ratedPower\n\n # compute rated wind speed\n d = air_density*np.pi*self.rotorDiam**2.*0.25*self.maxCp\n self.ratedWindSpeed = \\\n 0.33*( (2.*self.ratedHubPower*1000. / ( d))**(1./3.) ) + \\\n 0.67*( (((self.ratedHubPower-pwrOmegaT)*1000.) 
/ (1.5*d*windOmegaT**2.)) + windOmegaT )\n\n # set up for idealized power curve\n n = 161 # number of wind speed bins\n itp = [None] * n\n ws_inc = 0.25 # size of wind speed bins for integrating power curve\n Wind = []\n Wval = 0.0\n Wind.append(Wval)\n for i in xrange(1,n):\n Wval += ws_inc\n Wind.append(Wval)\n\n # determine idealized power curve \n self.idealPowerCurve (Wind, itp, kTorque, windOmegaT, pwrOmegaT, n , omegaTflag)\n\n # add a fix for rated wind speed calculation inaccuracies kld 9/21/2012\n ratedWSflag = False\n # determine power curve after losses\n mtp = [None] * n\n for i in xrange(0,n):\n mtp[i] = itp[i] #* self.drivetrain.getDrivetrainEfficiency(itp[i],self.ratedHubPower)\n #print [Wind[i],itp[i],self.drivetrain.getDrivetrainEfficiency(itp[i],self.ratedHubPower),mtp[i]] # for testing\n if (mtp[i] > self.ratedPower):\n if not ratedWSflag:\n ratedWSflag = True\n mtp[i] = self.ratedPower\n\n self.rated_wind_speed = self.ratedWindSpeed\n self.rated_rotor_speed = self.ratedRPM\n self.power_curve = mtp\n self.wind_curve = Wind\n\n # compute turbine load outputs\n self.rotor_torque = self.ratedHubPower/(self.ratedRPM*(pi/30.))*1000.\n self.rotor_thrust = air_density * self.thrust_coefficient * pi * self.rotor_diameter**2 * (self.ratedWindSpeed**2) / 8.", "def main():\n trades = get_trades()\n _print_trades(trades)\n\n print(\"\\n# Cost basis per asset\")\n _cost_basis_per_asset(trades)\n\n for year in range(2015, 2019):\n trades_for_year = _filter_trades_by_time(trades, year)\n _print_balances(trades_for_year, year)\n _print_agg_trades(trades_for_year, year)", "def calculate(self, technologies, value_streams, results, opt_years):\n self.initiate_cost_benefit_analysis(technologies, value_streams)\n super().calculate(self.ders, self.value_streams, results, opt_years)\n self.create_equipment_lifetime_report(self.ders)", "def Calculate(WA_HOME_folder, Basin, P_Product, ET_Product, LAI_Product, NDM_Product, NDVI_Product, dict_crops, dict_non_crops, Startdate, Enddate, Simulation): \n ######################### Import WA modules ###################################\n \n from wa.General import raster_conversions as RC\n from wa.General import data_conversions as DC\n import wa.Functions.Three as Three\n import wa.Functions.Two as Two\n import wa.Functions.Start as Start\n import wa.Generator.Sheet3 as Generate\n import wa.Functions.Start.Get_Dictionaries as GD\n \n ######################### Set General Parameters ##############################\n\n # Check if there is a full year selected between Startdate and Enddate, otherwise Sheet 3 cannot be produced \n try:\n years_end = pd.date_range(Startdate,Enddate,freq=\"A\").year\n years_start = pd.date_range(Startdate,Enddate,freq=\"AS\").year\n if (len(years_start) == 0 or len(years_end) == 0):\n print \"Calculation period is less than a year, which is not possible for sheet 3\"\n quit\n years = np.unique(np.append(years_end,years_start))\n except:\n print \"Calculation period is less than a year, which is not possible for sheet 3\"\n quit\n\n # Get environmental variable for the Home folder\n if WA_HOME_folder == '':\n WA_env_paths = os.environ[\"WA_HOME\"].split(';')\n Dir_Home = WA_env_paths[0]\n else:\n Dir_Home = WA_HOME_folder\n \t\n # Create the Basin folder\n Dir_Basin = os.path.join(Dir_Home, Basin)\n if not os.path.exists(Dir_Basin):\n os.makedirs(Dir_Basin)\t\n\n # Get the boundaries of the basin based on the shapefile of the watershed\n # Boundaries, Shape_file_name_shp = Start.Boundaries.Determine(Basin)\n Boundaries, 
Example_dataset = Start.Boundaries.Determine_LU_Based(Basin, Dir_Home)\n \n ############################# Download Data ###################################\n\n # Set the NPP and GPP data for the whole year\n StartYear = Startdate[:4]\n EndYear = Enddate[:4]\n StartdateNDM = '%d-01-01' %int(StartYear)\n EnddateNDM = '%d-12-31' %int(EndYear)\n\n #Set Startdate and Enddate for moving average\n ET_Blue_Green_Classes_dict, Moving_Window_Per_Class_dict = GD.get_bluegreen_classes(version = '1.0') \n Additional_Months_tail = np.max(Moving_Window_Per_Class_dict.values())\n Startdate_Moving_Average = pd.Timestamp(Startdate) - pd.DateOffset(months = Additional_Months_tail)\n Enddate_Moving_Average = pd.Timestamp(Enddate) + pd.DateOffset(months = 0)\n Startdate_Moving_Average_String = '%d-%02d-%02d' %(Startdate_Moving_Average.year, Startdate_Moving_Average.month, Startdate_Moving_Average.day)\n Enddate_Moving_Average_String = '%d-%02d-%02d' %(Enddate_Moving_Average.year, Enddate_Moving_Average.month, Enddate_Moving_Average.day)\n\n # Download data\n Data_Path_P = Start.Download_Data.Precipitation(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate_Moving_Average_String, Enddate_Moving_Average_String, P_Product, Daily = 'n') \n Data_Path_ET = Start.Download_Data.Evapotranspiration(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate, Enddate, ET_Product)\n Data_Path_ETref = Start.Download_Data.ETreference(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate_Moving_Average_String, Enddate_Moving_Average_String)\n Data_Path_NDVI = Start.Download_Data.NDVI(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate, Enddate)\n \n if NDM_Product == 'MOD17':\n Data_Path_NPP = Start.Download_Data.NPP(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], StartdateNDM, EnddateNDM, NDM_Product) \n Data_Path_GPP = Start.Download_Data.GPP(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], StartdateNDM, EnddateNDM, NDM_Product) \n\n Data_Path_P_Monthly = os.path.join(Data_Path_P, 'Monthly')\n \n ########################### Create input data #################################\n\n # Create NDM based on MOD17\n if NDM_Product == 'MOD17':\n\n # Create monthly GPP\n Dir_path_GPP = os.path.join(Dir_Basin, Data_Path_GPP)\n Start.Eightdaily_to_monthly_state.Nearest_Interpolate(Dir_path_GPP, StartdateNDM, EnddateNDM)\n Data_Path_NDM = Two.Calc_NDM.NPP_GPP_Based(Dir_Basin, Data_Path_GPP, Data_Path_NPP, Startdate, Enddate)\n\n # Create monthly NDVI based on MOD13\n if NDVI_Product == 'MOD13':\n Dir_path_NDVI = os.path.join(Dir_Basin, Data_Path_NDVI)\n Start.Sixteendaily_to_monthly_state.Nearest_Interpolate(Dir_path_NDVI, Startdate, Enddate)\n\n ###################### Save Data as netCDF files ##############################\n \n #___________________________________Land Use_______________________________\n\n # Get the data of LU and save as nc, This dataset is also used as reference for others\n LUdest = gdal.Open(Example_dataset) \n DataCube_LU = LUdest.GetRasterBand(1).ReadAsArray()\n DataCube_LU[DataCube_LU<0] = np.nan\n\n Name_NC_LU = DC.Create_NC_name('LU', Simulation, Dir_Basin, 3)\n if not os.path.exists(Name_NC_LU):\n DC.Save_as_NC(Name_NC_LU, DataCube_LU, 'LU', Example_dataset)\n\n LUdest = None\n del 
DataCube_LU\n #_______________________________Evaporation________________________________\n\n # Define info for the nc files\n info = ['monthly','mm', ''.join([Startdate[5:7], Startdate[0:4]]) , ''.join([Enddate[5:7], Enddate[0:4]])]\n\n # Evapotranspiration data\n Name_NC_ET = DC.Create_NC_name('ET', Simulation, Dir_Basin, 3, info)\n if not os.path.exists(Name_NC_ET):\n\n # Get the data of Evaporation and save as nc\n DataCube_ET = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_ET, Startdate, Enddate, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_ET, DataCube_ET, 'ET', Example_dataset, Startdate, Enddate, 'monthly', 0.01)\n del DataCube_ET\n\n #____________________________________NDVI__________________________________\n\n info = ['monthly','-', ''.join([Startdate_Moving_Average_String[5:7], Startdate_Moving_Average_String[0:4]]) , ''.join([Enddate_Moving_Average_String[5:7], Enddate_Moving_Average_String[0:4]])]\n\n\n Name_NC_NDVI = DC.Create_NC_name('NDVI', Simulation, Dir_Basin, 3, info)\n if not os.path.exists(Name_NC_NDVI):\n\n # Get the data of Evaporation and save as nc\n DataCube_NDVI = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_NDVI, Startdate, Enddate, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_NDVI, DataCube_NDVI, 'NDVI', Example_dataset, Startdate, Enddate, 'monthly', 1)\n del DataCube_NDVI\n\n #______________________________Precipitation_______________________________\n\n # Define info for the nc files\n info = ['monthly','mm', ''.join([Startdate_Moving_Average_String[5:7], Startdate_Moving_Average_String[0:4]]) , ''.join([Enddate_Moving_Average_String[5:7], Enddate_Moving_Average_String[0:4]])]\n\n # Precipitation data\n Name_NC_P = DC.Create_NC_name('Prec', Simulation, Dir_Basin, 3, info)\n if not os.path.exists(Name_NC_P):\n\t\n # Get the data of Precipitation and save as nc\n DataCube_Prec = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_P_Monthly, Startdate_Moving_Average_String, Enddate_Moving_Average_String, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_P, DataCube_Prec, 'Prec', Example_dataset, Startdate_Moving_Average_String, Enddate_Moving_Average_String, 'monthly', 0.01)\n del DataCube_Prec\n\n #________________________Reference Evaporation______________________________\n\n # Reference Evapotranspiration data\n Name_NC_ETref = DC.Create_NC_name('ETref', Simulation, Dir_Basin, 3, info)\n if not os.path.exists(Name_NC_ETref):\n\n # Get the data of Evaporation and save as nc\n DataCube_ETref = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_ETref, Startdate_Moving_Average_String, Enddate_Moving_Average_String, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_ETref, DataCube_ETref, 'ETref', Example_dataset, Startdate_Moving_Average_String, Enddate_Moving_Average_String, 'monthly', 0.01)\n del DataCube_ETref\n\n #___________________________Normalized Dry Matter__________________________\n\n # Define info for the nc files\n info = ['monthly','kg_ha-1', ''.join([Startdate[5:7], Startdate[0:4]]) , ''.join([Enddate[5:7], Enddate[0:4]])]\n\n Name_NC_NDM = DC.Create_NC_name('NDM', Simulation, Dir_Basin, 3, info)\n if not os.path.exists(Name_NC_NDM):\n\n # Get the data of Evaporation and save as nc\n DataCube_NDM = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_NDM, Startdate, Enddate, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_NDM, DataCube_NDM, 'NDM', Example_dataset, Startdate, Enddate, 'monthly', 100)\n del DataCube_NDM\n\n ############################# Calculate Sheet 3 
###########################\n\n # Define info for the nc files\n info = ['monthly','mm', ''.join([Startdate_Moving_Average_String[5:7], Startdate_Moving_Average_String[0:4]]) , ''.join([Enddate_Moving_Average_String[5:7], Enddate_Moving_Average_String[0:4]])]\n\n #____________ Evapotranspiration data split in ETblue and ETgreen ____________\n\n Name_NC_ETgreen = DC.Create_NC_name('ETgreen', Simulation, Dir_Basin, 3, info)\n Name_NC_ETblue = DC.Create_NC_name('ETblue', Simulation, Dir_Basin, 3, info)\n \n if not (os.path.exists(Name_NC_ETgreen) or os.path.exists(Name_NC_ETblue)):\n\n # Calculate Blue and Green ET\n DataCube_ETblue, DataCube_ETgreen = Three.SplitET.Blue_Green(Startdate, Enddate, Name_NC_LU, Name_NC_ETref, Name_NC_ET, Name_NC_P)\n\n # Save the ETblue and ETgreen data as NetCDF files\n DC.Save_as_NC(Name_NC_ETblue, DataCube_ETblue, 'ETblue', Example_dataset, Startdate, Enddate, 'monthly', 0.01)\n DC.Save_as_NC(Name_NC_ETgreen, DataCube_ETgreen, 'ETgreen', Example_dataset, Startdate, Enddate, 'monthly', 0.01)\n\n del DataCube_ETblue, DataCube_ETgreen\n \n #____________________________ Create the empty dictionaries ____________________________\n \n # Create the dictionaries that are required for sheet 3 \n wp_y_irrigated_dictionary, wp_y_rainfed_dictionary, wp_y_non_crop_dictionary = GD.get_sheet3_empties()\n \n #____________________________________ Fill in the dictionaries ________________________\n\n # Fill in the crops dictionaries \n wp_y_irrigated_dictionary, wp_y_rainfed_dictionary = Three.Fill_Dicts.Crop_Dictionaries(wp_y_irrigated_dictionary, wp_y_rainfed_dictionary, dict_crops, Name_NC_LU, Name_NC_ETgreen, Name_NC_ETblue, Name_NC_NDM, Name_NC_P, Dir_Basin)\n\n # Fill in the non crops dictionaries \n wp_y_non_crop_dictionary = Three.Fill_Dicts.Non_Crop_Dictionaries(wp_y_non_crop_dictionary, dict_non_crops)\n\n for year in years:\n\n ############################ Create CSV 3 ################################# \n \n csv_fh_a, csv_fh_b = Generate.CSV.Create(wp_y_irrigated_dictionary, wp_y_rainfed_dictionary, wp_y_non_crop_dictionary, Basin, Simulation, year, Dir_Basin)\n\n ############################ Create Sheet 3 ############################### \n\n Generate.PDF.Create(Dir_Basin, Basin, Simulation, csv_fh_a, csv_fh_b)\n \n return()", "def run(self):\n self.evaluate()\n self.accumulate()\n self.summarize()", "def execute(self, parameters, messages):\n execute_tool(arcsdm.acterbergchengci.Calculate, self, parameters, messages)\n return", "def calculate_reserves(self):\n # TODO: Add back cash dividends and deduct exchange costs\n console.print(\"Still has to be build.\")", "def _compute_(self):\n dic = \"data/sim/{dn}/{rad}/\".format(dn=self.event.strftime(\"%Y.%m.%d.%H.%M\"), rad=self.rad)\n fbgc = \"data/sim/{dn}/{rad}/exp.bgc.bm({bm}).elv(<elv>).csv\".format(dn=self.event.strftime(\"%Y.%m.%d.%H.%M\"), \n rad=self.rad, bm=self.bmnum)\n fflare = \"data/sim/{dn}/{rad}/exp.flare.bm({bm}).elv(<elv>).csv\".format(dn=self.event.strftime(\"%Y.%m.%d.%H.%M\"),\n rad=self.rad, bm=self.bmnum)\n cmd = \"export DIR_MODELS_REF_DAT=/home/shibaji/Collaboration_NCAR/code_rt_sd/pharlap/pharlap_4.1.3/dat;\\\n cd pharlap;\\\n matlab -nodisplay -nodesktop -nosplash -nojvm -r \\\"UT=[{ut}];rad='{rad}';dic='{dic}';fbgc='{fbgc}';bm={bm};\\\n fflare='{fflare}';rt_1D_sim;exit;\\\"\".format(ut=self.event.strftime(\"%Y %m %d %H %S\"), rad=self.rad,\n dic=dic, bm=self.bmnum, fbgc=fbgc, fflare=fflare)\n os.system(cmd)\n return", "def calc_capital_costs (self):\n powerhouse_control_cost = 0\n if not 
self.cd['switchgear suitable for renewables']:\n powerhouse_control_cost = self.cd['switchgear cost']\n\n #~ road_needed = self.comp_specs['road needed for transmission line']\n\n\n if str(self.comp_specs['transmission capital cost'])\\\n != 'UNKNOWN':\n transmission_line_cost = \\\n int(self.comp_specs['transmission capital cost'])\n else:\n if str(self.comp_specs['distance to resource']) \\\n != 'UNKNOWN':\n distance = \\\n float(self.comp_specs\\\n ['distance to resource'])\n transmission_line_cost = \\\n distance*self.comp_specs['est. transmission line cost']\n\n secondary_load_cost = 0\n if self.comp_specs['secondary load']:\n secondary_load_cost = self.comp_specs['secondary load cost']\n\n if str(self.comp_specs['generation capital cost']) \\\n != 'UNKNOWN':\n wind_cost = \\\n int(self.comp_specs['generation capital cost'])\n self.cost_per_kw = np.nan\n else:\n for i in range(len(self.comp_specs['estimated costs'])):\n if int(self.comp_specs['estimated costs'].iloc[i].name) < \\\n self.load_offset_proposed:\n if i == len(self.comp_specs['estimated costs']) - 1:\n cost = float(self.comp_specs['estimated costs'].iloc[i])\n break\n continue\n\n cost = float(self.comp_specs['estimated costs'].iloc[i])\n break\n\n wind_cost = self.load_offset_proposed * cost\n self.cost_per_kw = cost\n\n #~ print powerhouse_control_cost\n #~ print transmission_line_cost\n #~ print secondary_load_cost\n #~ print wind_cost\n self.capital_costs = powerhouse_control_cost + transmission_line_cost +\\\n secondary_load_cost + wind_cost\n\n #~ print 'self.capital_costs',self.capital_costs", "def run_simulation(self):\n env = simpy.Environment()\n env.process(self._simulation(env))\n env.run(until=24 * HORIZON)\n return self.total_cost, self.total_profit, self.number_of_courses", "def _exe_(self):\n print(\"\\n Start simulation (using Pharlap) ...\")\n dic = \"data/sim/{dn}/{rad}/\".format(dn=self.event.strftime(\"%Y.%m.%d.%H.%M\"), rad=self.rad)\n self._copy_ne_()\n [self._compute_(case) for case in [\"bgc\", \"flare\"]]\n plotlib.plot_exp_rays(dic, self.event, self.bmnum, \"bgc\")\n plotlib.plot_exp_rays(dic, self.event, self.bmnum, \"flare\")\n self._compute_doppler_()\n rec = self._compute_velocity_()\n return rec", "def calc_cash_flow(self):\n s = self # shortcut variable\n\n # determine the changes caused by the heat pump on an annual basis.\n # First calculate annual totals for base case and heat pump case and\n # then calculate the change.\n ann_base = s.df_mo_dol_base.sum()\n ann_hp = s.df_mo_dol_hp.sum()\n ann_chg = ann_hp - ann_base\n initial_cost = np.zeros(s.hp_life+1)\n \n # Am not automatically adding sales tax to the initial cost as the user was\n # supposed to includes sales tax in their input.\n initial_cost[0] = -s.capital_cost * (1 - s.pct_financed) + s.rebate_dol\n loan_pmt = npf.pmt(s.loan_interest, s.loan_term, s.capital_cost * s.pct_financed)\n if loan_pmt < -0.01: # loan payment is negative\n loan_cost = [0.0] + [loan_pmt] * s.loan_term + [0.0] * (s.hp_life - s.loan_term)\n loan_cost = np.array(loan_cost)\n else:\n loan_cost = 0.0\n op_cost = -s.op_cost_chg * make_pattern(s.inflation_rate, s.hp_life)\n fuel_cost = -ann_chg.secondary_fuel_dol * make_pattern(s.fuel_esc_rate, s.hp_life)\n elec_cost = -ann_chg.elec_dol * make_pattern(s.elec_esc_rate, s.hp_life)\n cash_flow = initial_cost + loan_cost + op_cost + fuel_cost + elec_cost\n\n # calculate cumulative, discounted cash flow.\n disc_factor = np.ones(s.hp_life) * (1 + s.discount_rate)\n disc_factor = np.insert(disc_factor.cumprod(), 0, 
1.0)\n cum_disc_cash_flow = np.cumsum(cash_flow / disc_factor)\n \n s.df_cash_flow = pd.DataFrame(\n {'initial_cost': initial_cost,\n 'loan_cost': loan_cost,\n 'op_cost': op_cost,\n 'fuel_cost': fuel_cost,\n 'elec_cost': elec_cost,\n 'cash_flow': cash_flow,\n 'cum_disc_cash_flow': cum_disc_cash_flow,\n }\n )\n s.df_cash_flow.index.name = 'year'\n \n # Calculate IRR and NPV for w/ and w/o PCE.\n s.summary['irr'] = npf.irr(s.df_cash_flow.cash_flow)\n s.summary['npv'] = npf.npv(s.discount_rate, s.df_cash_flow.cash_flow)\n \n # Add some summary fuel and electric usage and unit cost info\n s.summary['fuel_use_base'] = ann_base.secondary_fuel_units\n s.summary['fuel_use_hp'] = ann_hp.secondary_fuel_units\n s.summary['fuel_use_chg'] = ann_chg.secondary_fuel_units\n if ann_chg.secondary_fuel_units != 0.0:\n s.summary['fuel_price_incremental'] = ann_chg.secondary_fuel_dol / ann_chg.secondary_fuel_units\n else:\n s.summary['fuel_price_incremental'] = np.nan\n s.summary['elec_use_base'] = ann_base.elec_kwh\n s.summary['elec_use_hp'] = ann_hp.elec_kwh\n s.summary['elec_use_chg'] = ann_chg.elec_kwh\n s.summary['elec_rate_avg_base'] = ann_base.elec_dol / ann_base.elec_kwh\n s.summary['elec_rate_avg_hp'] = ann_hp.elec_dol / ann_hp.elec_kwh\n s.summary['elec_rate_incremental'] = ann_chg.elec_dol / ann_chg.elec_kwh", "def solve_model(self): \n \n t0 = time.time() #start the clock\n \n # a. benchmark case\n \n #i. joint pdf of productivity state and tau \n self.make_joint_pdf(1)\n \n #ii. set policy. in RR08 the benchmark economy has no taxes nor subsidies\n self.tau_benchmark = np.array([0, 0, 0]) #subsidy rate, excempt rate, tax rate \n self.set_tax_system(self.tau_benchmark) #set tax system\n \n #iii. benchmark equilibrium\n self.Yss_b, self.Kss_b, self.TFPss_b, self.average_firm_size_b, self.E_star_b, _, \\\n _, self.N_ss_b, self.w_ss_b, self.cdf_stationary_b, self.cdf_emp_b = self.solve_stationary_equilibrium()\n \n print(\"\\n-----------------------------------------\")\n print(\"Benchmark Stationary Equilibrium\")\n print(\"-----------------------------------------\")\n print(f\"ss output = {self.Yss_b:.2f}\")\n print(f\"ss capital = {self.Kss_b:.2f}\")\n print(f\"ss tfp = {self.TFPss_b:.2f}\")\n print(f\"ss wage = {self.w_ss_b:.2f}\")\n print(f\"entry mass = {self.E_star_b:.3f}\")\n print(f\"avg. firm size = {self.average_firm_size_b:.2f}\")\n \n #b. plot (note that the distributions plotted here are unaffected by the distortionary policies)\n \n if self.plott:\n #i. initialize\n employed = [4.99, 49.99]\n firm_size_by_employee = np.zeros(len(employed)+1)\n share_employment = np.zeros(len(employed)+1)\n \n \n #i. percentage of firms that employ employed\n \n for i_e in range(len(employed)):\n summ = np.sum(firm_size_by_employee)\n interpolate = self.interpol(self.labor_demand_rel, self.cdf_stationary_b, employed[i_e])[0] #labor_demand_rel is labor demand with the lowest value normalized to 1\n firm_size_by_employee[i_e] = interpolate - summ\n firm_size_by_employee[-1] = 1 - np.sum(firm_size_by_employee)\n \n plt.pie(firm_size_by_employee, labels=['<5','5<50','50 =<'], autopct=\"%.1f%%\")\n plt.title('Size of Firms by Firm Size (Number of Employees)')\n plt.savefig('firm_size_rr08.pdf')\n plt.show()\n \n \n #ii. 
employment percentage by firm size\n for i_e in range(len(employed)):\n summ = np.sum(share_employment)\n interpolate = self.interpol(self.labor_demand_rel, self.cdf_emp_b , employed[i_e])[0]\n share_employment[i_e] = interpolate - summ\n share_employment[-1] = 1 - np.sum(share_employment)\n \n plt.pie(share_employment, labels=['<5','5<50','50 =<'], autopct=\"%.1f%%\")\n plt.title('Employment Share by Firm Size (Number of Employees)')\n plt.savefig('employment_by_firm_size_rr08.pdf')\n plt.show()\n \n #iii. productivity cdf and employment cdf\n plt.plot(self.grid_s, self.cdf_stationary_b)\n plt.plot(self.grid_s, self.cdf_emp_b)\n plt.title('Stationary CDF' )\n plt.xlabel('Productivity level')\n plt.ylabel('Cumulative Sum')\n plt.legend(['Firms by Productivity Level','Share of Employment'])\n plt.savefig('cdf_rr08.pdf')\n plt.show()\n \n \n \n #c. distortion case\n \n #i. joint pdf of productivity state and tau \n self.make_joint_pdf(0)\n \n #ii. compute stationary economy for each tau\n \n for idx, tau in enumerate(self.tau_vector):\n \n #iii. find the subsidy rate that generates the same capital stock as in benchmark economy\n self.tau_s[idx] = self.find_subsidy_rate(tau)\n \n # set tax system with newly found tau_s and given tau\n tauv = np.array([-self.tau_s[idx], self.excempt_frac, tau]) #subsidy rate, excempt rate, tax rate \n self.set_tax_system(tauv) #set tax system\n \n #v. distorted stationary equilibrium\n self.Yss_d[idx], self.Kss_d[idx], self.TFPss_d[idx], self.average_firm_size_d[idx], self.E_star_d[idx], \\\n self.Y_set_d[idx,:], self.subsidy_size_d[idx], self.N_ss_d[idx], self.w_ss_d[idx],\\\n _, _ = self.solve_stationary_equilibrium()\n \n print(\"\\n-----------------------------------------\")\n print(\"Distorted Stationary Equilibrium\")\n print(\"-----------------------------------------\\n\")\n if self.distortion_case == 1:\n print(\"Tax/Subidy Uncorrelated with Firm Level Producitivity\\n\")\n elif self.distortion_case == 2:\n print(\"Tax/Subidy Negatively Correlated with Firm Level Producitivity\")\n print(\"(low productivity firms recieve subsidy, high productivity taxed)\\n\")\n elif self.distortion_case == 2:\n print(\"Tax/Subidy Positively Correlated with Firm Level Producitivity\")\n print(\"(high productivity firms recieve subsidy, low productivity taxed)\\n\")\n if self.policy_type == 1 :\n print(\"Tax Type: Tax on output\\n\")\n elif self.policy_type == 2 :\n print(\"Tax Type: Tax on capital\\n\")\n elif self.policy_type == 3 :\n print(\"Tax Type: Tax on labor\\n\")\n print(f\"fraction of firms recieving subsidy = {self.subsidy_frac:.2f}\")\n print(f\"fraction of firms taxed = {1-self.subsidy_frac-self.excempt_frac:.2f}\")\n print(f\"fraction of firms excempt = {self.excempt_frac:.2f}\")\n print(\"-----------------------------------------\\n\")\n \n print(tabulate([['relative Yss', round(self.Yss_d[0]/self.Yss_b, 2), round(self.Yss_d[1]/self.Yss_b, 2), round(self.Yss_d[2]/self.Yss_b, 2), round(self.Yss_d[3]/self.Yss_b, 2)],\n ['relative TFPss', round(self.TFPss_d[0]/self.TFPss_b, 2), round(self.TFPss_d[1]/self.TFPss_b, 2), round(self.TFPss_d[2]/self.TFPss_b, 2), round(self.TFPss_d[3]/self.TFPss_b, 2)], \n ['relative entry mass', round(self.E_star_d[0]/self.E_star_b, 2), round(self.E_star_d[1]/self.E_star_b, 2), round(self.E_star_d[2]/self.E_star_b, 2), round(self.E_star_d[3]/self.E_star_b, 2)],\n ['share of subsidized output', round(self.Y_set_d[0,0], 2), round(self.Y_set_d[1,0], 2), round(self.Y_set_d[2,0], 2), round(self.Y_set_d[3,0], 2)],\n ['total subsidy 
paid of output', round(self.subsidy_size_d[0], 2), round(self.subsidy_size_d[1], 2), round(self.subsidy_size_d[2], 2), round(self.subsidy_size_d[3], 2)],\n ['subsidy rate (tau_s)', round(self.tau_s[0], 2), round(self.tau_s[1], 2), round(self.tau_s[2], 2), round(self.tau_s[3], 2)],\n [], \n ['relative Kss', round(self.Kss_d[0]/self.Kss_b, 2), round(self.Kss_d[1]/self.Kss_b, 2), round(self.Kss_d[2]/self.Kss_b, 2), round(self.Kss_d[3]/self.Kss_b, 2)], \n ['relative wss', round(self.w_ss_d[0]/self.w_ss_b, 2), round(self.w_ss_d[1]/self.w_ss_b, 2), round(self.w_ss_d[2]/self.w_ss_b, 2), round(self.w_ss_d[3]/self.w_ss_b, 2)], \n ['relative Nss', round(self.N_ss_d[0]/self.N_ss_b, 2), round(self.N_ss_d[1]/self.N_ss_b, 2), round(self.N_ss_d[2]/self.N_ss_b, 2), round(self.N_ss_d[3]/self.N_ss_b, 2)], \n ['relative avg. firm size', round(self.average_firm_size_d[0]/self.average_firm_size_b, 2), round(self.average_firm_size_d[1]/self.average_firm_size_b, 2), round(self.average_firm_size_d[2]/self.average_firm_size_b, 2), round(self.average_firm_size_d[3]/self.average_firm_size_b, 2)]],\n headers=['Variable', 'Tax = '+str(self.tau_vector[0]), \"Tax = \"+str(self.tau_vector[1]), 'Tax = '+str(self.tau_vector[2]), 'Tax = '+str(self.tau_vector[3])]))\n \n\n t1 = time.time()\n print(f'\\nTotal Run Time: {t1-t0:.2f} seconds')", "def main():\n \n welcome()\n myBill = get_bill_amt()\n pct = get_tip_pct()\n tip = calc_tip(myBill, pct)\n show_results(myBill, tip, pct)", "def set_costs(self) -> None:\n self[\"glider cost\"] = (\n self[\"glider base mass\"] * self[\"glider cost slope\"]\n + self[\"glider cost intercept\"]\n )\n self[\"lightweighting cost\"] = (\n self[\"glider base mass\"]\n * self[\"lightweighting\"]\n * self[\"glider lightweighting cost per kg\"]\n )\n self[\"electric powertrain cost\"] = (\n self[\"electric powertrain cost per kW\"] * self[\"electric power\"]\n )\n self[\"combustion powertrain cost\"] = (\n self[\"combustion power\"] * self[\"combustion powertrain cost per kW\"]\n )\n self[\"fuel cell cost\"] = self[\"fuel cell power\"] * self[\"fuel cell cost per kW\"]\n self[\"power battery cost\"] = (\n self[\"battery power\"] * self[\"power battery cost per kW\"]\n )\n self[\"energy battery cost\"] = (\n self[\"energy battery cost per kWh\"] * self[\"electric energy stored\"]\n )\n self[\"fuel tank cost\"] = self[\"fuel tank cost per kg\"] * self[\"fuel mass\"]\n # Per km\n self[\"energy cost\"] = self[\"energy cost per kWh\"] * self[\"TtW energy\"] / 3600\n\n # For battery, need to divide cost of electricity\n # at battery by efficiency of charging\n # to get costs at the \"wall socket\".\n\n _ = lambda x: np.where(x == 0, 1, x)\n self[\"energy cost\"] /= _(self[\"battery charge efficiency\"])\n\n self[\"component replacement cost\"] = (\n self[\"energy battery cost\"] * self[\"battery lifetime replacements\"]\n + self[\"fuel cell cost\"] * self[\"fuel cell lifetime replacements\"]\n )\n\n with open(DATA_DIR / \"purchase_cost_params.yaml\", \"r\") as stream:\n to_markup = yaml.safe_load(stream)[\"markup\"]\n\n self[to_markup] *= self[\"markup factor\"]\n\n # calculate costs per km:\n self[\"lifetime\"] = self[\"lifetime kilometers\"] / self[\"kilometers per year\"]\n\n with open(DATA_DIR / \"purchase_cost_params.yaml\", \"r\") as stream:\n purchase_cost_params = yaml.safe_load(stream)[\"purchase\"]\n\n self[\"purchase cost\"] = self[purchase_cost_params].sum(axis=2)\n # per km\n amortisation_factor = self[\"interest rate\"] + (\n self[\"interest rate\"]\n / (\n (np.array(1) + self[\"interest 
rate\"]) ** self[\"lifetime kilometers\"]\n - np.array(1)\n )\n )\n self[\"amortised purchase cost\"] = (\n self[\"purchase cost\"] * amortisation_factor / self[\"kilometers per year\"]\n )\n\n # per km\n self[\"maintenance cost\"] = (\n self[\"maintenance cost per glider cost\"]\n * self[\"glider cost\"]\n / self[\"kilometers per year\"]\n )\n\n # simple assumption that component replacement\n # occurs at half of life.\n self[\"amortised component replacement cost\"] = (\n (\n self[\"component replacement cost\"]\n * (\n (np.array(1) - self[\"interest rate\"]) ** self[\"lifetime kilometers\"]\n / 2\n )\n )\n * amortisation_factor\n / self[\"kilometers per year\"]\n )\n\n self[\"total cost per km\"] = (\n self[\"energy cost\"]\n + self[\"amortised purchase cost\"]\n + self[\"maintenance cost\"]\n + self[\"amortised component replacement cost\"]\n )", "def main():\n # Retrieve csv_file name an max_investment from argument passed in console:\n arg_csv_file, arg_max_investment = set_arg()\n if arg_csv_file:\n csv_file = arg_csv_file\n else:\n csv_file = 'dataset1_Python+P7.csv'\n if arg_max_investment:\n max_investment = float(arg_max_investment)\n else:\n max_investment = 500.00\n\n # Retrieve dataset:\n base_dataset = open_convert_and_clean_csv(csv_file)\n\n # Retrieve solution:\n start = time.perf_counter()\n print()\n print(f\"Processing with file '{csv_file}' containing {len(base_dataset)} shares...\")\n print(f\"Maximum investment: {max_investment}€\")\n print(\"Please wait...\")\n computable_dataset = add_roi_to_dataset(convert_dataset_to_cents(base_dataset))\n best_roi, combination = best_combination_dynamic(computable_dataset, max_investment)\n\n # Formatting results:\n combination.sort(key=lambda x: x[2], reverse=True)\n combination_in_euros = convert_dataset_to_euros(combination)\n best_roi /= 100\n # Following calculus is done on cent prices (combination) to avoid approximations with floats\n shares_cost = calculate_shares_cost_sum(combination) / 100\n\n # Printing results:\n print()\n print(f\"Length of dataset: {len(computable_dataset)}\")\n print(f\"Duration of Analysis: {elapsed_time_formatted(start)}\")\n print()\n print(f\"Best Return on investment after 2 years: {round(best_roi, 2)}€\")\n print(f\"Number of shares to buy : {len(combination_in_euros)}\")\n print(f\"Total cost: {round(shares_cost, 2)}€\")\n print()\n print(f\"Best combination of shares ordered by performance: \")\n for share in combination_in_euros:\n print(f\"{share[0]} | Price: {share[1]}€ | profit: {share[2]}%\")\n print()", "def calculate_economics(\n irradiance: pd.DataFrame, temperature: pd.DataFrame, wind_speed: pd.DataFrame,\n CECMod: pd.DataFrame, configuration: float = 1\n ):\n p_out = calculate_dc_output(irradiance, temperature, wind_speed, CECMod=CECMod)\n\n # convert dc to AC - considering a flat loss of 14%\n # we have to improve this in the future\n p_out = [v * 0.86 for v in p_out]\n\n day_count = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n monthly_electricity = []\n\n for month in range(12):\n st_index = sum(day_count[:month + 1]) * 24\n end_index = sum(day_count[:month + 2]) * 24\n data = p_out[st_index: end_index]\n # Note: division by 50 is to match the values - remove it later!\n monthly_electricity.append(sum(data) / len(data) / 50)\n\n total_ac_energy = sum(p_out)\n monthly_ac_energy = pd.DataFrame(\n zip(calendar.month_abbr[1:], monthly_electricity),\n columns=['month', 'Thousand kWh']\n )\n\n # Based on the example here: 
https://nrel-pysam.readthedocs.io/en/master/Import.html\n\n grid = Grid.default(\"PVWattsCommercial\")\n ur = UtilityRate.from_existing(grid, \"PVWattsCommercial\")\n cl = Cashloan.from_existing(grid,\"PVWattsCommercial\")\n\n sam_data = read_sam_data(configuration)\n for module, data in zip([grid, ur, cl], sam_data[:-1]):\n for k, v in data.items():\n if k == 'number_inputs':\n continue\n try:\n module.value(k, v)\n except AttributeError:\n print(module, k, v)\n\n\n grid.SystemOutput.gen = p_out\n\n grid.execute()\n ur.execute()\n cl.execute()\n\n # list possible outputs here\n adjusted_installed_cost = cl.Outputs.adjusted_installed_cost\n payback_cash_flow = [-1 * x for x in cl.Outputs.cf_discounted_payback]\n\n return total_ac_energy, monthly_ac_energy, adjusted_installed_cost, payback_cash_flow", "def run(self):\n model = self.model\n self.summary_cards(model)\n self.hospitalizations_chart(model)\n self.available_beds_chart(model)\n self.write_population_info(model)\n self.write_age_distribution_chart(model)\n self.write_fatalities_chart(model)\n self.write_healthcare_parameters(model)\n self.write_epidemiological_parameters(model)\n self.write_footnotes(model)", "def run(self):\n import sacc\n import healpy\n import treecorr\n # Load the different pieces of data we need into\n # one large dictionary which we accumulate\n data = {}\n self.load_tomography(data)\n self.load_shear_catalog(data)\n self.load_random_catalog(data)\n # This one is optional - this class does nothing with it\n self.load_lens_catalog(data)\n # Binning information\n self.read_nbin(data)\n\n # Calculate metadata like the area and related\n # quantities\n meta = self.calculate_metadata(data)\n\n # Choose which pairs of bins to calculate\n calcs = self.select_calculations(data)\n\n sys.stdout.flush()\n \n # This splits the calculations among the parallel bins\n # It's not necessarily the most optimal way of doing it\n # as it's not dynamic, just a round-robin assignment,\n # but for this case I would expect it to be mostly fine\n results = []\n for i,j,k in self.split_tasks_by_rank(calcs):\n results += self.call_treecorr(data, i, j, k)\n\n # If we are running in parallel this collects the results together\n results = self.collect_results(results)\n\n # Save the results\n if self.rank==0:\n self.write_output(data, meta, results)", "def set_all(self):\n\n self.ecm = EnergyConsumptionModel(\n vehicle_type=\"car\",\n vehicle_size=list(self.array.coords[\"size\"].values),\n powertrains=list(self.array.coords[\"powertrain\"].values),\n cycle=self.cycle,\n gradient=self.gradient,\n country=self.country,\n )\n\n diff = 1.0\n\n while diff > 0.0001:\n old_driving_mass = self[\"driving mass\"].sum().values\n self.set_vehicle_mass()\n self.set_power_parameters()\n self.set_component_masses()\n self.set_auxiliaries()\n self.set_power_battery_properties()\n self.set_battery_properties()\n self.set_energy_stored_properties()\n self.set_recuperation()\n\n if \"FCEV\" in self.array.powertrain.values:\n self.set_fuel_cell_power()\n self.set_fuel_cell_mass()\n\n # if user-provided values are passed,\n # they override the default values\n if \"capacity\" in self.energy_storage:\n self.override_battery_capacity()\n\n diff = (self[\"driving mass\"].sum().values - old_driving_mass) / self[\n \"driving mass\"\n ].sum()\n\n self.set_ttw_efficiency()\n self.calculate_ttw_energy()\n self.set_ttw_efficiency()\n\n self.set_range()\n\n if self.target_range:\n self.override_range()\n\n self.set_share_recuperated_energy()\n 
self.set_battery_fuel_cell_replacements()\n self.adjust_cost()\n\n self.set_electric_utility_factor()\n self.set_electricity_consumption()\n self.set_costs()\n self.set_hot_emissions()\n self.set_particulates_emission()\n self.set_noise_emissions()\n self.create_PHEV()\n if self.drop_hybrids:\n self.drop_hybrid()\n\n self.remove_energy_consumption_from_unavailable_vehicles()", "def run(self):\n \n # shortcut for self\n s = self\n \n # shortcut to existing heating fuel\n fuel = s.exist_fuel\n\n # holds summary measures for the heat pump project (e.g. seasonal COP,\n # internal rate of return). Fill out first item: secondary fuel info.\n s.summary = {'fuel_unit': fuel.unit, 'fuel_desc': fuel.desc}\n \n # Create the home energy simulation object\n sim = HomeHeatModel(\n city_id=s.city_id,\n hp_model_id=s.hp_model_id,\n exist_heat_fuel_id=s.exist_heat_fuel_id,\n exist_heat_effic=s.exist_heat_effic,\n exist_kwh_per_mmbtu=s.exist_kwh_per_mmbtu, \n co2_lbs_per_kwh=s.co2_lbs_per_kwh,\n low_temp_cutoff=s.low_temp_cutoff,\n off_months=s.off_months_chks,\n garage_stall_count=s.garage_stall_count,\n garage_heated_by_hp=s.garage_heated_by_hp,\n bldg_floor_area=s.bldg_floor_area,\n indoor_heat_setpoint=s.indoor_heat_setpoint,\n insul_level=s.insul_level,\n pct_exposed_to_hp=s.pct_exposed_to_hp,\n doors_open_to_adjacent=s.doors_open_to_adjacent,\n bedroom_temp_tolerance=s.bedroom_temp_tolerance, \n )\n\n # If other end uses use the heating fuel, make an estimate of their annual\n # consumption of that fuel. This figure is expressed in the physical unit\n # for the fuel type, e.g. gallons of oil. Save this as an object attribute\n # so it is accessible in other routines. See Evernote notes on values (AkWarm\n # for DHW and Michael Bluejay for Drying and Cooking).\n is_electric = (s.exist_heat_fuel_id == constants.ELECTRIC_ID) # True if Electric\n s.fuel_other_uses = s.includes_dhw * 4.23e6 / fuel.dhw_effic\n s.fuel_other_uses += s.includes_dryer * (0.86e6 if is_electric else 2.15e6)\n s.fuel_other_uses += s.includes_cooking * (0.64e6 if is_electric else 0.8e6)\n s.fuel_other_uses *= s.occupant_count / fuel.btus\n\n # For elecric heat we also need to account for lights and other applicances not\n # itemized above.\n if is_electric:\n # Use the AkWarm Medium Lights/Appliances formula but take 25% off\n # due to efficiency improvements since then.\n s.lights_other_elec = 2086. + 1.20 * s.bldg_floor_area # kWh in the year\n else:\n s.lights_other_elec = 0.0\n \n # Match the existing space heating use if it is provided. 
Do so by using\n # the UA true up factor.\n if not is_null(s.exist_fuel_use):\n \n # Remove the energy use from the other end uses that use the fuel, unless\n # this is electric heat and the user indicated that the entered value is\n # just space heating.\n if is_electric and s.elec_uses=='space':\n # user explicitly indicated that the entered annual usage value is\n # just space heating.\n space_fuel_use = s.exist_fuel_use\n else:\n space_fuel_use = s.exist_fuel_use - s.fuel_other_uses - s.lights_other_elec\n\n sim.no_heat_pump_use = True\n sim.calculate()\n if is_electric:\n # For electric heat, electric use for space heat is in secondary_kwh\n fuel_use1 = sim.annual_results().secondary_kwh\n else:\n fuel_use1 = sim.annual_results().secondary_fuel_units\n \n # scale the UA linearly to attempt to match the target fuel use\n ua_true_up = space_fuel_use / fuel_use1\n sim.ua_true_up = ua_true_up\n sim.calculate()\n\n if is_electric:\n # For electric heat, electric use for space heat is in secondary_kwh\n fuel_use2 = sim.annual_results().secondary_kwh\n else:\n fuel_use2 = sim.annual_results().secondary_fuel_units\n \n # In case it wasn't linear, inter/extrapolate to the final ua_true_up\n slope = (fuel_use2 - fuel_use1)/(ua_true_up - 1.0)\n # print(space_fuel_use, fuel_use1, fuel_use2, ua_true_up)\n ua_true_up = 1.0 + (space_fuel_use - fuel_use1) / slope\n # print(ua_true_up)\n\n else:\n ua_true_up = 1.0\n \n # Set the UA true up value into the model and also save it as\n # an attribute of this object so it can be observed.\n sim.ua_true_up = ua_true_up\n s.ua_true_up = ua_true_up\n \n # Run the base case with no heat pump and record energy results.\n # This model only models the space heating end use.\n sim.no_heat_pump_use = True\n sim.calculate()\n s.df_mo_en_base = sim.monthly_results()\n s.ann_en_base = sim.annual_results()\n # print(s.ann_en_base.secondary_kwh)\n \n # Run the model with the heat pump and record energy results\n sim.no_heat_pump_use = False\n sim.calculate()\n s.df_mo_en_hp = sim.monthly_results()\n s.ann_en_hp = sim.annual_results()\n s.df_hourly = sim.df_hourly\n\n # record design heat load\n s.summary['design_heat_load'], s.summary['design_heat_temp'] = sim.design_heat_load()\n \n # Calculate some summary measures\n s.summary['cop'] = s.ann_en_hp.cop\n s.summary['hp_max_capacity_5F'] = sim.hp_max_capacity_5F()\n s.summary['max_hp_reached'] = sim.max_hp_reached\n \n # CO2 savings\n s.summary['co2_lbs_saved'] = s.ann_en_base.co2_lbs - s.ann_en_hp.co2_lbs\n s.summary['co2_driving_miles_saved'] = convert_co2_to_miles_driven(s.summary['co2_lbs_saved'])\n s.summary['hp_load_frac'] = s.ann_en_hp.hp_load_mmbtu / (s.ann_en_hp.hp_load_mmbtu + s.ann_en_hp.secondary_load_mmbtu)\n \n # Create DataFrames that hold monthly energy cost amounts\n # Results are stored as object attributes.\n self.calc_monthly_cash()\n \n # Create a multi-year Cash Flow DataFrame and summary economic measures.\n # Results are stored as object attributes.\n self.calc_cash_flow()\n\n # Save a gzipped pickle of this object using Unix time as the file name.\n # make a directory to hold the files\n save_dir = 'hpcalc_runs'\n Path(save_dir).mkdir(exist_ok=True)\n fname = f'{time.time():.2f}.pkl.gz'\n s.file_name = fname\n pickle.dump(self, gzip.open(f'{save_dir}/{fname}', 'wb'))", "def execute_30(**args):\n\n output_directory = carbon_utils.setup_dirs(args['workspace_dir'], 'output')\n\n if args['carbon_price_units'] == 'Carbon Dioxide (CO2)':\n #Convert to price per unit of Carbon do this by dividing\n #the 
atomic mass of CO2 (15.9994*2+12.0107) by the atomic\n #mass of 12.0107. Values gotten from the periodic table of\n #elements.\n args['V'] *= (15.9994*2+12.0107)/12.0107\n\n LOGGER.info('Constructing valuation formula.')\n n = args['yr_fut'] - args['yr_cur'] - 1\n ratio = 1.0 / ((1 + args['r'] / 100.0) * (1 + args['c'] / 100.0))\n valuation_constant = args['V'] / (args['yr_fut'] - args['yr_cur']) * \\\n (1.0 - ratio ** (n + 1)) / (1.0 - ratio)\n\n nodata_out = -1.0e10\n\n outputs = _make_outfile_uris(output_directory, args)\n\n conf_uris = {}\n if args.get('conf_uri'):\n conf_uris['base'] = args['conf_uri']\n if args.get('conf_redd_uri'):\n conf_uris['redd'] = args['conf_redd_uri']\n\n for scenario_type in ['base', 'redd']:\n try:\n sequest_uri = outputs['sequest_%s' % scenario_type]\n except KeyError:\n # REDD analysis might not be enabled, so just keep going.\n continue\n\n LOGGER.info('Beginning valuation of %s scenario.', scenario_type)\n\n sequest_nodata = pygeoprocessing.geoprocessing.get_nodata_from_uri(sequest_uri)\n\n def value_op(sequest):\n if sequest == sequest_nodata:\n return nodata_out\n return sequest * valuation_constant\n\n pixel_size_out = pygeoprocessing.geoprocessing.get_cell_size_from_uri(sequest_uri)\n pygeoprocessing.geoprocessing.vectorize_datasets(\n [sequest_uri], value_op, outputs['%s_val' % scenario_type],\n gdal.GDT_Float32, nodata_out, pixel_size_out, \"intersection\")\n\n\n if scenario_type in conf_uris:\n LOGGER.info('Creating masked rasters for %s scenario.', scenario_type)\n # Produce a raster for sequestration, masking out uncertain areas.\n _create_masked_raster(sequest_uri, conf_uris[scenario_type],\n outputs['%s_seq_mask' % scenario_type])\n\n # Produce a raster for value sequestration,\n # again masking out uncertain areas.\n _create_masked_raster(\n outputs['%s_val' % scenario_type],\n conf_uris[scenario_type],\n outputs['%s_val_mask' % scenario_type])\n\n if 'uncertainty_data' in args:\n uncertainty_data = _compute_uncertainty_data(\n args['uncertainty_data'], valuation_constant)\n if uncertainty_data:\n outputs['uncertainty_data'] = uncertainty_data\n\n return outputs", "def calc_maintenance_cost(self):\n\n self.maintenance_cost = self.capital_costs * .01", "def print_analysis_prices(pv, demand,retail,export, param, E,isCommunity=False,hh=None):\n RemainingSOC=E['LevelOfCharge'][-1]\n timestep = param['timestep']\n SelfConsumption = np.sum(E['inv2load']) * timestep # AC\n TotalFromGrid = np.sum(E['grid2load']) * timestep # AC\n TotalToGrid = np.sum(E['inv2grid']) * timestep # AC\n TotalLoad = demand.sum() * timestep # AC\n #TotalBattToLoad = np.sum(E['store2load']) * timestep # AC\n TotalBattToGrid = np.sum(E['store2grid']) * timestep # AC\n TotalPV = pv.sum() * timestep # DC\n TotalBatteryGeneration = np.sum(E['store2inv']) * timestep # DC\n TotalBatteryConsumption = np.sum(E['pv2store']) * timestep # DC\n if 'inv_losses' in E.keys():\n BatteryLosses=E['batt_losses'].sum()*timestep\n InverterLosses=E['inv_losses'].sum()*timestep\n else:\n BatteryLosses = TotalBatteryConsumption * (1 - param['BatteryEfficiency'])\n InverterLosses = (TotalPV - BatteryLosses-RemainingSOC) * (1 - param['InverterEfficiency'])\n SelfConsumptionRate = SelfConsumption / TotalPV * 100 # in %\n SelfSufficiencyRate = SelfConsumption / TotalLoad * 100\n Bill=((E['grid2load'] * timestep) * retail - (E['inv2grid'] * timestep ) * export).sum()\n Batt_revenue=((E['store2load']*param['InverterEfficiency']*timestep*retail-\n 
E['pv2store']*param['InverterEfficiency']*timestep*export)).sum()\n \n print ('Total yearly consumption: {:1g} kWh'.format(TotalLoad))\n print ('Total PV production: {:1g} kWh'.format(TotalPV))\n print ('Self Consumption: {:1g} kWh'.format(SelfConsumption))\n print ('Total fed to the grid: {:1g} kWh'.format(TotalToGrid))\n print ('Total bought from the grid: {:1g} kWh'.format(TotalFromGrid))\n print ('Self consumption rate (SCR): {:.3g}%'.format(SelfConsumptionRate))\n print ('Self sufficiency rate (SSR): {:.3g}%'.format(SelfSufficiencyRate))\n print ('Amount of energy provided by the battery: {:1g} kWh'.format(TotalBatteryGeneration))\n print ('Total battery losses: {:1g} kWh, i.e., {:1g}% of the total PV'.format(BatteryLosses,BatteryLosses/TotalPV*100))\n #print('Total energy from battery to the load {:1g} kWh'.format(TotalBattToLoad))\n print('Total energy from battery to the grid {:1g} kWh'.format(TotalBattToGrid))\n #print ('Total inverter losses: {:1g} kWh'.format(InverterLosses))\n #print ('Total inverter losses: {:1g} kWh'.format(InverterLosses))\n print ('Total inverter losses: {:1g} kWh, i.e., {:1g}% of the total PV'.format(InverterLosses,InverterLosses/TotalPV*100))\n \n \n TotalCurtailment=np.sum(E['inv2curt'])*timestep # DC\n print ('Total curtailment : {:1g} kWh'.format(TotalCurtailment)) \n residue = TotalPV + TotalFromGrid - TotalToGrid - BatteryLosses - InverterLosses - TotalLoad - TotalCurtailment - RemainingSOC\n print ('Residue (check): {:1g} kWh'.format(residue))\n PV_check = TotalPV - SelfConsumption - TotalToGrid - BatteryLosses - InverterLosses - TotalCurtailment - RemainingSOC\n print ('PV Residue (check): {:1g} kWh'.format(PV_check))\n \n print(bcolors.WARNING + 'Maximum power injected into the grid is {:1g} kW'.format(E['inv2grid'].max())+bcolors.ENDC)\n print(bcolors.WARNING + 'Maximum power drained from the grid is {:1g} kW'.format(E['grid2load'].max())+bcolors.ENDC)\n print (bcolors.WARNING + 'Total bill: {:1g}\\n\\n'.format(Bill)+bcolors.ENDC)\n print (bcolors.WARNING + 'Total Batt_revenue: {:1g}\\n\\n'.format(Batt_revenue)+bcolors.ENDC)\n \n if isCommunity==False:\n AverageDepth = TotalBatteryGeneration / (365 * param['BatteryCapacity'])\n Nfullcycles = 365 * AverageDepth \n print ('Number of equivalent full cycles per year: {:1g} '.format(Nfullcycles))\n print ('Average Charging/Discharging depth: {:1g}\\n\\n'.format(AverageDepth))\n \n out = { 'SCR': SelfConsumptionRate, # \n 'SSR':SelfSufficiencyRate, # \n 'EFC': Nfullcycles, # \n 'Demand_peak': E['grid2load'].max(), # \n 'Inj_peak': E['inv2grid'].max(), #\n 'avg_dod': AverageDepth, #\n 'bill': Bill,\n 'Batt_revenue':Batt_revenue,\n 'Batt_penetration':param['batt_penetration'],\n 'PV_penetration':param['pv_penetration'],\n 'seed':param['seed'],\n 'hh':hh\n }\n else:\n out = { 'SCR': SelfConsumptionRate, # \n 'SSR':SelfSufficiencyRate, # \n 'EFC': None, # \n 'Demand_peak': E['grid2load'].max(), # \n 'Inj_peak': E['inv2grid'].max(), #\n 'avg_dod': None, #\n 'bill': Bill,\n 'Batt_revenue':Batt_revenue,\n 'Batt_penetration':param['batt_penetration'],\n 'PV_penetration':param['pv_penetration'],\n 'seed':param['seed'],\n 'hh':hh\n }\n return out", "def calc_maintenance_cost (self):\n\n if str(self.comp_specs['operational costs']) \\\n != 'UNKNOWN':\n self.maintenance_cost = \\\n self.comp_specs['operational costs']\n else:\n self.maintenance_cost = \\\n (self.comp_specs['percent o&m'] / 100.0) * self.capital_costs\n #~ print 'self.maintenance_cost',self.maintenance_cost", "def _exe_(self):\n print(\"\\n Start 
simulation (using Pharlap) ...\")\n dic = \"data/sim/{dn}/{rad}/\".format(dn=self.event.strftime(\"%Y.%m.%d.%H.%M\"), rad=self.rad)\n self._estimate_edens_()\n self._compute_()\n plotlib.plot_exp_rays(dic, self.event, self.bmnum, \"bgc\")\n plotlib.plot_exp_rays(dic, self.event, self.bmnum, \"flare\")\n if self.verbose: print(\"\\n Processing Doppler.\")\n self._compute_doppler_()\n rec = self._compute_velocity_()\n return rec", "def run(self):\n # get components list\n #component_id_list = self.getComponentsList()\n asset_id = 3776\n component_id_list = self.get_component_info_for_one_asset(asset_id)\n # call computeResults method\n results = self.compute_results(component_id_list)\n # write to the output file\n self.write_to_file(results)", "def run_baseline_simulation(self):\n n_days_base = 1 # Only consider 1 day simulation, self.n_days_base\n sim_time = 24*3600 # one day in seconds\n \n print(\"Running day-ahead baseline simulation ...\") \n print(\"Running baseline right away charging strategy ...\")\n baseline_soc, baseline_std_soc, baseline_power, baseline_cycles, baseline_Tin, baseline_std_Tin, baseline_Tin_max, baseline_Tin_min = self.run_baseline_right_away(n_days_base, sim_time)\n \n print(\"Exported baseline soc, Temperatures, power and HVAC cycles ...\")\n \n base_path = dirname(abspath(__file__))\n path = join(base_path,'data')\n \n # Already saved inside the right away function\n # baseline_soc.to_csv(join(path, r'SOC_baseline.csv'), index = False)\n # baseline_power.to_csv(join(path, r'power_baseline.csv'), index = False)\n # baseline_Tin.to_csv(join(path, r'Tin_baseline.csv'), index = False)\n # baseline_Tin_max.to_csv(join(path, r'Tin_max_baseline.csv'), index = False)\n # baseline_Tin_min.to_csv(join(path, r'Tin_min_baseline.csv'), index = False)\n print(\"Exported\")", "def mc_main(self, end_plant_tuple, h2_demand, year=2021, centralised=True, pipeline=True, max_pipeline_dist=2000,\r\n iterations=1000, elec_type='alkaline'):\r\n\r\n df = pd.read_csv(filepath_or_buffer=\"Data/renewables.csv\", index_col=0)\r\n\r\n total_cost_per_kg_h2 = np.zeros((iterations, len(df)))\r\n generation_cost_per_kg = np.zeros((iterations, len(df)))\r\n solar_cost = np.zeros((iterations, len(df)))\r\n wind_cost = np.zeros((iterations, len(df)))\r\n\r\n cost_end_nh3 = np.zeros(iterations)\r\n cost_end_lohc = np.zeros(iterations)\r\n cost_end_h2_liq = np.zeros(iterations)\r\n\r\n # Define parameters for generation costs\r\n year_diff, capex_extra, capex_h2, lifetime_hours, electrolyser_efficiency, elec_opex, other_capex_elec, water_cost, \\\r\n capex_wind, opex_wind, capex_solar, opex_factor_solar = define_gen_parameters(year, iterations, elec_type)\r\n\r\n for i in range(iterations):\r\n df, cost_end_nh3[i], cost_end_lohc[i], cost_end_h2_liq[i] = initial_geo_calcs(df, end_plant_tuple,\r\n centralised=centralised,\r\n pipeline=pipeline,\r\n max_pipeline_dist=max_pipeline_dist)\r\n\r\n for i in range(iterations):\r\n df = mc_generation_costs(df, h2_demand, year_diff, capex_extra[i], capex_h2[i], lifetime_hours,\r\n electrolyser_efficiency[i], elec_opex[i],\r\n other_capex_elec[i], water_cost[i],\r\n capex_wind[i], opex_wind[i], capex_solar[i], opex_factor_solar[i],\r\n interest=0.08, full_load_hours=2000)\r\n\r\n df = mc_transport_costs(df, end_plant_tuple, h2_demand, cost_end_nh3[i], cost_end_lohc[i], cost_end_h2_liq[i],\r\n centralised=centralised, pipeline=pipeline,\r\n max_pipeline_dist=max_pipeline_dist)\r\n\r\n df['Total Yearly Cost'] = df['Yearly gen. 
cost'] + df['Yearly Transport Cost']\r\n df['Total Cost per kg H2'] = df['Gen. cost per kg H2'] + df['Transport Cost per kg H2']\r\n\r\n total_cost_per_kg_h2[i, :] = df['Total Cost per kg H2'].to_numpy()\r\n generation_cost_per_kg[i, :] = df['Gen. cost per kg H2'].to_numpy()\r\n solar_cost[i, :] = df['Elec Cost Solar'].to_numpy()\r\n wind_cost[i, :] = df['Elec Cost Wind'].to_numpy()\r\n\r\n return df, total_cost_per_kg_h2, generation_cost_per_kg, solar_cost, wind_cost", "def run(self):\n\n # If this was a tanh model or some such thing, we're already done.\n if self.is_phenom:\n return\n if self.is_complete:\n print(\"Already ran simulation!\")\n return\n\n # Need to generate radiation backgrounds first.\n if self.pf['radiative_transfer']:\n self.medium.field.run()\n self._f_Jc = self.medium.field._f_Jc\n self._f_Ji = self.medium.field._f_Ji\n self._f_Jlw = self.medium.field._f_Jlw\n else:\n self._f_Jc = lambda z: 0.0\n self._f_Ji = lambda z: 0.0\n self._f_Jlw = lambda z: 0.0\n\n # Start timer\n t1 = time.time()\n\n tf = self.medium.tf\n self.medium._insert_inits()\n\n pb = self.pb = ProgressBar(tf, use=self.pf['progress_bar'],\n name='gs-21cm')\n\n # Lists for data in general\n self.all_t, self.all_z, self.all_data_igm, self.all_data_cgm, \\\n self.all_RC_igm, self.all_RC_cgm = \\\n self.medium.all_t, self.medium.all_z, self.medium.all_data_igm, \\\n self.medium.all_data_cgm, self.medium.all_RCs_igm, self.medium.all_RCs_cgm\n\n # Add zeros for Ja\n for element in self.all_data_igm:\n element['Ja'] = np.zeros(self.grid.dims)\n element['Jc'] = np.zeros(self.grid.dims)\n element['Ji'] = np.zeros(self.grid.dims)\n element['Jlw'] = np.zeros(self.grid.dims)\n\n # List for extrema-finding\n self.all_dTb = self._init_dTb()\n for t, z, data_igm, data_cgm, rc_igm, rc_cgm in self.step():\n\n # Occasionally the progress bar breaks if we're not careful\n if z < self.pf['final_redshift']:\n break\n if z < self.pf['kill_redshift']:\n break\n\n # Delaying the initialization prevents progressbar from being\n # interrupted by, e.g., PrintInfo calls\n if not pb.has_pb:\n pb.start()\n\n pb.update(t)\n\n # Save data\n self.all_z.append(z)\n self.all_t.append(t)\n self.all_dTb.append(data_igm['dTb'][0])\n self.all_data_igm.append(data_igm.copy())\n self.all_RC_igm.append(rc_igm.copy())\n\n if self.pf['include_cgm']:\n self.all_data_cgm.append(data_cgm.copy())\n self.all_RC_cgm.append(rc_cgm.copy())\n\n # Automatically find turning points\n if self.pf['track_extrema']:\n if self.track.is_stopping_point(self.all_z, self.all_dTb):\n break\n\n pb.finish()\n\n self.history_igm = _sort_history(self.all_data_igm, prefix='igm_',\n squeeze=True)\n\n if self.pf['include_cgm']:\n self.history_cgm = _sort_history(self.all_data_cgm, prefix='cgm_',\n squeeze=True)\n else:\n self.history_cgm = {}\n\n self.history = self.history_igm.copy()\n self.history.update(self.history_cgm)\n\n ##\n # In the future, could do this better by only calculating Ja at\n # the end, since it a passive quantity (unless we included its\n # very small heating).\n ##\n #if self.pf['secondary_lya']:\n # xe = lambda zz: np.interp(zz, self.history['z'][-1::-1],\n # self.history['igm_e'][-1::-1])\n # self.medium.field.run(xe=xe)\n # self._f_Ja = self.medium.field._f_Ja\n # #self._f_Jlw = self.medium.field._f_Jlw\n #\n # # Fix Ja in history\n\n self.history['dTb'] = self.history['igm_dTb']\n #self.history['dTb_bulk'] = self.history['igm_dTb_bulk']\n\n self.history['Ts'] = self.history['igm_Ts']\n self.history['Jc'] = self.history['igm_Jc']\n 
self.history['Ji'] = self.history['igm_Ji']\n self.history['Ja'] = self.history['igm_Jc'] + self.history['igm_Ji']\n self.history['Jlw'] = self.history['igm_Jlw']\n\n # Save rate coefficients [optional]\n if self.pf['save_rate_coefficients']:\n self.rates_igm = \\\n _sort_history(self.all_RC_igm, prefix='igm_', squeeze=True)\n self.rates_cgm = \\\n _sort_history(self.all_RC_cgm, prefix='cgm_', squeeze=True)\n\n self.history.update(self.rates_igm)\n self.history.update(self.rates_cgm)\n\n self.history['t'] = np.array(self.all_t)\n self.history['z'] = np.array(self.all_z)\n\n ##\n # Optional extra radio background\n ##\n Tr = np.zeros_like(self.history['z'])\n for popid, pop in enumerate(self.pops):\n if not pop.is_src_radio:\n continue\n\n z, E, flux = self.field.get_history(popid, flatten=True)\n\n E21cm = h_p * nu_0_mhz * 1e6 / erg_per_ev\n f21 = interp1d(E, flux, axis=1, bounds_error=False,\n fill_value=0.0, force_scipy=True)\n flux_21cm = f21(E21cm)\n\n Tr += np.interp(self.history['z'], z, flux_21cm) \\\n * E21cm * erg_per_ev * c**2 / k_B / 2. / (nu_0_mhz * 1e6)**2\n\n if not np.all(Tr == 0):\n assert self.medium.parcel_igm.grid.hydr.Tbg is None\n elif self.medium.parcel_igm.grid.hydr.Tbg is not None:\n Tr = self.medium.parcel_igm.grid.hydr.Tbg(self.history['z'])\n\n self.history['Tr'] = Tr\n\n # Correct the brightness temperature if there are non-CMB backgrounds\n if not np.all(Tr == 0):\n zall = self.history['z']\n n_H = self.medium.parcel_igm.grid.cosm.nH(zall)\n Ts = self.medium.parcel_igm.grid.hydr.Ts(zall,\n self.history['igm_Tk'], self.history['Ja'],\n self.history['igm_h_2'], self.history['igm_e'] * n_H, Tr)\n\n if self.pf['floor_Ts']:\n Ts = max(Ts, self.medium.parcel_igm.grid.hydr.Ts_floor(z=zall))\n\n # Compute volume-averaged ionized fraction\n xavg = self.history['cgm_h_2'] \\\n + (1. 
- self.history['cgm_h_2']) * self.history['igm_h_2']\n\n # Derive brightness temperature\n dTb = self.medium.parcel_igm.grid.hydr.get_21cm_dTb(zall, Ts,\n xavg=xavg, Tr=Tr)\n\n self.history['dTb_no_radio'] = self.history['dTb'].copy()\n self.history['dTb'] = dTb\n\n #self.history['dTb_bulk'] = \\\n # self.medium.parcel_igm.grid.hydr.dTb(zall, 0.0, Ts, Tr)\n\n t2 = time.time()\n\n self.timer = t2 - t1\n\n self.is_complete = True", "def _output_performance(self):\n self.portfolio.create_equity_curve_dataframe()\n\n print(\"Creating summary statistics...\")\n stats = self.portfolio.output_summary_stats()\n\n print(\"Creating equity curve...\")\n print(self.portfolio.equity_curve.tail(10))\n pprint.pprint(stats)\n\n print(\"Signals: %s\" % self.signals)\n print(\"Orders: %s\" % self.orders)\n print(\"Fills: %s\" % self.fills)", "def execute(self, parameters, messages):\n \n #CHECK LICENSING\n #Advanced License\n status = arcpy.SetProduct('arcInfo')\n if status == 'CheckedOut':\n pass\n if status == 'AlreadyInitialized':\n pass\n if status == 'NotLicensed':\n arcpy.ExcecuteError(\"ERROR: ArcGIS Advanced licence is required to run this tool.\")\n if status == 'Failed':\n arcpy.ExcecuteError(\"ERROR: ArcGIS Advanced licence is required to run this tool.\")\n \n #Extensions\n if arcpy.CheckExtension(\"Spatial\") == \"Available\":\n arcpy.CheckOutExtension(\"Spatial\")\n else:\n arcpy.ExcecuteError(\"ERROR: The Spatial Analyst extension is required to run this tool.\")\n \n arcpy.env.overwriteOutput = True\n \n rapid_out_folder = parameters[0].valueAsText\n Drainage_Lines = parameters[1].valueAsText\n Stream_ID_DrainageLine = parameters[2].valueAsText\n Next_Down_ID = parameters[3].valueAsText\n length_field_DrainageLine = parameters[4].valueAsText\n Slope_field_DrainageLine = parameters[5].valueAsText\n Catchment_Features = parameters[6].valueAsText\n Stream_ID_Catchments = parameters[7].valueAsText\n Input_Reservoirs = parameters[8].valueAsText\n \n \n script_directory = os.path.dirname(__file__)\n arcpy.ImportToolbox(os.path.join(os.path.dirname(script_directory), \"RAPID Tools.pyt\"))\n \n #Create Network Connecitivty File\n out_network_connectivity_file = os.path.join(rapid_out_folder, \"rapid_connect.csv\")\n arcpy.CreateNetworkConnectivityFile_RAPIDTools(Drainage_Lines, \n Stream_ID_DrainageLine, \n Next_Down_ID,\n out_network_connectivity_file)\n # Create subset file\n out_subset_file = os.path.join(rapid_out_folder, \"riv_bas_id.csv\") \n arcpy.CreateSubsetFile_RAPIDTools(Drainage_Lines, Stream_ID_DrainageLine, out_subset_file)\n \n \n #Create Muksingum Parameters\n # Process: Muksingum k\n out_muskingum_kfac_file = os.path.join(rapid_out_folder, \"kfac.csv\")\n arcpy.CreateMuskingumKfacFile_RAPIDTools(in_drainage_line_features=Drainage_Lines, \n stream_ID=Stream_ID_DrainageLine, \n length=length_field_DrainageLine, \n slope=Slope_field_DrainageLine, \n co=1000.0/3600.0, \n in_formula=\"Eta*River Length/Sqrt(River Slope) [0.05, 0.95]\", \n in_network_connectivity_file=out_network_connectivity_file,\n out_muskingum_kfac_file=out_muskingum_kfac_file)\n \n out_muskingum_k_file = os.path.join(rapid_out_folder, \"k.csv\")\n arcpy.CreateMuskingumKFile_RAPIDTools(0.35, \n out_muskingum_kfac_file, \n out_muskingum_k_file)\n \n # Process: Muskingum x \n out_muskingum_x_file = os.path.join(rapid_out_folder, \"x.csv\")\n arcpy.CreateMuskingumXField_RAPIDTools(Drainage_Lines, Stream_ID_DrainageLine,\"0.3\", Input_Reservoirs)\n arcpy.CreateMuskingumXFile_RAPIDTools(Drainage_Lines, 
Stream_ID_DrainageLine, out_muskingum_x_file)\n\n if Catchment_Features:\n lsm_grid_directory = os.path.join(script_directory, \"lsm_grids\")\n \n # Create ECMWF Low Res Weight Table\n low_resolution_ecmwf_grid = os.path.join(lsm_grid_directory, \"runoff_ecmwf_tco639_grid.nc\")\n low_resolution_weight_table = os.path.join(rapid_out_folder, \"weight_ecmwf_tco639.csv\") \n arcpy.CreateWeightTableFromECMWFRunoff_RAPIDTools(low_resolution_ecmwf_grid,\n out_network_connectivity_file,\n Catchment_Features,\n Stream_ID_Catchments,\n low_resolution_weight_table) \n\n # Create ECMWF High Res Weight Table\n high_resolution_ecmwf_grid = os.path.join(lsm_grid_directory, \"runoff_ecmwf_t1279_grid.nc\")\n high_resolution_weight_table = os.path.join(rapid_out_folder, \"weight_ecmwf_t1279.csv\") \n arcpy.CreateWeightTableFromECMWFRunoff_RAPIDTools(high_resolution_ecmwf_grid,\n out_network_connectivity_file,\n Catchment_Features,\n Stream_ID_Catchments,\n high_resolution_weight_table) \n \n # Create ERA Interim Weight Table\n era_interim_ecmwf_grid = os.path.join(lsm_grid_directory, \"runoff_era_t511_grid.nc\")\n era_interim_weight_table = os.path.join(rapid_out_folder, \"weight_era_t511.csv\") \n arcpy.CreateWeightTableFromECMWFRunoff_RAPIDTools(era_interim_ecmwf_grid,\n out_network_connectivity_file,\n Catchment_Features,\n Stream_ID_Catchments,\n era_interim_weight_table) \n\n # Flowline to point\n out_point_file = os.path.join(rapid_out_folder, \"comid_lat_lon_z.csv\")\n arcpy.FlowlineToPoint_RAPIDTools(Drainage_Lines, out_point_file)\n\n return", "def _output_performance(self):\n self.portfolio.create_equity_curve_dataframe()\n \n print(\"Creating summary statistics...\")\n stats = self.portfolio.output_summary_stats()\n \n print(\"Creating equity curve...\")\n print(self.portfolio.equity_curve.tail(10))\n pprint.pprint(stats)\n \n print(\"Signals: %s\" % self.signals)\n print(\"Orders: %s\" % self.orders)\n print(\"Fills: %s\" % self.fills)", "def step(self, actions):\n\n representative_data = []\n original_data = []\n\n actions = np.array(actions).reshape(3, -1)\n\n for np_data, df_data, calculator, som, action in zip(self.np_data_list, self.df_data_list, self.calculators, self.som_objects, actions):\n\n representative_days, cluster_numbers = calculator.get_representative_days(\n som, np_data, action)\n\n representative_days = pd.DataFrame(representative_days)\n\n representative_days = self.wide_to_long(representative_days)\n approximation_calc = ApproximateData(df_data, 4)\n representative_days = ApproximateData(df_data, 4).get_load_duration_curve(\n representative_days, cluster_numbers)\n\n representative_data.append(representative_days)\n\n # original_days = approximation_calc.get_load_duration_curve(\n # year=\"2013\")\n\n\n\n # original_data.append(original_days)\n\n # metrics_calculator = Metrics(original_data[0], representative_data[0], original_data[1],\n # representative_data[1], original_data[2], representative_data[2], \"dc\")\n\n pv_original = pd.read_csv(\n '{}data/processed/resources/pv_processed.csv'.format(project_dir))\n wind_original = pd.read_csv(\n '{}data/processed/resources/onshore_processed.csv'.format(project_dir))\n load_original = pd.read_csv(\n '{}data/processed/demand/load_NG/load_processed_normalised.csv'.format(project_dir))\n\n pv_original_ldcs, wind_original_ldcs, load_original_ldcs = get_each_ldc(pv_original, wind_original, load_original)\n\n multi_year_metrics_calculator = MultiYearMetrics(pv_original_ldcs, representative_data[0], wind_original_ldcs, 
representative_data[1], load_original_ldcs, representative_data[2], self.year_start)\n multi_year_metrics = multi_year_metrics_calculator.get_multi_year_average_metrics(\"dc\")\n multi_year_metrics = multi_year_metrics.reset_index()\n # logger.debug(\"multi_year_metrics: \\n{}\".format(multi_year_metrics))\n\n nrmse = multi_year_metrics[multi_year_metrics['metric'] == 'nrmse dc'].iloc[0].value\n rae = multi_year_metrics[multi_year_metrics['metric'] == 'rae dc'].iloc[0].value\n correlation = multi_year_metrics[multi_year_metrics['metric'] == 'correlation'].iloc[0].value\n\n # error_metrics = metrics_calculator.get_mean_error_metrics()\n # nrmse = error_metrics.iloc[1].value\n # rae = error_metrics.iloc[2].value\n # correlation = error_metrics.iloc[0].value\n # reward = -error_metrics.value.sum()\n # logger.info(\"error_metrics: {}\".format(error_metrics))\n # logger.info(\"error_metrics: {}\".format(error_metrics.iloc[0]))\n\n # return reward\n return nrmse, rae, correlation", "def perform(self):\n if self.driver.owner != self.name:\n self.driver.owner = self.name\n\n channel = self.driver.get_channel(self.trace)\n data = channel.read_data_complete(self.highres)\n\n msg = 'Coupling {}, Average number {}'\n oscillo_config = msg. format(channel.sweep, data['VERT_COUPLING'])\n self.write_in_database('oscillo_config', oscillo_config)\n\n # if the TrigArray lentgh is null, it's a simple single sweep waveform\n if data['TRIGTIME_ARRAY'][0] == 0:\n arr = np.rec.fromarrays([data['SingleSweepTimesValuesArray'],\n data['Volt_Value_array']],\n names=['Time (s)', 'Voltage (V)'])\n self.write_in_database('trace_data', arr)\n else:\n arr = np.rec.fromarrays([data['SEQNCEWaveformTimesValuesArray'],\n data['Volt_Value_array']],\n names=['Time (s)', 'Voltage (V)'])\n self.write_in_database('trace_data', )", "def run_model(self, trend_growth='linear', trend_flexibility=0.05, changepoints=None,\n holidays=None, additional_features=None):\n\n # Prepare data to Prophet requirements\n ###################################################################\n # Create column ds with index (demand date)\n demand = self.demand\n demand['ds'] = demand.index\n\n # Adding additional features to the model as a regressor\n if additional_features is not None:\n # Select only the exog features in the list\n demand = demand[['y','ds'] + list(additional_features)]\n\n # Check holidays\n if holidays is not None:\n holidays = self.holidays_format(holidays)\n\n # Specify the carrying capacity (maximum) in a column 'cap'\n if self.market_share is not None:\n demand['cap'] = self.market_share[:len(demand)]\n # Saturating Minimum in a column 'floor'\n demand['floor'] = 0\n\n # Train model\n #####################################################################\n if self.freq is not 'D' or 'B':\n model = Prophet(growth=trend_growth, changepoint_prior_scale=trend_flexibility,\n changepoints=changepoints, holidays=holidays, weekly_seasonality=False)\n else:\n model = Prophet(growth=trend_growth, changepoint_prior_scale=trend_flexibility,\n changepoints=changepoints, holidays=holidays)\n\n # Adding regressors to the linear part of the model\n if additional_features is not None:\n for feature in additional_features:\n model.add_regressor(feature)\n\n # Fit\n model.fit(demand)\n\n # Predict Test\n ####################################################################\n\n # Dataframe to predict (including all history)\n # predict() from Prophet only receives dates as a column 'ds' (and other external variables)\n demand_predict = 
demand.drop(columns=['y'])\n\n # Dataframe with yhat (y predicted) and the components of the model\n forecast = model.predict(demand_predict)\n forecast.set_index('ds', inplace=True) # Set dates as index again\n\n # Diagnostics - Time series cross validation to measure forecast error using historical data\n train_period = int(len(demand) * 0.5) # Initialize with half of the sample\n test_ratio = 0.3\n horizon = int(train_period * test_ratio)\n\n error = self.evaluation(model, training_period=train_period, forecast_horizon=horizon)\n\n return forecast['yhat'], model, error", "def calc_annual_heating_savings (self):\n price = self.diesel_prices + self.cd['heating fuel premium']\n maintenance = self.comp_specs['heat recovery o&m']\n self.annual_heating_savings = -1 * \\\n (maintenance + (self.lost_heat_recovery * price))", "def project_management(self):\n # todo: add relationship to site-specific interface with public infrastructure\n if self.output_dict['actual_construction_months'] < 28:\n project_management_cost = (53.333 * self.output_dict['actual_construction_months'] ** 2 -\n 3442 * self.output_dict['actual_construction_months'] +\n 209542) * (self.output_dict['actual_construction_months'] + 2)\n else:\n project_management_cost = (self.output_dict['actual_construction_months'] + 2) * 155000\n return project_management_cost", "def run_simulation(self):\n print('RUNNING')\n self.table.clearContents()\n self.table.setRowCount(0)\n medium_tube = self.get_medium_inputs(self.input_tube)\n medium_shell = self.get_medium_inputs(self.input_shell)\n rest = self.get_main_inputs(self.input_rest)\n try:\n calculate = Calculate(medium_tube, medium_shell, rest)\n getattr(self, 'TubeT2').setText(str(round(calculate.tube.t2, 2)))\n getattr(self, 'ShellT2').setText(str(round(calculate.shell.t2, 2)))\n vysledky = calculate.calculate_all() \n except Exception as error:\n self.show_error_dialog_to_user(error.args[0])\n else:\n print('Pozadavky na vymenik splnilo {} vymeniku.'.format(len(vysledky)))\n self.show_output(vysledky)\n self.show_graph(vysledky)\n print('DONE!')", "def run(self, max_risk=0, min_return=0, num=0, init_holdings=None):\n if not self.dates:\n self.dates = ['2010-01-01', '2012-12-31']\n self.load_data()\n\n num_months = len(self.df_all)\n first_purchase = True\n result = {}\n baseline_result = {}\n self.baseline_values = [0]\n self.update_values = [0]\n months = []\n\n # Define dataframe to save output data \n headers = ['Date', 'Value'] + self.stocks + ['Variance', 'Returns']\n self.opt_results_df = pd.DataFrame(columns=headers)\n row = []\n\n self.price_df = pd.DataFrame(columns=self.stocks)\n\n # Initialize the plot\n plt.ylim(ymax = 1.5*self.budget, ymin = -1.5*self.budget)\n plt.xticks(list(range(0, num_months, 2)), \n self.df_baseline.index.strftime('%b')[::2], rotation='vertical')\n plt.locator_params(axis='x', nbins=num_months/2)\n plt.plot(list(range(0, num_months)), [0]*(num_months), \n color='red', label=\"Break-even\", linewidth=0.5)\n\n for i in range(3, num_months):\n\n # Look at just the data up to the current month\n df = self.df_all.iloc[0:i+1,:].copy()\n baseline_df_current = self.df_baseline.iloc[0:i+1,:]\n print(\"\\nDate:\", df.last_valid_index())\n months.append(df.last_valid_index().date()) \n\n if first_purchase:\n budget = self.budget\n initial_budget = self.budget\n baseline_shares = (budget / baseline_df_current.iloc[-1])\n baseline_result = {self.baseline[0]: baseline_shares} \n else:\n # Compute profit of current portfolio\n budget = 
sum([df.iloc[-1][s]*result['stocks'][s] for s in self.stocks]) \n self.update_values.append(budget - initial_budget)\n\n # Compute profit of fund portfolio\n fund_value = sum([baseline_df_current.iloc[-1][s]*baseline_result[s] \n for s in self.baseline]) \n self.baseline_values.append(fund_value - initial_budget)\n\n self.budget = budget \n\n self.load_data(df=df)\n\n self.price_df.loc[i-2] = list(self.price.values)\n\n # Output for user on command-line and plot\n update_values = np.array(self.update_values, dtype=object)\n baseline_values = np.array(self.baseline_values, dtype=object)\n plt.plot(range(3, i+1), update_values, \n color='blue', label=\"Optimized portfolio\")\n plt.plot(range(3, i+1), baseline_values, \n color='gray', label=\"Fund portfolio\", linewidth=0.5)\n \n if first_purchase:\n plt.legend(loc=\"lower left\")\n plt.title(\"Start: {start}, End: {end}\".format\\\n (start=self.df_all.first_valid_index().date(), \n end=self.df_all.last_valid_index().date()))\n\n plt.savefig(\"portfolio.png\")\n plt.pause(0.05)\n \n # Making solve run\n if self.model_type == 'DQM':\n print(f\"\\nMulti-Period DQM Run...\")\n \n self.build_dqm()\n self.solution['DQM'] = self.solve_dqm()\n result = self.solution['DQM']\n else:\n print(f\"\\nMulti-Period CQM Run...\")\n\n # Set budget to 0 to enforce that portfolio is self-financing \n if self.t_cost and not first_purchase:\n self.budget = 0 \n\n self.solution['CQM'] = self.solve_cqm(max_risk=max_risk, \n min_return=min_return,\n init_holdings=init_holdings)\n result = self.solution['CQM']\n init_holdings = result['stocks']\n\n # Print results to command-line\n value = sum([self.price[s]*result['stocks'][s] for s in self.stocks])\n returns = result['return']\n variance = result['risk'] \n\n row = [months[-1].strftime('%Y-%m-%d'), value] + \\\n [result['stocks'][s] for s in self.stocks] + \\\n [variance, returns] \n self.opt_results_df.loc[i-2] = row \n \n first_purchase = False\n\n print(self.opt_results_df)\n print(f'\\nRun completed.\\n')\n\n plt.savefig(\"portfolio.png\")\n plt.show(block=False)", "def test_energy_cost(self):\n rs = self.rate.get_rate_schedule(self.eir.api)\n\n i = pd.date_range(start = '2019-05-01', end='2019-06-30', freq='5min')\n s = pd.Series(data=0, index = i, dtype = np.float32)\n\n total = 10.0 * .1338\n total += 10.0 * .0969\n total += 10.0 * .1611\n total += 20.3 * 2\n s[pd.Timestamp('2019-05-01T18:00:00')] = 10.0\n s[pd.Timestamp('2019-05-01T06:00:00')] = 10.0\n s[pd.Timestamp('2019-06-05T15:00:00')] = 10.0\n\n df = rs.get_costs(s)\n\n print(df.head())", "def main():\r\n # Create an instance of the MyCallCostCalc class.\r\n my_callcost = MyCallCostCalc()", "def proforma_report(self, technologies, valuestreams, results, opt_years):\n proforma = super().proforma_report(technologies, valuestreams, results, opt_years)\n proforma_wo_yr_net = proforma.drop('Yearly Net Value', axis=1)\n proforma = self.replacement_costs(proforma_wo_yr_net, technologies)\n proforma = self.zero_out_dead_der_costs(proforma, technologies)\n proforma = self.update_capital_cost_construction_year(proforma, technologies)\n # check if there are are costs on CAPEX YEAR - if there arent, then remove it from proforma\n if not proforma.loc['CAPEX Year', :].any():\n proforma.drop('CAPEX Year', inplace=True)\n # add EOL costs to proforma\n der_eol = self.calculate_end_of_life_value(proforma, technologies, self.inflation_rate,\n opt_years)\n proforma = proforma.join(der_eol)\n if self.ecc_mode:\n for der_inst in technologies:\n if der_inst.tag == 
\"Load\":\n continue\n # replace capital cost columns with economic_carrying cost\n der_ecc_df, total_ecc = der_inst.economic_carrying_cost_report(\n self.inflation_rate, self.end_year, self.apply_rate)\n # drop original Capital Cost\n proforma.drop(columns=[der_inst.zero_column_name()], inplace=True)\n # drop any replacement costs\n if f\"{der_inst.unique_tech_id()} Replacement Costs\" in proforma.columns:\n proforma.drop(columns=[f\"{der_inst.unique_tech_id()} Replacement Costs\"], inplace=True)\n # add the ECC to the proforma\n proforma = proforma.join(total_ecc)\n # add ECC costs broken out by when initial cost occurs to complete DF\n self.ecc_df = pd.concat([self.ecc_df, der_ecc_df], axis=1)\n else:\n proforma = self.calculate_taxes(proforma, technologies)\n # sort alphabetically\n proforma.sort_index(axis=1, inplace=True)\n proforma.fillna(value=0, inplace=True)\n # recalculate the net (sum of the row's columns)\n proforma['Yearly Net Value'] = proforma.sum(axis=1)\n return proforma", "def example_OC3():\n example = sparAssemblyCalculation()\n example.tower_base_outer_diameter = 6.5\n example.tower_top_outer_diameter = 3.87\n example.tower_length = 77.6\n example.example_turbine_size = '3MW' #not sure if this is correct\n example.RNA_center_of_gravity_y = 1.75\n example.wall_thickness = [.057, .056, .042, .046, .052]\n example.rotor_diameter = 126.\n # example.cut_out_speed\n example.air_density = 1.198\n example.wind_reference_speed = 11.\n example.wind_reference_height = 89.350\n example.gust_factor = 1.0\n example.alpha = .11\n example.RNA_center_of_gravity_x = 1.9\n example.tower_mass = 249718.0\n example.RNA_mass = 347460.\n example.stiffener_index = 259\n example.number_of_sections = 5\n example.bulk_head = ['N', 'T', 'N', 'B', 'B']\n example.number_of_rings = [3, 2, 10, 19, 32]\n example.neutral_axis = .21 #not sure if this number is correct\n # example.straight_col_cost\n # example.tapered_col_cost\n # example.outfitting_cost\n # example.ballast_cost\n example.gravity = 9.806\n example.load_condition = 'N'\n example.significant_wave_height = 8.\n example.significant_wave_period = 10.\n example.material_density = 7850\n example.E = 200.\n example.nu = .3\n example.yield_stress = 345.\n example.shell_mass_factor = 1\n example.bulkhead_mass_factor = 1.25\n # example.ring_mass_factor\n example.outfitting_factor = .06\n example.spar_mass_factor = 1.04\n example.permanent_ballast_height = 0.\n example.fixed_ballast_height = 10.\n example.permanent_ballast_density = 4000.\n example.fixed_ballast_density = 4492.48\n # example.offset_amplification_factor\n example.water_density = 1025.\n example.spar_elevations = [10.0, -4.0, -12.0, -42., -71., -120.]\n example.spar_outer_diameter = [6.5, 6.5, 9.4, 9.4, 9.4]\n example.water_depth = 320.\n example.fairlead_depth = 70.\n example.scope_ratio = 3.609\n example.pretension_percent = 11.173 #map doesnt use\n example.mooring_diameter = .09\n example.number_of_mooring_lines = 3\n example.mooring_type = 'CHAIN'\n example.anchor_type = 'PILE'\n example.fairlead_offset_from_shell = .5\n example.user_MBL = 8158000.\n example.user_WML = 71.186\n example.user_AE_storm = 384243000/.006\n example.user_MCPL = 0.\n example.user_anchor_cost = 0.\n example.misc_cost_factor = 10\n example.number_of_discretizations = 20 #map doesnt use\n example.spar.stiffener_curve_fit = False #not sure if this is correct\n example.run()\n print '-------------OC3---------------'\n sys_print(example)", "def main():\n\tresults = []\n\n\tconfig = 
configparser.ConfigParser()\n\tconfig.read(\"simulation.ini\")\n\tsettings = config['sim']\n\n\tcompleted_obj_hw = int(settings[\"ClientsPerCampaign\"]) * float(settings[\"CompletedPctgHW\"])\n\texceeded_obj_hw = float(settings[\"ExceededPctgHW\"])\n\tsignificance_level = float(settings[\"SignificanceLevel\"])\n\tz_val_two_tails = scipy.stats.norm.ppf(1 - (significance_level / 2))\n\n\tprint(\"Completed Target HW: \" + str(completed_obj_hw))\n\tprint(\"Exceeded Target HW: \" + str(exceeded_obj_hw))\n\n\tcompleted_vals = []\n\texceeded_vals = []\n\tdone = False\n\n\tcompleted_avg = 0\n\texceeded_avg = 0\n\tcompleted_hw = 0\n\texceeded_hw = 0\n\n\ti = 0\n\twhile not done:\n\t\tprint(\"RUN: \" + str(i + 1))\n\t\tenv = simpy.Environment()\n\t\tsim = Simulation(env, settings, i == 0)\n\t\tsim.run()\n\t\tresults.append(sim.results)\n\t\ti += 1\n\n\t\tif settings['RunOnce'] == 'yes':\n\t\t\tprint(\"RUN ONCE\")\n\t\t\tsys.exit()\n\n\t\tcompleted_vals.append(sim.results['completed_count'])\n\t\texceeded_vals.append(sim.results['exceeded_proportion'])\n\n\t\tif i < 2:\n\t\t\tprint(\"---------------\")\n\t\t\tcontinue\n\n\t\tcompleted_avg = sum(completed_vals) / len(completed_vals)\n\t\tcompleted_S = sum([(v - completed_avg) ** 2 for v in completed_vals]) / (i - 1)\n\t\tcompleted_S = math.sqrt(completed_S)\n\t\tcompleted_hw = (z_val_two_tails * completed_S) / math.sqrt(i)\n\t\tprint(\"runs: \" + str(i) + \" completed HW: \" + str(completed_hw))\n\n\t\texceeded_avg = sum(exceeded_vals) / len(exceeded_vals)\n\t\texceeded_S = math.sqrt(exceeded_avg * (1 - exceeded_avg))\n\t\texceeded_hw = (z_val_two_tails * exceeded_S) / math.sqrt(i)\n\t\tprint(\"runs: \" + str(i) + \" exceeded HW: \" + str(exceeded_hw))\n\n\t\tif completed_hw < completed_obj_hw and exceeded_hw < exceeded_obj_hw:\n\t\t\tprint(\"END ITERATIONS\")\n\t\t\tdone = True\n\n\t\tprint(\"---------------\")\n\n\n\tfilename = 'results/Results_' + settings['FileSizeGB'] + '_' + settings['TorrentThreshold'] + '_' + settings['HTTPDownThreshold'] \\\n\t\t+ '_' + settings['HTTPUp'] + '_' + str(random.randint(0,10000)) + '.xlsx'\n\n\tprint(\"Saving XLSX to: \" + filename)\n\twb = xs.Workbook(filename)\n\n\tws = wb.add_worksheet()\n\n\tws.write(0, 1, 'Exceded')\n\tws.write(0, 2, 'Completed')\n\n\ti = 1\n\tfor result in results:\n\t\tws.write(i, 0, i)\n\t\tws.write(i, 1, result['exceeded_proportion'])\n\t\tws.write(i, 2, result['completed_count'])\n\t\ti += 1\n\n\tws.write(i, 0, 'average')\n\tws.write(i, 1, exceeded_avg)\n\tws.write(i, 2, completed_avg)\n\ti += 1\n\tws.write(i, 0, 'half width')\n\tws.write(i, 1, exceeded_hw)\n\tws.write(i, 2, completed_hw)\n\n\twb.close()", "def compute(self):\n # update parameters with the values of widgets\n lambd = float(str(self.lambdtext.text()))\n p = float(str(self.ptext.text()))\n # check the values\n # compute\n self.data[2] = self.baseline_als(self.data[1], lambd, p)\n # store the values\n self.lambd = lambd\n self.p = p\n self.updatePlot()", "def job_changes(self):\n cols = \"{:25}{:12.1f}\"\n cols2 = \"{:25}{:12.1f}{:12.1f}\"\n\n lines = [\"Benefit from job creation: \" + self.plant.name + \"\\n\"]\n\n row7 = self.farmer.labor()[1]\n row1 = self.farmer.labor_cost()[1]\n row8 = self.reseller.driving_work()[1]\n row2 = self.reseller.driving_wages()[1]\n row11 = self.reseller.loading_work()[1]\n row12 = self.reseller.loading_wages()[1]\n row9 = self.cofiring_plant.cofuel_om_work()[1]\n row3 = self.cofiring_plant.cofuel_om_wages()[1]\n row6 = -self.coal_work_lost[1]\n row5 = -self.coal_wages_lost[1]\n row10 = 
self.labor[1]\n row4 = self.wages[1]\n\n display_as(row6, \"FTE\")\n display_as(row7, \"FTE\")\n display_as(row8, \"FTE\")\n display_as(row9, \"FTE\")\n display_as(row10, \"FTE\")\n display_as(row11, \"FTE\")\n\n lines.append(cols2.format(\"Biomass collection\", row7, row1))\n lines.append(cols2.format(\"Biomass transportation\", row8, row2))\n lines.append(cols2.format(\"Biomass loading\", row11, row12))\n lines.append(cols2.format(\"O&M\", row9, row3))\n lines.append(cols2.format(\"Mining\", row6, row5))\n lines.append(cols2.format(\"Total\", row10, row4))\n lines.append(\"\")\n lines.append(cols.format(\"Area collected\", self.supply_chain.area()))\n lines.append(\n cols.format(\"Collection radius\", self.supply_chain.collection_radius())\n )\n lines.append(\n cols.format(\"Maximum transport time\", self.reseller.max_trip_time())\n )\n lines.append(cols.format(\"Number of truck trips\", self.reseller.truck_trips[1]))\n lines.append(\"\")\n lines.append(\"Mining job lost from co-firing at \" + self.plant.name + \"\\n\")\n lines.append(cols.format(\"Coal saved\", self.coal_saved[1]))\n lines.append(\n cols.format(\"Productivity\", self.mining_parameter.productivity_underground)\n )\n lines.append(cols.format(\"Job lost\", self.coal_work_lost[1]))\n lines.append(cols.format(\"Job lost\", display_as(self.coal_work_lost[1], \"FTE\")))\n lines.append(\n cols.format(\"Wage\", display_as(self.mining_parameter.wage_mining, \"USD/hr\"))\n )\n lines.append(cols.format(\"Wage lost\", self.coal_wages_lost[1]))\n return \"\\n\".join(lines)", "def execute(self):\n \n # Check engine speed to see if it goes beyond our bounds:\n \n if self.RPM > 6000:\n self.overspeed = True\n else:\n self.overspeed = False\n\n if self.RPM < 1000:\n self.underspeed = True\n else:\n self.underspeed = False\n \n # These Constants are all hard-coded for Gasoline.\n # Eventually, we'll move them to the input so that they can be tweaked.\n # (Possibly by just selecting a fuel-type)\n k = 1.3 # k (Specific heat ratio for Air)\n R = 287.0 # R (Gas constant for Air - J/kg/degK)\n Ru = 8.314 # R (Gas constant for Air - J/mole/degK)\n Hu = 44000.0 # Heating Value for gasoline (44000 kJ/kg)\n Tw = 400.0 # Tw (Combustion Wall Temperature 400 degrees K)\n AFR = 14.6 # Air Fuel Ratio for gasoline\n P_exth = 152 # Exhaust gas pressure\n P_amb = 101.325 # Ambient Pressure (kPa)\n T_amb = 298 # Ambient Temperature (deg K)\n air_density = 1.2 # Air Density (1.2 kg/m**2)\n fuel_density = 740.0 # Gasoline Density (740.0 kg/m**2)\n mw_air = 28.97 # Molecular Weight of Air (g/mol)\n mw_fuel = 114 # Molecular Weight of Gasoline (g/mol)\n\n thetastep = 1.0 # Simulation time stepsize (crank angle degrees)\n\n # Convert mm to m\n stroke = self.stroke*.001\n bore = self.bore*.001\n conrod = self.conrod*.001\n D_v = self.D_v*.001\n L_v = self.L_v*.001\n comp_ratio = self.comp_ratio\n spark_angle = self.spark_angle*pi/180.0\n ncyl = self.ncyl\n IVO = self.IVO\n IVC = self.IVC\n RPM = self.RPM\n\n #--------------------------------------------------------------\n # Calculations independent of crank angle\n #--------------------------------------------------------------\n\n disp = .25*pi*bore*bore*stroke*ncyl\n l_a = conrod/(.5*stroke) # a=half the stroke\n resVol = 1.0/(comp_ratio-1.0)\n n = RPM*.001\n t_to_theta = RPM/60.0*2.0*pi\n thetastep *= pi/180.0\n intake_close = (IVC-180.0)*pi/180.0\n intake_open = (IVO-360.0)*pi/180.0\n\n # Burn duration valid for speeds between 1000 and 6000 RPM (Eq 3-6)\n # Burn end taken at dQ/dt = 1e-15 (very 
conservative)\n burn_duration = (-1.6189*n*n + 19.886*n + 39.951)*pi/180.0\n burn_end = 2.0*burn_duration\n r_burn_duration = 1.0/burn_duration\n\n # Exhaust Temperature valid for speeds between 1000 and 6000 RPM \\\n # (Eq 3-21)\n T_exh = 3.3955*n*n*n - 51.9*n*n + 279.49*n + 676.21\n\n # Residual Mass\n # Exhaust gas (mw = 30.4 g/mol, P = 1.52 atm)\n m_res = 1.52*(101.325)*30.4*disp/((comp_ratio-1.0)*T_exh*Ru)\n\n # Mean Piston Speed\n Cm = 2*stroke*RPM/60.0\n\n # Frictional Loss Factor valid for speeds between 1000 and 6000 RPM \\\n # (Eq 3-19)\n Cf = -0.019738*n + 0.986923\n\n # Charge Heating Factor valid for speeds between 1000 and 6000 RPM \\\n # (Eq 3-20)\n C_heat = -0.043624*n + 1.2953\n\n # Pressure ratio for choked flow in intake valve\n Pratio_crit = (2/(k+1))**(k/(k-1))\n\n # Fuel-Air Molecular Weight\n #mw = (1.0 + AFR)/(AFR*mw_air + 1.0/mw_fuel)\n mw = (AFR*mw_air + mw_fuel)/(1.0+AFR)\n\n #Hohenberg Correlation: crank-angle independent portion\n h_ind = 130.0 * disp**(-0.06) * (Cm+1.4)**0.8\n\n # FMEP (frictional Losses) (Eq 3-22)\n FMEP = .05*n*n + .15*n + .97\n\n # Correct ambient P & T for losses\n P0 = P_amb*Cf\n T0 = T_amb*C_heat\n \n # Orifice equation constant terms\n C1 = (2000.0*mw/(Ru*T0) * (k/(k-1)))\n C2 = thetastep*self.throttle*P0/t_to_theta\n e1 = 2.0/k\n e2 = (k + 1.0)/k\n \n # Heat Input Eq Constant Term\n Qfac = .95*Hu/(1.0+AFR)\n \n # Valve Lifting function constant terms\n valve1 = pi/( IVO + IVC + 180 )\n\n # Initial value for all integration variables (and their dependents)\n mass_in = 0.0\n Qloss = 0.0\n P = P_exth\n Pmix = 0.0\n pmi = 0.0\n\n for thetad in xrange(-360, 181):\n theta = thetad*thetastep\n\n #--------------------------------------------------------------\n # Slider Crank Model\n #--------------------------------------------------------------\n \n s_theta = sin(theta)\n c_theta = cos(theta)\n term = (l_a**2 - s_theta**2)**.5\n term2 = (l_a + 1.0 - c_theta - term) \n\n # Clyinder Volume (Eq 3-1)\n V = disp*( resVol + .5*term2 )\n dV_dtheta = .5*disp*s_theta*( 1.0 + c_theta/term )\n\n # Exposed Combustion Area (Eq 3-2)\n A = .5*pi*bore*( bore + stroke*term2 )\n\n #--------------------------------------------------------------\n # Weibe Function\n #--------------------------------------------------------------\n\n thetaSinceSpark = theta - spark_angle\n\n if thetaSinceSpark > 0 and thetaSinceSpark < burn_end:\n\n # Weibe Function for mass fraction burn (Eq 3-4)\n # weibe = 1.0 - exp(-5.0*pow(thetaSinceSpark/burn_duration, 3))\n fac1 = thetaSinceSpark*r_burn_duration\n dWeibe_dtheta = -exp(-5.0*(fac1)**3.0)*(\n -15.0*(fac1)**2.0)*r_burn_duration\n\n #--------------------------------------------------------------\n # Calculate Total Heat Input\n #--------------------------------------------------------------\n \n # Total Heat Input. (Eq 3-7)\n # Mass_in is integrated as we go from IVO to IVC \n # .95 because not all mass is burned.\n Q = Qfac*mass_in\n \n #--------------------------------------------------------------\n # Calculate Heat Release\n #--------------------------------------------------------------\n \n # Heat Release. (Eq 3-5)\n dQ_dtheta = Q*dWeibe_dtheta\n \n else:\n dQ_dtheta = 0.0\n\n #--------------------------------------------------------------\n # Cylinder Pressure Model\n #--------------------------------------------------------------\n\n # Cylinder Pressure. 
(Eq 3-3)\n P += (((k-1)*(dQ_dtheta - Qloss) - k*P*dV_dtheta)/V)*thetastep\n\n # Calculate mass flow only when intake valve is open\n if theta <= intake_close and theta >= intake_open:\n\n #--------------------------------------------------------------\n # Valve Lift, Area, and Discharge Coefficient\n #--------------------------------------------------------------\n\n phi = valve1*( IVO - IVC + 540 + 2.0*thetad )\n\n # Valve Lift Function. (Eq 3-16)\n Lv = .5*L_v*(1+cos(phi))\n\n # Valve curtain area. (Eq 3-12)\n Ar = pi*D_v*Lv\n \n LD = Lv/D_v\n \n # Discharge coefficient for inlet poppet valve. (Eq 3-18)\n CD = ( 190.47*LD*LD*LD*LD - 143.13*LD*LD*LD +\n 31.248*LD*LD - 2.5999*LD + 0.6913 )\n \n #--------------------------------------------------------------\n # Find pressure ratio for intake flow\n #--------------------------------------------------------------\n \n # Note 5.5 is a fudge factor that still needs investigation.\n Pratio = (P+5.5*Pmix)/P0\n\n # Pratio>1 means outflow\n if Pratio > 1:\n Pratio = 1.0/Pratio\n flow_direction = -1.0\n else:\n flow_direction = 1.0\n\n if Pratio < Pratio_crit:\n Pratio = Pratio_crit\n \n #--------------------------------------------------------------\n # Calculate Intake Mass Flow\n #--------------------------------------------------------------\n \n # Mass flow rate. (Eq 3-15)\n # Note, 3-15 is wrong, or an approximation or something\n # Changed to standard orifice equation for better results\n dm_dtheta = flow_direction*CD*Ar*C2*( C1*\n (Pratio**e1 - Pratio**e2) )**.5\n mass_in += dm_dtheta\n\n\n #--------------------------------------------------------------\n # Fuel Stochiometry\n #--------------------------------------------------------------\n\n # Temperature\n Tg = P*V*mw/((mass_in+m_res)*Ru)\n\n # Mixture Pressure (kPa)\n Pmix = mass_in*T0*Ru/(mw*V)\n\n #--------------------------------------------------------------\n # Calculate Heat Transfer Coefficient\n #--------------------------------------------------------------\n\n # Hohenberg Correlation. (Eq 3-10)\n h = h_ind * P**0.8 * Tg**(-0.4)\n\n #--------------------------------------------------------------\n # Calculate Heat Loss\n #--------------------------------------------------------------\n\n # Heat loss to the Cylinder Wall (Kj/radian). 
(Eq 3-8)\n Qloss = .001*h*A*(Tg-Tw)/t_to_theta\n\n #--------------------------------------------------------------\n # Work & Power\n #--------------------------------------------------------------\n\n # IMEP (Eq 3-23)\n pmi += (P+Pmix)*dV_dtheta\n\n\n # Effective Pressure (Eq 3-24)\n BMEP = pmi*thetastep/disp - FMEP\n\n # Effective Power (kwatt=kN*m/sec) (Eq 3-25)\n self.power = 0.5*BMEP*RPM*disp*ncyl/60\n\n # Torque (kN*m->Nm) (Eq 3-26)\n self.torque = 30.0*self.power/(pi*RPM)*1000.0\n\n # Fuel Burn (liters/sec)\n self.fuel_burn = (ncyl*mass_in*1000.0*RPM)/(\n 60.0*fuel_density*(1.0+AFR)*2.0)\n\n # Engine Wieght (Empirical) (kg)\n self.engine_weight = (100.0/.002)*(disp-.001) + 75.0", "def main():\n \n MiscUtil.PrintInfo(\"\\n%s (RDK v%s; %s): Starting...\\n\" % (ScriptName, rdBase.rdkitVersion, time.asctime()))\n \n (WallClockTime, ProcessorTime) = MiscUtil.GetWallClockAndProcessorTime()\n \n # Retrieve command line arguments and options...\n RetrieveOptions()\n \n # Process and validate command line arguments and options...\n ProcessOptions()\n \n # Perform actions required by the script...\n CalculateRMSD()\n \n MiscUtil.PrintInfo(\"\\n%s: Done...\\n\" % ScriptName)\n MiscUtil.PrintInfo(\"Total time: %s\" % MiscUtil.GetFormattedElapsedTime(WallClockTime, ProcessorTime))", "def run(self):\n\n self._check_hardware_control()\n\n if self._is_stabilizing:\n #If we are locking the power, then need to update teh feedback loop and change the output label\n self._update_feedback()\n self._update_output_voltage_label()\n\n #We always need to update the plots as well and power label\n\n self._update_plots()\n self._update_power_label()\n\n self.gui.force_update()", "def calculate_profit(self):", "def run_baseline_right_away(self, n_days_base, sim_time):\n baseline_power = np.zeros([sim_time, ])\n baseline_cycles = np.zeros([sim_time, ])\n baseline_Tin = np.zeros([sim_time, ])\n baseline_std_Tin = np.zeros([sim_time, ])\n baseline_Tin_max = np.zeros([sim_time, ])\n baseline_Tin_min = np.zeros([sim_time, ])\n baseline_soc = np.zeros([sim_time, ]) \n baseline_std_soc = np.zeros([sim_time, ]) \n\n EIRrated = 0.31019\n Qrated=14600\n\n # inputs and outputs path\n inputs_file='./fleets/HVAC_fleet/data_file/LasVegas_HighCDD.csv'\n bldg_file='./fleets/HVAC_fleet/data_file/normal_building_para.xlsx'\n save_dir='./fleets/HVAC_fleet/data_file/baseline'\n\n\n #read in weather data and increase resolution to match time step of simulation\n inputs=pd.read_csv(inputs_file, sep=',',skipfooter=48,engine='python')\n inputs[c.COL_DATETIME]=pd.to_datetime(inputs[c.COL_DATETIME])\n inputs=inputs.set_index(c.COL_DATETIME)\n inputs_ts=inputs.resample(str(self.sim_step)+'T').interpolate()\n \n for day in range(n_days_base):\n print(\"Day %i\" %(day+1))\n \n #initialize dataframes\n timeB = np.array(np.arange(0,60*60*24,self.sim_step))\n plot_timeB = np.array(np.arange(0,24,self.sim_step/3600))\n\n Tin=pd.DataFrame(data=0,index=timeB,columns=range(0,self.numHVAC))\n Tmass=pd.DataFrame(data=0,index=timeB,columns=range(0,self.numHVAC))\n Twall=pd.DataFrame(data=0,index=timeB,columns=range(0,self.numHVAC))\n Tattic=pd.DataFrame(data=0,index=timeB,columns=range(0,self.numHVAC))\n ACstatus=pd.DataFrame(data=0,index=timeB,columns=range(0,self.numHVAC))\n dTin=pd.DataFrame(data=0,index=timeB,columns=range(0,self.numHVAC))\n dTmass=pd.DataFrame(data=0,index=timeB,columns=range(0,self.numHVAC))\n dTwall=pd.DataFrame(data=0,index=timeB,columns=range(0,self.numHVAC))\n 
dTattic=pd.DataFrame(data=0,index=timeB,columns=range(0,self.numHVAC))\n Power=pd.DataFrame(data=0,index=timeB,columns=range(0,self.numHVAC))\n cycles=pd.DataFrame(data=0,index=timeB,columns=range(0,self.numHVAC))\n SOC=pd.DataFrame(data=0,index=timeB,columns=range(0,self.numHVAC))\n \n num_homes = self.numHVAC \n \n #Load building characteristics from file\n df_bldg=pd.DataFrame.from_csv(bldg_file,sep=',')\n \n #Initialize Temperatures and assign building and AC characteristics\n b=[0]*num_homes\n ac=[0]*num_homes\n\n for i in range(0,num_homes):\n b[i]=building(df_bldg.T_in[i],df_bldg.T_mass[i],df_bldg.T_wall[i],df_bldg.T_attic[i],df_bldg.Rwall[i],df_bldg.Rattic[i],df_bldg.Rwin[i],df_bldg.SHGC[i],df_bldg.Cwall[i],df_bldg.Cin[i],df_bldg.C1[i],df_bldg.C2[i],df_bldg.C3[i],df_bldg.Cattic[i],df_bldg.Rroof[i],df_bldg.Cmass[i],df_bldg.Rmass[i],df_bldg.Sp1[i],df_bldg.Sp2[i],df_bldg.Sp3[i],df_bldg.Qrated[i],df_bldg.EIRrated[i],df_bldg.TinWB[i],df_bldg.Initial_On[i])\n Tin.iloc[0,i]=b[i].T_in\n Tmass.iloc[0,i]=b[i].T_mass\n Twall.iloc[0,i]=b[i].T_wall\n Tattic.iloc[0,i]=b[i].T_attic\n\n ac[i]=AC(df_bldg.Qrated[i],df_bldg.EIRrated[i])\n \n #Main simulation loop \n for i in range(0,int(1440*60/self.sim_step)):\n for j in range(0,num_homes): \n \n if i>0: #initialize AC status for each timestep to prior state\n ACstatus.iloc[i,j]=ACstatus.iloc[i-1,j]\n else: #Set AC status based on file input for first timestep\n ACstatus.iloc[i,j]=b[j].Initial_On\n if Tin.iloc[i,j]>=self.Tset+self.deadband and ACstatus.iloc[max(i-self.shortcycle_ts,0):i,j].sum()==0: #if temp is above deadband and unit has not run in past duration corresponding to short cycle timer turn on unit\n ACstatus.iloc[i,j]=1.0\n if Tin.iloc[i,j]<=self.Tset-self.deadband: #if temperature is below bottom deadband, turn off unit\n ACstatus.iloc[i,j]=0.0\n #count cycles\n if i>0 and ACstatus.iloc[i-1,j]==1.0 and ACstatus.iloc[i,j]==0.0:\n cycles.iloc[i,j]=1.0\n \n #calculate power use for each AC based on status\n Power.iloc[i,j]=ACstatus.iloc[i,j]*Capacity(ac[j],inputs_ts[c.COL_TOUT][i],b[j].TinWB)*EIR(ac[j],inputs_ts[c.COL_TOUT][i],b[j].TinWB)\n \n # calculate SOC for each AC\n SOC.iloc[i,j] = (self.Tset+self.deadband - Tin.iloc[i,j])/(2*self.deadband)\n\n #building model dT calculations\n ts = self.sim_step\n dTin.iloc[i,j]=ts*1.0/b[j].Cin*((Twall.iloc[i,j]-Tin.iloc[i,j])*2.0/b[j].Rwall\n +(Tattic.iloc[i,j]-Tin.iloc[i,j])/b[j].Rattic\n +(Tmass.iloc[i,j]-Tin.iloc[i,j])/b[j].Rmass+inputs_ts[c.COL_QIHL][i]*b[j].C1*b[j].Sp1\n +inputs_ts[c.COL_RADWIN][i]*b[j].SHGC*25.76*b[j].C3*b[j].Sp3\n -ACstatus.iloc[i,j]*Capacity(ac[j],inputs_ts[c.COL_TOUT][i],b[j].TinWB)*SHR(inputs_ts[c.COL_TOUT][i],Tin.iloc[i,j],b[j].TinWB)*b[j].C2*b[j].Sp2\n +(inputs_ts[c.COL_TOUT][i]-Tin.iloc[i,j])/b[j].Rwin)\n dTmass.iloc[i,j]=ts*1.0/b[j].Cmass*((Tin.iloc[i,j]-Tmass.iloc[i,j])/b[j].Rmass\n +inputs_ts[c.COL_QIHL][i]*b[j].C1*(1-b[j].Sp1)\n +inputs_ts[c.COL_RADWIN][i]*b[j].SHGC*25.76*b[j].C3*(1-b[j].Sp3)\n -ACstatus.iloc[i,j]*Capacity(ac[j],inputs_ts[c.COL_TOUT][i],b[j].TinWB)*SHR(inputs_ts[c.COL_TOUT][i],Tin.iloc[i,j],b[j].TinWB)*b[j].C2*(1-b[j].Sp2))\n dTwall.iloc[i,j]=ts*1.0/b[j].Cwall*((inputs_ts['Tsolw'].iloc[i]-Twall.iloc[i,j])*2.0/b[j].Rwall\n +(Tin.iloc[i,j]-Twall.iloc[i,j])*2.0/b[j].Rwall)\n dTattic.iloc[i,j]=ts*1.0/b[j].Cattic*((inputs_ts['Tsolr'].iloc[i]-Tattic.iloc[i,j])/b[j].Rroof\n +(Tin.iloc[i,j]-Tattic.iloc[i,j])/b[j].Rattic)\n \n #calculate temperatures for next time step\n if i<(1440*60/ts-1):\n Tin.iloc[i+1,j]=Tin.iloc[i,j]+dTin.iloc[i,j]\n 
Tmass.iloc[i+1,j]=Tmass.iloc[i,j]+dTmass.iloc[i,j]\n Tattic.iloc[i+1,j]=Tattic.iloc[i,j]+dTattic.iloc[i,j]\n Twall.iloc[i+1,j]=Twall.iloc[i,j]+dTwall.iloc[i,j]\n\n # calculate peak power and plot data\n PeakPower=Power.sum(axis=1).max()*1.0 # plot out the peak power in kW\n Plot_Power=np.full(len(plot_timeB),PeakPower/1000.0)\n fig,ax = plt.subplots(2,1,figsize=(6,8),sharey='row')\n p1=ax[0].plot(plot_timeB,Power.sum(axis=1)/1000.0,color='blue',linestyle='solid',label='Baseline')\n ax[0].plot(plot_timeB,Plot_Power,color='black',linestyle='--',label='Targeted Power')\n ax[0].set_ylabel('Total Power (kW)')\n ax[1].set_ylabel('Indoor Temperature ($^\\circ$C)')\n ax[1].set_xlabel('Hour of Day')\n p2=ax[1].plot(plot_timeB,Tin.mean(axis=1),color='blue',linestyle='solid',label='Baseline Avg')\n p3=ax[1].plot(plot_timeB,Tin.max(axis=1),color='blue',linestyle='dotted',label='Baseline Min/Max')\n p4=ax[1].plot(plot_timeB,Tin.min(axis=1),color='blue',linestyle='dotted',label='_nolegend_')\n \n # Saves baseline data to csv\n #ToDo: needs cut the whole day simulation to compare only segment with providing grid services\n # But, that requires the simulation steps information.\n\n Power.to_csv(str(save_dir)+'\\\\Power_base'+'.csv')\n Tin.to_csv(str(save_dir)+'\\\\Tin_base'+'.csv')\n SOC.to_csv(str(save_dir)+'\\\\SOC_base'+'.csv')\n cycles.to_csv(str(save_dir)+'\\\\Cycles_base'+'.csv')\n \n # return values\n baseline_power = Power.sum(axis = 1)\n baseline_cycles = cycles.sum(axis = 1)\n\n baseline_soc = SOC.mean(axis = 1)\n baseline_std_soc = SOC.std(axis = 1)\n\n baseline_Tin = Tin.mean(axis = 1)\n baseline_std_Tin = Tin.std(axis = 1)\n baseline_Tin_max = Tin.max(axis=1)\n baseline_Tin_min = Tin.min(axis=1)\n \n return baseline_soc, baseline_std_soc, baseline_power, baseline_cycles, baseline_Tin, baseline_std_Tin, baseline_Tin_max, baseline_Tin_min", "def engineering_foundations_collection_sys(self):\n # development engineering costs for foundations and collection system\n if self.input_dict['project_size_megawatts'] < 200:\n development_engineering_cost = 7188.5 * self.input_dict['num_turbines'] + round(3.4893 * math.log(self.input_dict['num_turbines']) - 7.3049, 0) * 16800 + 165675\n else:\n development_engineering_cost = 7188.5 * self.input_dict['num_turbines'] + round(3.4893 * math.log(self.input_dict['num_turbines']) - 7.3049, 0) * 16800 + 327250\n\n # engineering costs for met masts\n # TODO: Projects less than 30 MW for met masts\n if 30 <= self.input_dict['project_size_megawatts'] <= 100:\n num_perm_met_mast = 2\n num_temp_met_mast = 2\n elif 100 < self.input_dict['project_size_megawatts'] <= 300:\n num_perm_met_mast = 2\n num_temp_met_mast = 4\n elif self.input_dict['project_size_megawatts'] > 300:\n num_perm_met_mast = round(self.input_dict['project_size_megawatts'] / 100)\n num_temp_met_mast = round(self.input_dict['project_size_megawatts'] / 100) * 2\n else:\n num_perm_met_mast = 1\n num_temp_met_mast = 1\n\n if self.input_dict['hub_height_meters'] < 90:\n multiplier_perm = 232600\n multiplier_temp = 92600\n else:\n multiplier_perm = 290000\n multiplier_temp = 116800\n\n met_mast_cost = (num_perm_met_mast * multiplier_perm) + (num_temp_met_mast * multiplier_temp) + 200000\n\n engineering_cost = development_engineering_cost + met_mast_cost\n\n return engineering_cost", "def total_management_cost(self):\n total = 0\n total += self.output_dict['insurance_usd']\n total += self.output_dict['construction_permitting_usd']\n total += self.output_dict['bonding_usd']\n total += 
self.output_dict['project_management_usd']\n total += self.output_dict['markup_contingency_usd']\n total += self.output_dict['engineering_usd']\n total += self.output_dict['site_facility_usd']\n return total", "def main():\n\tshow_program_intro()\n\tbyte_lines = read_rain_gauge_sunnyside_school()\n\t#print_rain_guage_output(byte_lines)\n\ttotals_dict = parse_regex_daily_total(byte_lines)\n\ttotals_list = sort_rain_dictionary(totals_dict)\n\thighest_rainfall = get_day_highest_rainfall(totals_list)\n\tprint_highest_rainfall(highest_rainfall)\n\tyear_highest_rain = get_year_with_most_rain(totals_list)\n\tprint_year_most_rain(year_highest_rain)", "def main():\n\n # Get dataset and create pandas dataframe\n f_data = \"../data/dataset.xlsx\"\n df = pd.read_excel(f_data)\n\n # Get variables for indices\n years = list(set(df[\"Year\"][3:]))\n years_arr = df[\"Year\"][3:]\n\n # Get values from dataset\n population = df[\"Population.1\"][3:]\n auto_commuters = df[\"Auto\"][3:]\n free_traffic = df[\"Freeway\"][3:]\n arterial_traffic = df[\"Arterial Street\"][3:]\n general_time_value = df[\"Cost Components\"][3:]\n commercial_time_value = df[\"Unnamed: 12\"][3:]\n gasoline_cost = df[\"Unnamed: 13\"][3:]\n diesel_cost = df[\"Unnamed: 14\"][3:]\n excess_fuel_per_commuter = df[\"Unnamed: 20\"][3:]\n annual_hrs_of_delay = df[\"Unnamed: 24\"][3:]\n travel_time_index = df[\"Travel Time Index\"][3:]\n cost_per_autocommuter = df[\"Unnamed: 34\"][3:]\n uber = df[\"Uber Entry Dummies\"][3:]\n lyft = df[\"Lyft Entry Dummies\"][3:]\n both = df[\"UberXlyft\"][3:]\n unemployment = df[\"Unemployment Rate (%)\"][3:]\n\n # Get covariances\n filled_ump = copy.deepcopy(unemployment).fillna(value=0)\n print(\"Correlation of uber and ump: {}\".format(np.corrcoef(filled_ump, uber)))\n print(\"Correlation of lyft and ump: {}\".format(np.corrcoef(filled_ump, lyft)))\n print(\"Covariance of tti and ump: {}\".format(np.corrcoef(filled_ump,\n travel_time_index.astype(np.float32))))\n print(\"Covariance of cost and ump: {}\".format(np.corrcoef(filled_ump,\n cost_per_autocommuter.astype(np.float32))))\n print(\"Covariance of excess and ump: {}\".format(np.corrcoef(filled_ump,\n excess_fuel_per_commuter.astype(np.float32))))\n print(\"Covariance of delay and ump: {}\".format(np.corrcoef(filled_ump,\n annual_hrs_of_delay.astype(np.float32))))\n\n # Create output data structure\n year_dict = {years[i]: {\"pop\": [], \"auto\": [], \"free\": [], \"art\": [],\n \"gen_time\": [], \"comm_time\": [], \"gas\": [], \"diesel\":\n [], \"ann_delay\": [], \"travel_index\": [], \"cost\":\n [], \"ub\": [], \"ly\": [], \"bo\": [], \"ump\": [],\n \"excess_gas\": []} for i in range(len(years))}\n\n # Counter variable\n i = 0\n\n # Iterate through everything for plots\n for year, pop, auto, free, art, gen_time, comm_time, gas, diesel, excess_gas, \\\n ann_delay, travel_index, cost, ub, ly, bo, ump in \\\n zip(years_arr, population, auto_commuters, free_traffic,\n arterial_traffic, general_time_value, commercial_time_value,\n gasoline_cost, diesel_cost, excess_fuel_per_commuter,\n annual_hrs_of_delay, travel_time_index, cost_per_autocommuter,\n uber, lyft, both, unemployment):\n\n # Append values to dictionary for plotting\n year_dict[year][\"pop\"].append(pop)\n year_dict[year][\"auto\"].append(auto)\n year_dict[year][\"free\"].append(free)\n year_dict[year][\"art\"].append(art)\n year_dict[year][\"gen_time\"].append(gen_time)\n year_dict[year][\"comm_time\"].append(comm_time)\n year_dict[year][\"gas\"].append(gas)\n 
year_dict[year][\"diesel\"].append(diesel)\n year_dict[year][\"ann_delay\"].append(ann_delay)\n year_dict[year][\"travel_index\"].append(travel_index)\n year_dict[year][\"cost\"].append(cost)\n year_dict[year][\"ub\"].append(ub)\n year_dict[year][\"ly\"].append(ly)\n year_dict[year][\"bo\"].append(bo)\n year_dict[year][\"ump\"].append(ump)\n year_dict[year][\"excess_gas\"].append(excess_gas)\n\n # Average values according to year\n for key_i in list(year_dict.keys()):\n for key_j in list(year_dict[key_i].keys()):\n vals = copy.deepcopy(year_dict[key_i][key_j])\n year_dict[key_i][key_j] = np.mean(vals)\n\n # Now make arrays for time series data\n pop_by_year = [year_dict[years[i]][\"pop\"] for i in range(len(years))]\n auto_by_year = [year_dict[years[i]][\"auto\"] for i in range(len(years))]\n free_by_year = [year_dict[years[i]][\"free\"] for i in range(len(years))]\n art_by_year = [year_dict[years[i]][\"art\"] for i in range(len(years))]\n gen_time_by_year = [year_dict[years[i]][\"gen_time\"] for i in range(len(years))]\n comm_time_by_year = [year_dict[years[i]][\"comm_time\"] for i in range(len(\n years))]\n gas_by_year = [year_dict[years[i]][\"gas\"] for i in range(len(years))]\n diesel_by_year = [year_dict[years[i]][\"diesel\"] for i in range(len(years))]\n ann_delay_by_year = [year_dict[years[i]][\"ann_delay\"] for i in range(len(\n years))]\n travel_index_by_year = [year_dict[years[i]][\"travel_index\"] for i in\n range(len(years))]\n cost_by_year = [year_dict[years[i]][\"cost\"] for i in range(len(years))]\n ub_by_year = [year_dict[years[i]][\"ub\"] for i in range(len(years))]\n ly_by_year = [year_dict[years[i]][\"ly\"] for i in range(len(years))]\n bo_by_year = [year_dict[years[i]][\"bo\"] for i in range(len(years))]\n ump_by_year = [year_dict[years[i]][\"ump\"] for i in range(len(years))]\n excess_gas_per_year = [year_dict[years[i]][\"excess_gas\"] for i in range(len(\n years))]\n\n\n # Make plots\n plt.plot(years, pop_by_year)\n plt.xlabel(\"Year\")\n plt.ylabel(\"Average Population of UMR Urban Centers (1000s)\")\n plt.title(\"Average Population of Urban Mobility Report Urban Centers over Time\")\n plt.savefig(\"../graphs/pop_vs_time.png\")\n plt.clf()\n\n plt.plot(years, auto_by_year)\n plt.xlabel(\"Year\")\n plt.ylabel(\"Autocommuters (1000s)\")\n plt.title(\"Average Number of Autocommuters in UMI Urban Centers (1000s)\")\n plt.savefig(\"../graphs/auto_vs_time.png\")\n plt.clf()\n\n plt.plot(years, free_by_year, color=\"b\", label=\"Freeways\")\n plt.plot(years, art_by_year, color=\"r\", label=\"Arterial Roads\")\n plt.legend()\n plt.xlabel(\"Year\")\n plt.ylabel(\"Driving Distance (miles)\")\n plt.title(\"Average Net Freeway/Arterial Road Driving over Time (\"\n \"1000s of miles)\")\n plt.savefig(\"../graphs/dist_vs_time.png\")\n plt.clf()\n\n plt.plot(years, gen_time_by_year, color=\"b\", label=\"General Value\")\n plt.plot(years, comm_time_by_year, color=\"r\", label=\"Commercial Value\")\n plt.legend()\n plt.xlabel(\"Year\")\n plt.ylabel(\"Value ($/hr)\")\n plt.title(\"Average General and Commercial Values of Time over Time\")\n plt.savefig(\"../graphs/val_of_time_vs_time.png\")\n plt.clf()\n\n plt.plot(years, gas_by_year, color=\"b\", label=\"Gasoline\")\n plt.plot(years, diesel_by_year, color=\"r\", label=\"Diesel\")\n plt.legend()\n plt.xlabel(\"Year\")\n plt.ylabel(\"Cost ($/gallon)\")\n plt.title(\"Average Cost of Gasoline and Diesel Fuel over Time\")\n plt.savefig(\"../graphs/gas_vs_time.png\")\n plt.clf()\n\n plt.plot(years, ann_delay_by_year)\n 
plt.xlabel(\"Year\")\n plt.ylabel(\"Annual per-Commuter Traffic Delays (hrs)\")\n plt.title(\"Average Annual per-Commuter Traffic Delays over Time\")\n plt.savefig(\"../graphs/delay_vs_time.png\")\n plt.clf()\n\n plt.plot(years, travel_index_by_year)\n plt.xlabel(\"Year\")\n plt.ylabel(\"Travel Index\")\n plt.title(\"Average Travel Index over Time\")\n plt.savefig(\"../graphs/index_vs_time.png\")\n plt.clf()\n\n plt.plot(years, ump_by_year)\n plt.xlabel(\"Year\")\n plt.ylabel(\"Unemployment Rate (%)\")\n plt.title(\"Average Unemployment Rate over Time\")\n plt.savefig(\"../graphs/ump_vs_time.png\")\n plt.clf()\n\n plt.plot(years, cost_by_year)\n plt.xlabel(\"Year\")\n plt.ylabel(\"Cost ($)\")\n plt.title(\"Average Annual per-Capita Cost of Traffic Congestion over Time\")\n plt.savefig(\"../graphs/cost_vs_time.png\")\n plt.clf()\n\n plt.plot(years, excess_gas_per_year)\n plt.xlabel(\"Year\")\n plt.ylabel(\"Excess Fuel Consumed (Gallons)\")\n plt.title(\"Average Annual per-Capita Excess Fuel Consumed over Time\")\n plt.savefig(\"../graphs/extra_fuel_vs_time.png\")\n plt.clf()\n\n x = list(lyft) # Lyft data\n y = list(uber) # Uber data\n bins = [2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018]\n\n plt.hist([x, y], bins, label=['Lyft', 'Uber'])\n plt.legend(loc='upper right')\n plt.xlabel(\"Year\")\n plt.ylabel(\"Number of cities entered\")\n plt.title(\"Uber and Lyft Entry into Urban Mobility Report Cities\")\n plt.clf()", "def main():\n\n # Load network for supplemental trip calculations\n my_project = emmeproj.EmmeProject(me_config.supplemental_project)\n network_importer(my_project)\n\n parcels_military = pd.read_csv('inputs/supplemental/enlisted_personnel_bkr.csv')\n parcels_military = parcels_military.loc[parcels_military['year'] == int(bkr_config.model_year)]\n parcels_urbansim = pd.read_csv(os.path.join(bkr_config.parcels_file_folder, access_config.parcels_file_name), sep = \" \", index_col = None )\n parcels_urbansim.index = parcels_urbansim['PARCELID']\n\n # FIXME: uniform upper/lower\n # Convert columns to upper case for now\n parcels_urbansim.columns = [i.upper() for i in parcels_urbansim.columns]\n\n ########################################\n # Add military jobs to parcel employment\n ########################################\n\n # Take sum of jobs across parcels; take first value for the parcel's TAZ ID\n parcels_military = parcels_military.groupby('PSRC_ID').agg({'military_jobs':'sum', 'BKRCastTAZ':'first'}).reset_index()\n parcels_military.index = parcels_military['PSRC_ID'].astype('int')\n\n # Update parcels with enlisted jobs, for Government employment (EMPGOV_P) category and Total employment (EMPTOT_P)\n parcels_urbansim['military_jobs'] = 0\n parcels_urbansim.update(parcels_military)\n\n for col in ['EMPGOV_P', 'EMPTOT_P']:\n parcels_urbansim[col] = parcels_urbansim[col] + parcels_urbansim['military_jobs']\n\n # Log summary of jobs per TAZ added for verification\n parcels_urbansim[parcels_urbansim['military_jobs'] > 0].groupby('TAZ_P').sum()[['military_jobs']].to_csv(r'outputs\\supplemental\\military_jobs_added.csv')\n\n # Drop military jobs column\n parcels_urbansim.drop('military_jobs', axis=1, inplace=True)\n\n #####################################################################################\n # Calculate Trip Distribution for Internal-External and External-Internal Work Trips\n #####################################################################################\n\n # Get Zone Index\n zonesDim = len(my_project.current_scenario.zone_numbers)\n zones = 
my_project.current_scenario.zone_numbers\n dictZoneLookup = dict((value,index) for index,value in enumerate(zones))\n\n # Load commute pattern data for workers in/out of PSRC region; keep only the needed columns\n # DB table \"external_trip_distribution\" generated from LEHD LODES data, 2014\n work = pd.read_csv('inputs/supplemental/external_trip_distribution_bkr.csv')\n ixxi_cols = ['Total_IE', 'Total_EI', 'SOV_Veh_IE', 'SOV_Veh_EI','HOV2_Veh_IE','HOV2_Veh_EI','HOV3_Veh_IE','HOV3_Veh_EI']\n work = work[['BKRCastTAZ','External_Station']+ixxi_cols]\n\n # Scale this based on forecasted employment growth between model and base year\n base_year_scaling = pd.read_csv('inputs/supplemental/base_year_scaling.csv')\n\n # Base year employment\n base_year_totemp = base_year_scaling[(base_year_scaling['year'] == int(bkr_config.supplemental_module_base_year)) & \n (base_year_scaling['field'] == 'emptot_p')]['value'].values[0]\n model_year_totemp = parcels_urbansim['EMPTOT_P'].sum()\n emp_scaling = model_year_totemp/base_year_totemp\n for col in work[ixxi_cols]:\n work[col] = np.where((work['BKRCastTAZ'].isin(me_config.EXTERNALS_DONT_GROW))|(work['External_Station'].isin(me_config.EXTERNALS_DONT_GROW)), work[col], work[col]*emp_scaling)\n\n # group trips by O-D TAZ's (trips from external stations to internal TAZs)\n w_grp = work.groupby(['BKRCastTAZ','External_Station']).sum()\n\n # FIXME: add some logging here to verify the results are as expected\n \n # Create empty numpy matrices for SOV, HOV2 and HOV3, populate with results\n w_SOV = np.zeros((zonesDim,zonesDim), np.float64)\n w_HOV2 = np.zeros((zonesDim,zonesDim), np.float16)\n w_HOV3 = np.zeros((zonesDim,zonesDim), np.float16)\n\n # Populate the numpy trips matrices; iterate through each internal TAZ (i) and External Station (j)\n for i in work['BKRCastTAZ'].value_counts().keys():\n for j in work.groupby('BKRCastTAZ').get_group(i)['External_Station'].value_counts().keys(): #all the external stations for each internal PSRC_TAZ\n #SOV\n w_SOV[dictZoneLookup[i],dictZoneLookup[j]] = w_grp.loc[(i,j),'SOV_Veh_IE']\n w_SOV[dictZoneLookup[j],dictZoneLookup[i]] = w_grp.loc[(i,j),'SOV_Veh_EI']\n #HOV2\n w_HOV2[dictZoneLookup[i],dictZoneLookup[j]] = w_grp.loc[(i,j),'HOV2_Veh_IE']\n w_HOV2[dictZoneLookup[j],dictZoneLookup[i]] = w_grp.loc[(i,j),'HOV2_Veh_EI']\n #HOV3\n w_HOV3[dictZoneLookup[i],dictZoneLookup[j]] = w_grp.loc[(i,j),'HOV3_Veh_IE']\n w_HOV3[dictZoneLookup[j],dictZoneLookup[i]] = w_grp.loc[(i,j),'HOV3_Veh_EI']\n # Get return trips (internal->external) by transposing external->internal trip table\n sov = w_SOV + w_SOV.transpose()\n hov2 = w_HOV2 + w_HOV2.transpose()\n hov3 = w_HOV3 + w_HOV3.transpose()\n\n matrix_dict = {'sov' : sov, 'hov2' : hov2, 'hov3' : hov3}\n\n # Create h5 files for export\n if not os.path.exists(me_config.supplemental_output_dir):\n os.makedirs(me_config.supplemental_output_dir)\n\n with open(os.path.join(me_config.supplemental_output_dir, 'ixxi_work_trips_summary.txt'), 'w') as fp:\n fp.write(f'{bkr_config.project_folder}\\n')\n daily_trips = 0\n for tod, factor in tod_factors.items():\n my_store = h5py.File(me_config.supplemental_output_dir + '/' + 'external_work_' + tod + '.h5', \"w\")\n todtrips = 0\n fp.write(f'{tod}\\n')\n for mode, matrix in matrix_dict.items():\n matrix = matrix * factor\n my_store.create_dataset(str(mode), data=matrix, compression = 'gzip')\n fp.write(f' {mode}: {matrix.sum()}\\n')\n todtrips += matrix.sum()\n print(f'total ixxi work trips in {tod}: {todtrips}')\n fp.write(f' subtotal: 
{todtrips}\\n')\n daily_trips += todtrips\n my_store.close()\t\n print(f'daily ixxi work trips: {daily_trips}')\n fp.write('\\n')\n fp.write(f'daily ixxi work trips: {daily_trips}')\n ##################################################\n # Create \"psrc_worker_ixxifractions\" file\n # Update numworkers per TAZ\n ##################################################\n\n w_grp.reset_index(inplace= True)\n w_grp.reset_index(inplace= True)\n observed_ixxi = w_grp.groupby('BKRCastTAZ').sum()\n observed_ixxi = observed_ixxi.reindex(zones, fill_value=0)\n observed_ixxi.reset_index(inplace = True)\n\n # Remove jobs from JBLM Military zones so they are NOT available in Daysim choice models\n # These jobs are assumed \"locked\" and not available to civilian uses so are excluded from choice sets\n parcels_urbansim = remove_employment_by_taz(parcels_urbansim, jblm_taz_list, parcel_emp_cols)\n hh_persons = h5py.File(bkr_config.households_persons_file, \"r\")\n parcel_grouped = parcels_urbansim.groupby('TAZ_P')\n emp_by_taz = pd.DataFrame(parcel_grouped['EMPTOT_P'].sum())\n emp_by_taz.reset_index(inplace = True)\n\n # Update the total number of workers per TAZ to account for removed military jobs\n person_df = h5_to_data_frame(hh_persons, 'Person')\n person_df = person_df.loc[(person_df.pwtyp > 0)]\n hh_df = h5_to_data_frame(hh_persons, 'Household')\n merged = person_df.merge(hh_df, how='left', on='hhno')\n merged_grouped = merged.groupby('hhtaz')\n workers_by_taz = pd.DataFrame(merged_grouped['pno'].count())\n workers_by_taz.rename(columns={'pno' :'workers'}, inplace = True)\n workers_by_taz.reset_index(inplace = True)\n\n # Calculate fraction of workers that do not work in the region, for each zone\n # Calculate fraction of jobs in each zone that are occupied by workers from external regions\n # These data are used to modify workplace location choices\n final_df = emp_by_taz.merge(workers_by_taz, how='left', left_on='TAZ_P', right_on='hhtaz')\n final_df = observed_ixxi.merge(final_df, how='left', left_on='BKRCastTAZ', right_on='TAZ_P')\n final_df['Worker_IXFrac'] = final_df.Total_IE/final_df.workers\n final_df['Jobs_XIFrac'] = final_df.Total_EI/final_df.EMPTOT_P\n\n final_df.loc[final_df['Worker_IXFrac'] > 1, 'Worker_IXFrac'] = 1\n final_df.loc[final_df['Jobs_XIFrac'] > 1, 'Jobs_XIFrac'] = 1\n\n final_df = final_df.replace([np.inf, -np.inf], np.nan) \n final_df = final_df.fillna(0)\n final_cols = ['BKRCastTAZ', 'Worker_IXFrac', 'Jobs_XIFrac']\n\n for col_name in final_df.columns:\n if col_name not in final_cols:\n final_df.drop(col_name, axis=1, inplace=True)\n final_df = final_df.round(3)\n\n final_df.to_csv(os.path.join(access_config.land_use_output_folder, 'bkr_worker_ixxifractions.dat'), sep = '\\t', index = False, header = False)\n #parcels_urbansim.to_csv(r'inputs/scenario/landuse/parcels_urbansim.txt', sep = ' ', index = False)\n\n my_project.closeDesktop()", "def new_labor_subcontract(self) -> float:\n driver_labor_cost = self.driver_labor_cost(self.inputs.trucks_total)\n depot_labor_cost = self.depot_labor_cost(self.inputs.trucks_total)\n landfill_labor_cost = self.landfill_labor_cost()\n return driver_labor_cost + depot_labor_cost + landfill_labor_cost", "def run_calc(self):\n\n from openquake.calculators import base, getters\n from openquake.baselib import config, performance, zeromq\n if self.vtag >= 11:\n from openquake.baselib import version\n else:\n from openquake.baselib import __version__ as version\n\n with self.calculator._monitor:\n self.calculator._monitor.username = ''\n try:\n # 
Pre-execute setups\n self.calculator.pre_execute()\n\n #self.calculator.datastore.swmr_on()\n oq = self.calculator.oqparam\n dstore = self.calculator.datastore\n self.calculator.set_param()\n self.calculator.offset = 0\n\n # Source model\n #print('self.__dict__ = ')\n #print(self.calculator.__dict__)\n if oq.hazard_calculation_id: # from ruptures\n dstore.parent = self.calculator.datastore.read(\n oq.hazard_calculation_id)\n elif hasattr(self.calculator, 'csm'): # from sources\n self.calculator_build_events_from_sources()\n #self.calculator.build_events_from_sources()\n if (oq.ground_motion_fields is False and oq.hazard_curves_from_gmfs is False):\n return {}\n elif 'rupture_model' not in oq.inputs:\n logging.warning(\n 'There is no rupture_model, the calculator will just '\n 'import data without performing any calculation')\n fake = logictree.FullLogicTree.fake()\n dstore['full_lt'] = fake # needed to expose the outputs\n dstore['weights'] = [1.]\n return {}\n else: # scenario\n self.calculator._read_scenario_ruptures()\n if (oq.ground_motion_fields is False and oq.hazard_curves_from_gmfs is False):\n return {}\n\n # Intensity measure models\n if oq.ground_motion_fields:\n if self.vtag >= 12:\n imts = oq.get_primary_imtls()\n nrups = len(dstore['ruptures'])\n base.create_gmf_data(dstore, imts, oq.get_sec_imts())\n dstore.create_dset('gmf_data/sigma_epsilon',\n getters.sig_eps_dt(oq.imtls))\n dstore.create_dset('gmf_data/time_by_rup',\n getters.time_dt, (nrups,), fillvalue=None)\n elif self.vtag == 11:\n imts = oq.get_primary_imtls()\n nrups = len(dstore['ruptures'])\n base.create_gmf_data(dstore, len(imts), oq.get_sec_imts())\n dstore.create_dset('gmf_data/sigma_epsilon',\n getters.sig_eps_dt(oq.imtls))\n dstore.create_dset('gmf_data/time_by_rup',\n getters.time_dt, (nrups,), fillvalue=None)\n else:\n pass\n\n # Prepare inputs for GmfGetter\n nr = len(dstore['ruptures'])\n logging.info('Reading {:_d} ruptures'.format(nr))\n if self.vtag >= 12:\n rgetters = getters.get_rupture_getters(dstore, oq.concurrent_tasks * 1.25,\n srcfilter=self.calculator.srcfilter)\n elif self.vtag == 11:\n rgetters = getters.gen_rupture_getters(dstore, oq.concurrent_tasks)\n else:\n rgetters = getters.gen_rupture_getters(dstore, self.calculator.srcfilter, oq.concurrent_tasks)\n\n \n args = [(rgetter, self.calculator.param) for rgetter in rgetters]\n mon = performance.Monitor()\n mon.version = version\n mon.config = config\n rcvr = 'tcp://%s:%s' % (config.dbserver.listen,\n config.dbserver.receiver_ports)\n skt = zeromq.Socket(rcvr, zeromq.zmq.PULL, 'bind').__enter__()\n mon.backurl = 'tcp://%s:%s' % (config.dbserver.host, skt.port)\n mon = mon.new(\n operation='total ' + self.calculator.core_task.__func__.__name__, measuremem=True)\n mon.weight = getattr(args[0], 'weight', 1.) 
# used in task_info\n mon.task_no = 1 # initialize the task number\n args += (mon,)\n\n self.args = args\n self.mon = mon\n self.dstore = dstore\n\n finally:\n print('FetchOpenQuake: OpenQuake Hazard Calculator defined.')\n # parallel.Starmap.shutdown()", "def payroll_calculation():\n\n name = search_employee()\n if name == None:\n return\n accrual_month = month('Accrual month: ')\n accrual_year = year('Accrual year: ')\n accrual = f'{accrual_month}-{accrual_year}'\n salary_value = month_salary()\n salary_base = salary(salary_value)\n overtime = value_input('Overtime: ')\n absences = value_input('Absences: ')\n late = value_input('Late: ')\n bonus = value_input('Bonus: ')\n\n hourly_wage = round(salary_value / 220, 2)\n overtime_value = round(float(hourly_wage * 1.5), 2)\n overtime_total = round(overtime_value * overtime, 2)\n daily_wage = round(salary_value / 30, 2)\n absences_value = round(daily_wage * absences, 2)\n late_value = round(daily_wage * late / 60, 2)\n inss_value = inss(salary_base, overtime_total)\n irrf_value = irrf(salary_base, overtime_total, inss_value, bonus)\n sleep(2)\n\n\n\n header('EARNINGS')\n print(f'Salary: {salary_base}')\n print(f'Bonus: {bonus}')\n print(f'Overtime: {overtime_total }')\n earnings_total = round(salary_base + overtime_total + bonus, 2)\n sleep(2)\n\n print(line())\n print(f'Earnings total: {earnings_total}')\n print(line())\n sleep(2)\n\n header('DISCOUNTS')\n\n transportation_vouchers = round(salary_base * 6 / 100, 2)\n health_care = round(salary_base * 2 / 100, 2)\n dental_care = round(salary_base * 0.5 / 100, 2)\n meal_ticket = round(salary_base * 1 / 100, 2)\n\n print(f'absences: {absences_value}')\n print(f'late: {late_value}')\n print(f'transportation_vouchers: {transportation_vouchers}')\n print(f'health_care: {health_care}')\n print(f'dental_care: {dental_care}')\n print(f'meal_ticket: {meal_ticket}')\n print(f'inss_value: {inss_value}')\n print(f'irrf_value: {irrf_value}')\n\n discounts_total = round(absences_value + late_value + transportation_vouchers + health_care +\n dental_care + meal_ticket + inss_value + irrf_value, 2)\n\n print(line())\n print(f'Discounts_total : {discounts_total }')\n print(line())\n liquid_salary = round(earnings_total - discounts_total, 2)\n print(f'Liquid_salary: {liquid_salary} ')\n print(line())\n\n conn = sqlite3.connect('data/people_management.db')\n cursor = conn.cursor()\n cursor.execute(f\"\"\"\n INSERT INTO salary (name, salary ,bonus, overtime, absences_value, late_value, \n t_vouchers, health_care, dental_care, meal_ticket, inss, irrf, \n earnings, discounts, liquid_salary, accrual)\n VALUES ('{name}', '{salary_base}' ,'{bonus}', '{overtime_total}', '{absences_value}', \n '{late_value}', '{transportation_vouchers}', '{health_care}', '{dental_care}', \n '{meal_ticket}', '{inss_value}', '{irrf_value}', '{earnings_total}', '{discounts_total}', \n '{liquid_salary}', '{accrual}')\n \"\"\")\n conn.commit()\n conn.close()", "def calculate(self, waste_heat=0):\n radiator_type = self.radiators_type.get()\n data = self.radiators[radiator_type]\n if not waste_heat:\n for _, subsystem in self.data.wasteheat.items():\n waste_heat += subsystem\n self.waste_heat.set(waste_heat)\n area = waste_heat / (data[\"Specific area heat\"] * 1000)\n mass = (area * 1000) * data[\"Specific area mass\"]\n self.data.masses[\"Lifesupport Radiators\"] = mass\n self.area.set(area)\n self.mass.set(mass)\n self.radiator_temperature.set(self.radiators[radiator_type][\"Radiator Temperature\"])", "def calculate(self):\n # Clear the 
results\n self.calc_power = 0.0\n self.calc_data = 0.0\n self.calc_num_batt = 0.0\n\n for tab in range(self.tabSubsystem.count()):\n self.tabSubsystem.widget(tab).calculate()\n # print(self.tabSubsystem.widget(tab).cwpblDoubleSpinBox.value())\n\n # Accumlate the values\n self.calc_data += self.tabSubsystem.widget(tab).calc_data\n self.calc_num_batt += self.tabSubsystem.widget(tab).calc_num_batt\n self.calc_power += self.tabSubsystem.widget(tab).calc_power\n\n # Update the display\n self.powerLabel.setText(str(round(self.calc_power, 2)) + \" watt*hr\")\n self.numBatteriesLabel.setText(str(round(self.calc_num_batt, 2)) + \" batteries\")\n self.dataUsageLabel.setText(str(DS.bytes_2_human_readable(self.calc_data)))\n\n # Calculate the power usage for progessbar\n battery_pwr = self.batteryTypeComboBox.itemData(self.batteryTypeComboBox.currentIndex())\n battery_usage_percent = Power.calculate_battery_usage(self.calc_power,\n self.numBatteriesSpinBox.value(),\n battery_pwr)\n if battery_usage_percent > 1:\n self.batteryProgressBar.setValue(100)\n else:\n self.batteryProgressBar.setValue(battery_usage_percent * 100.0)\n\n # Calculate the data usage for progressbar\n # Convert GB to bytes\n sd_card_mb = self.storageSizeSpinBox.value() * 1024.0\n if sd_card_mb == 0:\n sd_card_mb = 32 * 1024.0\n self.storageSizeSpinBox.setValue(32)\n data_usage_to_mb = self.calc_data / 1048576.0\n data_usage_percentage = (data_usage_to_mb / sd_card_mb) * 100.0\n\n if data_usage_percentage > 100:\n self.dataUsageProgressBar.setValue(100)\n else:\n self.dataUsageProgressBar.setValue(data_usage_percentage)\n\n # Salinity Label\n if self.cwsSpinBox.value() <= 5.0:\n self.salinityLabel.setText(\"Fresh Water\")\n self.salinityLabel.setStyleSheet(\"color: blue;\")\n else:\n self.salinityLabel.setText(\"Salt Water\")\n self.salinityLabel.setStyleSheet(\"\")\n\n # Recording Label\n if self.cerecordCheckBox.isChecked():\n self.recordingLabel.setText(\"Recording ON\")\n self.recordingLabel.setStyleSheet(\"\")\n else:\n self.recordingLabel.setText(\"Recording OFF\")\n self.recordingLabel.setStyleSheet(\"color: red;\")\n\n # Update the command file\n self.update_command_file()", "def start(self):\n ### input check ### ? 
or done automaticly, how optional?\n self.ctx.last_calc2 = None\n self.ctx.loop_count2 = 0\n self.ctx.calcs = []\n self.ctx.successful2 = False\n\n # set values, or defaults, default: always converge charge density, crit < 0.00002, max 4 fleur runs\n self.ctx.max_number_runs = self.inputs.wf_parameters.get_dict().get('relax_runmax', 4)\n self.ctx.max_force_cycle = self.inputs.wf_parameters.get_dict().get('max_force_cycle', 4)\n print 'start'", "def process(self):\n\t\tif self.update_check() or self.force_update:\n\t\t\tself.district_check() #pull all local data and regions\n\t\t\tself.fix() #fix data anomalies - e.g add in Bucks.\n\t\t\tself.save_all() #store a copy of the data\n\t\t\tself.ingest() #add data to models\n\t\t\tself.update_totals() #calculate weekly data\n\t\telse:\n\t\t\tlog.info('PHE cases up to date')", "def run_performance():\n # Create a Struct data instance from config\n inputs = Struct(config)\n inputs.throttle = throttle\n # Get oxidizer properties at the given temperature\n n2o = n2o_properties(inputs.ox.T_tank)\n # Our integration variables are oxidizer mass and liquid oxidizer volume\n Mox = n2o.rho_l*(inputs.ox.liquid_V) + n2o.rho_g*(inputs.ox.tank_V-inputs.ox.liquid_V)\n if inputs.options.output_on:\n print(\"Initial oxidizer mass: {} kg.\".format(Mox))\n\n start = time.perf_counter() # Start timer for integration\n\n time, record = integration(inputs) # Time = time for integration, record = output data\n F_thrust = record.F_thrust\n p_cc = record.p_cc\n p_oxtank = record.p_oxtank\n p_oxpresstank = record.p_oxpresstank\n p_fueltank = record.p_fueltank\n p_fuelpresstank = record.p_fuelpresstank\n p_oxmanifold = record.p_oxmanifold\n T_oxtank = record.T_oxtank\n T_cc = record.T_cc\n area_core = record.area_core\n OF = record.OF_i\n gamma_ex = record.gamma_ex\n m_dot_ox = record.m_dot_ox\n m_dot_fuel = record.m_dot_fuel\n p_crit = record.p_crit\n m_dot_ox_crit = record.m_dot_ox_crit\n M_e = record.M_e\n p_exit = record.p_exit\n p_shock = record.p_shock\n\n time_elapsed = start-time.perf_counter() # Stop the timer and print elapsed time\n if inputs.options.output_on:\n print(\"Time elapsed for this timestep: {} sec.\".format(time_elapsed))", "def mgcEval(self):\n import numpy as np\n def report_to_df(report):\n\n \"\"\"\n function to convert classification report to dataframe (for visualisation plot)\n \"\"\"\n\n report = re.sub(r\" +\", \" \", report).replace(\"avg / total\", \"avg/total\").replace(\"\\n \", \"\\n\")\n # update this due to sklearn classification report output change\n report = re.sub(r\" +\", \" \", report).replace(\"micro avg\", \"micro_avg\").replace(\"macro avg\", \"macro_avg\").replace(\"weighted avg\", \"weighted_avg\").replace(\"\\n \", \"\\n\")\n report_df = pd.read_csv(StringIO(\"Classes\" + report), sep=' ', index_col=0) \n return(report_df)\n \n #txt report to df\n class_rpttop1 = classification_report(self.y_true, self.y_pred)\n df_report = report_to_df(class_rpttop1)\n\n df_report = df_report.iloc[:self.nb_classes, :].copy()\n df_report.index = df_report.index.astype(int)\n \n\n # classifier prediction metrics\n def classMetrics(averagex):\n precision, recall, fscore, support = score(self.y_true, self.y_pred, average=averagex)\n \n return(\n print(''), \n print('-------------{0:}--------------------'.format(averagex)), \n print('precision: {0:.4f}'.format(precision)),\n print('recall: {0:.4f}'.format(recall)),\n print('fscore: {0:.4f}'.format(fscore)),\n print(''),\n print('kappa score: {0:.4f}'.format(cohen_kappa_score(self.y_true, 
self.y_pred))),\n print('accuracy score: {0:.4f}'.format(accuracy_score(self.y_true, self.y_pred))))\n \n def predSamp():\n\n correct = np.nonzero(self.y_pred==self.y_true)[0]\n incorrect = np.nonzero(self.y_pred!=self.y_true)[0]\n\n # quick check of the number of correct prediction from validation set\n print(\"\")\n print(\"correct/total = {0: .4f}\".format(len(correct)/(len(correct)+len(incorrect))))\n print(\"total correct sample = {0: .0f}\".format(len(correct)))\n print('------------------------------------------------------------------')\n \n def classReport():\n print('----------------------------- Classfication Report -------------------------------')\n print(classification_report(pd.Series(self.y_true).map(self.dict_label), pd.Series(self.y_pred).map(self.dict_label)))\n \n self.class_rpt = pd.concat([pd.DataFrame(pd.Series(df_report.index.tolist()).map(self.dict_label), columns = ['label']), df_report], axis = 1)\n \n self.classMetricsMac = classMetrics(\"macro\")\n self.classMetricsMic = classMetrics(\"micro\")\n self.predSample = predSamp()\n self.class_rptTop1 = classReport()\n \n return self", "def __init__(self, total_cost, ann_rate, ann_salary, portion_saved):\r\n\t\tself.total_cost = total_cost\r\n\t\tself.portion_down_payment = total_cost*0.25\r\n\t\tself.ann_rate = ann_rate\r\n\t\tself.monthly_salary = ann_salary/12\r\n\t\tself.portion_saved = portion_saved\r\n\t\tself.current_savings = [0.0,]\r\n\t\tself.months = 0\r\n\t\tself.new_saving = 0", "def calculate():\n\n # Get all input data from the GUI\n age = float(age_input.get())\n weight = float(weight_input.get())\n height = float(height_input.get())\n heartrate = float(heartrate_input.get())\n duration = float(duration_input.get())\n\n if gender.get() == 0:\n # Calculate data for males\n bmr = male_bmr(weight, height, age)\n gross_calories = male_calories(heartrate, weight, age, duration)\n else:\n # Calculate data for females\n bmr = female_bmr(weight, height, age)\n gross_calories = female_calories(heartrate, weight, age, duration)\n\n net_calories = gross_calories - (bmr / 1440 * duration)\n\n # Display calculated data\n bmr_output.config(text=int(bmr))\n gross_output.config(text=int(gross_calories))\n net_output.config(text=int(net_calories))", "def cost_volume_profit():\r\n c = float(input(\"Please Enter Total Fixed Costs Value: \"))\r\n a = float(input(\"Please Enter Sale Price Per Unit: \"))\r\n b = float(input(\"Please Enter Variable Cost Per Unit: \"))\r\n ccm = float(a)-float(b)\r\n cuu = float(c)/float(ccm)\r\n ccmr = (float(ccm)/float(a))*float(100)\r\n cda = float(c)/(float(ccmr)/float(100))\r\n print \">> Your Contribution Margin is\",ccm\r\n print \">> Your Breakeven Sales in Units is\",round(cuu)\r\n print \">> Your Contribution Margin Ratio is\",ccmr,\"%\"\r\n print \">> Your Breakeven Sales in Dollars is\",cda,\"\\n\"\r\n qq = input(\" Press 1 To Compute Target Profit\\n Press 2 To Compute Margin of Safety\\n Press 3 To Perform Sensitivity Analysis\\n Or Press 0 To Exit: \")\r\n if(qq == 1):\r\n dds = float(input(\"Please Enter Your Target Profit: \"))\r\n xxx = (float(c)+float(dds))/float(ccm)\r\n xxxx = (float(c)+float(dds))/(float(ccmr)/float(100))\r\n print \">> Your Target Profit in Units To Earn\",dds,\"$ is\",round(xxx)\r\n print \">> Your Target Profit in Dollars To Earn\",dds,\"$ is\",xxxx\r\n elif(qq == 0):\r\n print \"Canceled\"\r\n elif(qq == 2):\r\n xc = float(input(\"Please Enter Expected Sales in Units: \"))\r\n zzz = float(xc)-float(cuu)\r\n zzzz = float(zzz)*float(a)\r\n print \">> 
Your Margin of Safety in Units is\",round(zzz)\r\n print \">> Your Margin of Safety in Dollars is\",zzzz\r\n elif(qq == 3):\r\n i = input(\"Please Enter Total Fixed Costs Value: \")\r\n o = input(\"Please Enter Sale Price Per Unit: \")\r\n p = input(\"Please Enter Variable Cost Per Unit: \")\r\n n = 0\r\n for x,y,z in zip(i,o,p):\r\n cm = float(y)-float(z)\r\n uu = float(x)/float(cm)\r\n cmr = (float(cm)/float(y))*float(100)\r\n da = float(x)/(float(cmr)/float(100))\r\n n += 1\r\n print \"Your Results in Case\",int(n),\"is :\"\r\n print \">> Your Contribution Margin is\",cm\r\n print \">> Your Breakeven Sales in Units is\",round(uu)\r\n print \">> Your Contribution Margin Ratio is\",cmr,\"%\"\r\n print \">> Your Breakeven Sales in Dollars is\",da,\"\\n\"\r\n if(cm > ccm):\r\n a = float(cm)-float(ccm)\r\n print \">> Your Contribution Margin Increased by\",a\r\n elif(ccm > cm):\r\n a = float(ccm)-float(cm)\r\n print \">> Your Contribution Margin Decreased by\",a\r\n if(uu > cuu):\r\n b = float(uu)-float(cuu)\r\n print \">> Your Breakeven Sales in Units Increased by\",round(b)\r\n elif(cuu > uu):\r\n b = float(cuu)-float(uu)\r\n print \">> Your Breakeven Sales in Units Decreased by\",round(b)\r\n if(cmr > ccmr):\r\n c = float(cmr)-float(ccmr)\r\n print \">> Your Contribution Margin Ratio Increased by\",c,\"%\"\r\n elif(ccmr > cmr):\r\n c = float(ccmr)-float(cmr)\r\n print \">> Your Contribution Margin Ratio Decreased by\",c,\"%\"\r\n if(da > cda):\r\n d = float(da)-float(cda)\r\n print \">> Your Breakeven Sales in Dollars Increased by\",d\r\n elif(cda > da):\r\n d = float(cda)-float(da)\r\n print \">> Your Breakeven Sales in Dollars Decreased by\",d,\"\\n\"", "def run(self):\n\n self.preprocess()\n self.restore_ratings()\n self.prepare_UI()\n self.loop_through_units()\n self.cleanup()\n\n print('\\nAll Done - results are available in:\\n\\t{}'.format(self.out_dir))", "def main():\n\n print(\"Finding maximum clade credibility tree...\")\n find_mcc_tree()\n print(\"Computing clade credibilities...\")\n compute_clade_probabilities()\n if _CAN_PLOT:\n print(\"Plotting maximum clade credibility tree...\")\n plot_mcc_tree()\n else:\n print(\"Skipping plotting tree due to lack of PyQt4 support. :(\")\n print(\"Computing posterior mean paramter estimates...\")\n ranked_means = utils.write_means(\"indoeuropean.log\", \"parameter_means.csv\")\n print(\"Computing ranking correlations...\")\n compute_ranking_correls(ranked_means)\n print(\"Generating LaTeX table...\")\n make_table(ranked_means)\n print(\"Generating rate variation figure...\")\n make_figure(\"category_rates.eps\")", "def after_run(self):\n # Calculate the performance of the strategy and portfolio\n self.portfolio.calc_stats()\n self.calc_performance()\n\n return self", "def main_loop(csd_profile, csd_seed, total_ele):\n csd_name = csd_profile.func_name\n print 'Using sources %s - Seed: %d ' % (csd_name, csd_seed)\n h = 10.\n\n #TrueCSD\n start_x, end_x, csd_res = [0.,1.,100] \n t_csd_x, true_csd = generate_csd_1D(csd_profile, csd_seed, \n start_x=start_x, \n end_x=end_x, \n res_x=csd_res)\n \n #Electrodes \n ele_res = int(total_ele) \n ele_lims = [0.10, 0.9]\n ele_pos, pots = electrode_config(ele_lims, ele_res, true_csd, t_csd_x, h)\n num_ele = ele_pos.shape[0]\n print 'Number of electrodes:', num_ele\n x_array_pots, true_pots = electrode_config(ele_lims, 100, true_csd, t_csd_x, h)\n\n #kCSD estimation\n gdX = 0.01\n x_lims = [0.,1.] 
#CSD estimation place\n tic = time.time() #time it\n k, est_csd, est_pot = do_kcsd(ele_pos, pots, h=h, gdx=gdX,\n xmin=x_lims[0], xmax=x_lims[1], n_src_init=300)\n toc = time.time() - tic\n\n #RMS of estimation - gives estimate of how good the reconstruction was\n chr_x, test_csd = generate_csd_1D(csd_profile, csd_seed,\n start_x=x_lims[0], end_x=x_lims[1], \n res_x=int((x_lims[1]-x_lims[0])/gdX))\n rms = np.linalg.norm(abs(test_csd - est_csd[:,0]))\n rms /= np.linalg.norm(test_csd)\n\n #Plots\n title =\"Lambda: %0.2E; R: %0.2f; CV_Error: %0.2E; RMS_Error: %0.2E; Time: %0.2f\" %(k.lambd, k.R, k.cv_error, rms, toc)\n make_plots(title, t_csd_x, true_csd, ele_pos, pots, k.estm_x, est_csd, est_pot, true_pots)\n return", "def main():\n \n file = open(\"profit.txt\",\"r\")\n profits = file.readlines()\n file = open(\"revenue.txt\",\"r\")\n revenues = file.readlines()\n file.close()\n \n totalProfits = 0\n for i in range(len(profits)):\n totalProfits += float(profits[i])\n \n print \"Total Fortune 500 profits, \" + os.environ['YEAR'] + \": $\" + str(totalProfits) + \"M\"\n \n totalRevenues = 0\n for j in range(len(revenues)):\n totalRevenues += float(revenues[i])\n \n print \"Total Fortune 500 revenues, \" + os.environ['YEAR'] + \": $\" + str(totalRevenues) + \"M\"", "def Execute(self,settings,IsStatusBar=False): \n if settings.IsSeed:\n np.random.seed(5) \n \n self._IsInitial = True\n self.settings = settings\n self.sim_t = copy.copy(settings.starttime) # does not have to start at zero if we perform sequential simulations \n self.X_matrix = copy.deepcopy(settings.X_matrix) \n self.fixed_species_amount = copy.deepcopy(self.parse.fixed_species_amount) \n \n try:\n self.volume_code = settings.volume_code\n except AttributeError: # No volume_code present in settings\n self.volume_code = \"self._current_volume = 1\" \n \n #self.species_to_update = [s for s in range(self.n_species)] # ensure that the first run updates all species \n self.Propensities() \n \n if not self.sim_t: \n self.timestep = 1 \n self.sim_output = []\n self.propensities_output = [] \n self.V_output = []\n self._IsTrackPropensities = copy.copy(settings.IsTrackPropensities)\n self.SpeciesSelection() \n self.RateSelection() \n self.SetEvents() # April 15, moved into here, because otherwise each new cell division cycle starts with a time event, if specified \n if not settings.IsOnlyLastTimepoint:\n self.Initial_Conditions() \n \n nstep_counter = 1\n t1 = time.time()\n while (self.sim_t < settings.endtime) and (self.timestep < settings.timesteps): \n if self.sim_a_0 <= 0: # All reactants got exhausted\n settings.endtime = 10**50\n break\n \n self.RunExactTimestep() # Run direct SSA \n self.HandleEvents()\n \n # Update Propensities selectively \n if self.sim_t < settings.endtime: \n if not self._IsPerformEvent:\n self.species_to_update = self.parse.reaction_affects[self.reaction_index] # Determine vars to update \n else:\n self.species_to_update = [s for s in range(self.n_species)] \n \n self.Propensities()\n \n if not settings.IsOnlyLastTimepoint: # Store Output\n self.GenerateOutput()\n \n self._IsPerformEvent = False # set to false (or just to make sure).\n t2 = time.time() \n if IsStatusBar and t2-t1> 1:\n t1 = time.time()\n sys.stdout.write('\\rsimulating {0:s}\\r'.format('.'*nstep_counter) ) \n sys.stdout.flush() \n nstep_counter+=1\n if nstep_counter > 10:\n nstep_counter = 1 \n sys.stdout.write('\\rsimulating {0:s} '.format('.'*nstep_counter))\n sys.stdout.flush()\n if settings.IsOnlyLastTimepoint or settings.endtime != 10**50: 
\n self.GenerateOutput() \n if IsStatusBar and t1 and not settings.quiet:\n sys.stdout.write('\\rsimulation done! \\n')", "def run(self):\n\t\tself.print_header_information()\n\n\t\t#self.get_number_of_instances_from_user()\n\n\t\t#self.compile_dataframe(self.number_of_instances)\n\n\t\tprint \"\\n{}\".format(self.data)\n\n\t\t# Uncomment these lines for debugging\n\t\tself.compile_dataframe_default()\n\t\t# print \"\\n{}\".format(self.data)\n\n\t\tself.analysis_of_dataframe(self.data)", "def run():\n\twrite_fuel_data()", "def main():\n station = \"Merikannontie\"\n coefs, score = cycling_weather_linregr(station)\n print(f\"Measuring station: {station}\")\n print(\n f\"Regression coefficient for variable 'precipitation': {coefs[0]:.1f}\")\n print(f\"Regression coefficient for variable 'snow depth': {coefs[1]:.1f}\")\n print(f\"Regression coefficient for variable 'temperature': {coefs[2]:.1f}\")\n print(f\"Score: {score:.2f}\")\n return", "def beginExecution(self):\n self.setup = self.am_getOption(\"Setup\", self.setup)\n self.enabled = self.am_getOption(\"EnableFlag\", self.enabled)\n self.restartAgents = self.am_getOption(\"RestartAgents\", self.restartAgents)\n self.restartExecutors = self.am_getOption(\"RestartExecutors\", self.restartExecutors)\n self.restartServices = self.am_getOption(\"RestartServices\", self.restartServices)\n self.diracLocation = os.environ.get(\"DIRAC\", self.diracLocation)\n self.addressTo = self.am_getOption('MailTo', self.addressTo)\n self.addressFrom = self.am_getOption('MailFrom', self.addressFrom)\n self.controlComponents = self.am_getOption('ControlComponents', self.controlComponents)\n self.commitURLs = self.am_getOption('CommitURLs', self.commitURLs)\n\n self.csAPI = CSAPI()\n\n res = self.getRunningInstances(instanceType='Agents')\n if not res[\"OK\"]:\n return S_ERROR(\"Failure to get running agents\")\n self.agents = res[\"Value\"]\n\n res = self.getRunningInstances(instanceType='Executors')\n if not res[\"OK\"]:\n return S_ERROR(\"Failure to get running executors\")\n self.executors = res[\"Value\"]\n\n res = self.getRunningInstances(instanceType='Services')\n if not res[\"OK\"]:\n return S_ERROR(\"Failure to get running services\")\n self.services = res[\"Value\"]\n\n self.accounting.clear()\n return S_OK()", "def efficiency_cal(self):\n Temp = 0\n for i in self.supplyseries:\n for j in self.demandseries:\n if(self.shortestpathij(i, j) == None):\n continue\n Temp += 1/self.shortestpathij(i, j)\n \n self.efficiency = 1/(self.supplynum*self.demandnum)*Temp", "def main():\n path_for_data = '/Users/avielshtern/Desktop/semb/iml/IML.HUJI-master/data/kc_house_data (1).csv'\n design_matrix, response_vector = load_data(path_for_data)\n putting_it_all_together_1(design_matrix, response_vector)\n putting_it_all_together_2(design_matrix, response_vector)\n feature_evaluation(design_matrix, response_vector)", "def calculate(self):\n #runs = [ai\n # for ei in self.experiment_queues\n # for ai in ei.cleaned_automated_runs]\n #\n #ni = len(runs)\n #self.nruns = ni\n # for ei in self.experiment_queues:\n # dur=ei.stats.calculate_duration(ei.cleaned_automated_runs)\n # if\n\n\n tt = sum([ei.stats.calculate_duration(ei.cleaned_automated_runs)\n for ei in self.experiment_queues])\n self._total_time = tt\n offset = 0\n if self._start_time:\n offset = time.time() - self._start_time\n\n self.etf = self.format_duration(tt - offset)", "def calc_resources(self):\n self.popula = self.energy = self.popula_used = self.energy_used = 0\n self.cnt_public = self.cnt_shop = 
self.cnt_1 = self.cnt_2 = self.cnt_3 = self.cnt_4 = self.cnt_5 = self.cnt_office = 0\n self.popula += self.extra_pop\n for i in range(20):\n b = self.b[i]\n if b == 'T':\n self.popula += self.f[i] * 2\n self.energy_used += 1\n elif b == 'O':\n self.popula_used += 1\n self.energy_used += 1\n self.cnt_office += self.f[i]\n elif b == 'U':\n self.popula_used += 1\n self.cnt_public += 1\n elif b == 'S':\n self.energy_used += 1\n self.cnt_shop += 1\n elif b == '1':\n self.popula += 1\n self.energy += 1\n self.popula_used += 1\n self.cnt_1 += 1\n elif b == '2':\n self.popula_used += 1\n self.cnt_2 += 1\n elif b == '3':\n self.popula_used += 1\n self.cnt_3 += 1\n elif b == '4':\n self.popula += 2\n self.popula_used += 1\n self.cnt_4 += 1\n elif b == '5':\n self.energy += 2\n self.popula_used += 1\n self.cnt_5 += 1\n elif b == 'A':\n self.energy += 2\n self.popula_used += 1\n elif b == 'F':\n self.energy += 3\n self.popula_used += 1\n elif b == 'G':\n self.popula += 1\n if 'tvst' in args.exp:\n self.popula += self.cnt_shop\n if 'ward' in args.exp:\n self.popula += 3\n if 'elec' in args.exp:\n self.energy += 3\n if 'capi' in args.exp:\n self.popula_used += 2\n if 'fire' in args.exp:\n self.popula_used += 1\n if 'park' in args.exp:\n self.popula_used += 1", "def main():\n\t#print(scipy.__version__)\n\t#image()\n\t#heat_capacity2()\n\t#hist()\n\t#single_plot()\n\n\t#heat_capacity2()\n\t#single_plot()\n\t#plt.show()\n\t#u0_tc()\n\t#multi_heat_capacity(\"HL_DM_flux5\",True)\n\t#multi_heat_capacity2()\n\t#plot_spin()\n\t#plt.show()\n\theat_capacity2(1,2)\n\t#hist()\n\tplt.show()\n\t#potential()\n\t#plt.show()\n\t#heat_capacity(3,4)\n\t#heat_capacity(5,6)\n\t#heat_capacity(7,8)\n\t#final_spins()\n\t#plot_spin()\n\t#plot_from_csv()\n\t#difference_plot()", "def run(self, refresh=True):\n\n progress = Progress(\n \"[progress.description]{task.description}\",\n TextColumn(\"[bold green]{task.fields[measures]}\", justify=\"right\"),\n TextColumn(\n \"[dark_goldenrod]Truncated CM {task.fields[conf_matrix]}\",\n justify=\"right\",\n ),\n BarColumn(),\n \"[progress.percentage]{task.percentage:>3.0f}%\",\n TimeRemainingColumn(),\n auto_refresh=False,\n )\n\n logname = self.args.logname\n print(\"Log stored at: \", logname)\n run = wandb.init(\n project=\"information-obfuscation\",\n entity=\"peiyuanl\",\n name=logname,\n config=vars(self.args),\n )\n dirname = os.path.join(\n \"../checkpoints\",\n self.args.experiment,\n self.args.task,\n self.args.model,\n logname,\n )\n Path(dirname).mkdir(parents=True, exist_ok=True)\n\n with progress:\n gender_adv_tasks = []\n age_adv_tasks = []\n occupation_adv_tasks = []\n\n # To ensure layout correctness\n\n gender_task = progress.add_task(\n \"[cyan]Gender Task\",\n total=self.args.num_epochs,\n measures={},\n conf_matrix=[],\n )\n for name in self.get_ordered_adversary_names():\n gender_adv_tasks.append(\n progress.add_task(\n f\"[cyan]Gender {name} Adversary\",\n total=self.args.finetune_epochs,\n measures={},\n conf_matrix=[],\n )\n )\n\n age_task = progress.add_task(\n \"[cyan]Age Task\",\n total=self.args.num_epochs,\n measures={},\n conf_matrix=[],\n )\n for name in self.get_ordered_adversary_names():\n age_adv_tasks.append(\n progress.add_task(\n f\"[cyan]Age {name} Adversary\",\n total=self.args.finetune_epochs,\n measures={},\n conf_matrix=[],\n )\n )\n\n occupation_task = progress.add_task(\n \"[cyan]Occupation Task\",\n total=self.args.num_epochs,\n measures={},\n conf_matrix=[],\n )\n\n for name in self.get_ordered_adversary_names():\n 
occupation_adv_tasks.append(\n progress.add_task(\n f\"[cyan]Age {name} Adversary\",\n total=self.args.finetune_epochs,\n measures={},\n conf_matrix=[],\n )\n )\n\n self.train_task_with_adversary(\n \"gender\",\n dirname,\n refresh=refresh,\n progress=progress,\n task=gender_task,\n adv_tasks=gender_adv_tasks,\n )\n self.train_task_with_adversary(\n \"age\",\n dirname,\n refresh=refresh,\n progress=progress,\n task=age_task,\n adv_tasks=age_adv_tasks,\n )\n self.train_task_with_adversary(\n \"occupation\",\n dirname,\n refresh=refresh,\n progress=progress,\n task=occupation_task,\n adv_tasks=occupation_adv_tasks,\n )\n\n trained_model_artifact = wandb.Artifact(\n logname + \"_model\", type=\"model\", description=\"Task and adversary models\"\n )\n trained_model_artifact.add_dir(dirname)\n run.log_artifact(trained_model_artifact)\n\n dataset_artifact = wandb.Artifact(\n logname + \"_dataset\",\n type=\"dataset\",\n description=\"Dataset used to train the models\",\n )\n dataset_artifact.add_dir(MOVIELENS_1M_DIR)\n run.log_artifact(dataset_artifact)", "def calc_performance(self):\n for symbol in self.portfolio.assets.keys():\n\n # Total the Performance of all the trades\n start = self.portfolio.trades[symbol].index[0]\n end = self.portfolio.trades[symbol].index[-1]\n trades = len(self.record[symbol])\n profit = self.record[symbol]['profit'].sum()\n loss = self.record[symbol]['loss'].sum()\n # Total or average the trade info for all the trades\n try:\n wins = len(self.record[symbol].groupby('win/loose').groups['w'])\n except (ValueError, KeyError):\n wins = 0\n try:\n losses = len(self.record[symbol].groupby('win/loose').groups['l'])\n except (ValueError, KeyError):\n losses = 0\n try:\n washes = len(self.record[symbol].groupby('win/loose').groups['-'])\n except (ValueError, KeyError):\n washes = 0\n max_drawdown = self.record[symbol]['drawdown'].max()\n average_drawdown = self.record[symbol]['drawdown'].mean()\n max_drawdown_time = self.record[symbol]['drawdown days'].max()\n average_drawdown_time = self.record[symbol]['drawdown days'].mean()\n # Average the risk and market comparisons for all trades\n vol_risk = self.record[symbol]['volatility'].mean()\n beta = self.record[symbol]['beta'].mean()\n lpm_risk = self.record[symbol]['lpm'].mean()\n e_r = self.record[symbol]['expected_return'].mean()\n # Calculate Risk measures\n treynor_ratio = (e_r - self.risk_free_return) / beta\n sharpe_ratio = (e_r - self.risk_free_return) / vol_risk\n # Package up the data for each symbol\n self.performance[symbol] = {\n 'start': start,\n 'end': end,\n 'trades': trades,\n 'wins': wins,\n 'losses': losses,\n 'washes': washes,\n 'profit': profit,\n 'loss': loss,\n 'net_profit': profit - loss,\n 'profit_factor': profit / loss if loss != 0 else 1.0,\n 'percent_profitable': wins / trades if trades != 0 else 0.0,\n 'average_trade_net_profit' : (profit - loss) / trades if trades != 0 else 0.0,\n 'max_drawdown' : max_drawdown,\n 'average_drawdown' : average_drawdown,\n 'max_drawdown_days' : max_drawdown_time,\n 'average_drawdown_days' : average_drawdown_time,\n 'volatility_risk' : vol_risk,\n 'beta' : beta,\n 'lower_partial_moment_risk' : lpm_risk,\n 't_r' : treynor_ratio,\n 's_r' : sharpe_ratio\n }\n\n return self", "def calculatedResults(meanReturns, covMatrix, riskFreeRate=0, constraintSet=(0,1)):\n # Max Sharpe Ratio Portfolio\n maxSR_Portfolio = maxSR(meanReturns, covMatrix)\n maxSR_returns, maxSR_std = portfolioPerformance(maxSR_Portfolio['x'], meanReturns, covMatrix)\n \n maxSR_allocation = 
pd.DataFrame(maxSR_Portfolio['x'], index=meanReturns.index, columns=['allocation'])\n maxSR_allocation.allocation = [round(i*100,0) for i in maxSR_allocation.allocation]\n \n # Min Volatility Portfolio\n minVol_Portfolio = minimizeVariance(meanReturns, covMatrix)\n minVol_returns, minVol_std = portfolioPerformance(minVol_Portfolio['x'], meanReturns, covMatrix)\n \n minVol_allocation = pd.DataFrame(minVol_Portfolio['x'], index=meanReturns.index, columns=['allocation'])\n minVol_allocation.allocation = [round(i*100,0) for i in minVol_allocation.allocation]\n\n # Efficient Frontier\n efficientList = []\n targetReturns = np.linspace(minVol_returns, maxSR_returns, 20)\n for target in targetReturns:\n efficientList.append(efficientOpt(meanReturns, covMatrix, target)['fun'])\n \n maxSR_returns, maxSR_std = round(maxSR_returns*100,2), round(maxSR_std*100,2)\n minVol_returns, minVol_std = round(minVol_returns*100,2), round(minVol_std*100,2)\n \n return maxSR_returns, maxSR_std, maxSR_allocation, minVol_returns, minVol_std, minVol_allocation, efficientList, targetReturns", "def calc_capital_costs (self):\n self.capital_costs = self.max_boiler_output * \\\n self.comp_specs[\"cost per btu/hrs\"]\n #~ print self.capital_costs" ]
[ "0.69873667", "0.64584976", "0.6378536", "0.63241583", "0.63009036", "0.62177217", "0.6204553", "0.6156644", "0.6136159", "0.6083111", "0.6052154", "0.6037782", "0.60245705", "0.60139", "0.5996558", "0.599415", "0.5984706", "0.59628487", "0.59623754", "0.59618145", "0.5958885", "0.59586155", "0.5950948", "0.5950873", "0.5934772", "0.59136856", "0.59029186", "0.58681715", "0.58336", "0.5832624", "0.58272266", "0.58213735", "0.58188057", "0.57751644", "0.5769541", "0.57663155", "0.5762353", "0.5759401", "0.57412857", "0.5728595", "0.5723223", "0.57041794", "0.5699309", "0.5678758", "0.56666136", "0.5664842", "0.5655238", "0.56543535", "0.5652245", "0.5646965", "0.5636909", "0.5628632", "0.5619845", "0.56185585", "0.5610814", "0.56081885", "0.5606616", "0.5604677", "0.5599915", "0.5589228", "0.55867976", "0.5579325", "0.55632204", "0.5551901", "0.55407655", "0.5535061", "0.55305934", "0.55211854", "0.5512364", "0.5507751", "0.5505652", "0.54968727", "0.54934007", "0.5491106", "0.54879206", "0.5482939", "0.54757357", "0.54688835", "0.5467452", "0.54667443", "0.54499084", "0.54488397", "0.54460126", "0.54458845", "0.54440105", "0.5443418", "0.54421204", "0.54352534", "0.54325616", "0.543168", "0.54289436", "0.5426191", "0.54258925", "0.5423373", "0.54188585", "0.54171705", "0.54113233", "0.5410367", "0.5409595", "0.53949964" ]
0.68490684
1
Calculate the Average Diesel load of the current system Attributes
def calc_average_load (self): #~ self.generation = self.forecast.generation_by_type['generation diesel']\ #~ [self.start_year] self.average_load = \ self.forecast.yearly_average_diesel_load.ix[self.start_year]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calc_average_load (self):\n if self.comp_specs['proposed capacity'] != UNKNOWN:\n self.average_load = None\n self.generation = self.forecast.generation['generation diesel']\\\n [self.start_year]\n self.average_load = \\\n self.forecast.yearly_average_diesel_load.ix[self.start_year]\n #~ print 'self.average_load',self.average_load", "def load_stat():\n loadavg = {}\n f = open(\"/proc/loadavg\")\n con = f.read().split()\n f.close()\n loadavg['lavg_1'] = con[0]\n loadavg['lavg_5'] = con[1]\n loadavg['lavg_15'] = con[2]\n loadavg['nr'] = con[3]\n loadavg['last_pid'] = con[4]\n return loadavg", "def totalEffectiveLoad(self):\n return sum(s.effectiveLoad() for s in self.dispatcher.statuses)", "def get_loadavg(cls):\n\n with open(\"/proc/loadavg\") as loadavg:\n loadavg = loadavg.read().split()\n kernel_entities = loadavg[3].split(\"/\")\n loadavg_stat = { StatsKeys.LOADAVG :\n {\n StatsKeys.LAST_1_MIN : float(loadavg[0]),\n StatsKeys.LAST_5_MIN : float(loadavg[1]),\n StatsKeys.LAST_15_MIN : float(loadavg[2]),\n StatsKeys.RUNNABLE_ENTITIES : int(kernel_entities[0]),\n StatsKeys.SCHEDULING_ENTITIES : int(kernel_entities[1])\n }\n }\n logger.debug(\"Loadavg stats: {}\".format(' '.join(loadavg)))\n\n return loadavg_stat", "def get_load_factor(self):\n # Your code here\n return self.total_items / self.capacity", "def average_performance(self):\n\n print(f\"Average performance: {self.performance / 10}\")", "def getloadavg():\n global _loadavg_inititialized\n\n if not _loadavg_inititialized:\n cext.init_loadavg_counter()\n _loadavg_inititialized = True\n\n # Drop to 2 decimal points which is what Linux does\n raw_loads = cext.getloadavg()\n return tuple([round(load, 2) for load in raw_loads])", "def loadavg():\n sin = psutil.getloadavg()\n return [\n round(sin[0], 3),\n round(sin[1], 3),\n round(sin[2], 3)\n ]", "def load_average(self):\n return _favg(self.load_samples)", "def load_avg():\n \n with open(Path.proc_loadavg()) as f:\n line = f.readline()\n \n load_avgs = [float(x) for x in line.split()[:3]]\n \n return load_avgs", "def avgcpu(self):\n return (self._total_cpu['value'] / self._total_cpu['count']) if self._total_cpu['count'] else 0", "def _avg_performance(bd_dims, BD_directory, run,archive_file_path,max_performance,conversion_func=None,from_fitfile=False):\n path=get_archive_filepath(BD_directory,run, archive_file_path)\n all_performances=get_all_performances(bd_dims, path, conversion_func,from_fitfile)\n return np.mean(all_performances)/max_performance", "def get_avg_load(verbose=False):\n output = run(\"top -d0.5 -n4 | grep Cpu\", quiet=True)\n\n # Strip formatting control characters (top output can have a lot of these)\n output = (output.replace('\\x1b(B','')\n .replace('\\x1b[m','')\n .replace('\\x1b[K','')\n .replace('\\x1b[39;49m',''))\n\n output = output.splitlines()\n\n loads = []\n for i in xrange(len(output)):\n # Top output tends to look like\n # Cpu(s): 2.9%us, 0.0%sy, 0.0%ni, ... OR\n # Cpu(s): 2.9% us, 0.0% sy, 0.0% ni, ... OR\n # %Cpu(s): 2.9 us, 0.0 sy, 0.0 ni, ...\n # We use a regex to match the floating point value for percentage load\n regex = re.compile(\n \"\"\"\n .*Cpu\\(s\\): # any chars before \"Cpu(s):\"\n \\s* # any amount of whitespace\n (\\d*.?\\d*) # any digits, <= 1 period, any digits (i.e. any positive float)\n \\s* # any amount of whitespace\n %? 
# <= 1 percent symbol (some versions of top just have one \"%\" on this line, before \"Cpu(s)\"\n \\s* # any amount of whitespace\n us # total system load appears to be marked \"us\"\n \"\"\", re.VERBOSE)\n\n matches = regex.findall(output[i])\n #print(repr(output[i]))\n if (len(matches) == 1):\n load = float(matches[0])\n loads.append(load)\n else:\n print(\"Error: On host = {Host}, unable to match total cpu load in string\\n{Output}\"\n .format(Host = env.host, Output = output[i]))\n\n # Throw out the first record of CPU load because it always seems to spike\n # briefly after the command is issued.\n loads = loads[1:]\n avg_load = None\n if len(loads) != 0:\n avg_load = sum(loads)/float(len(loads))\n else:\n print(\"Error: On host = {Host}, len(loads) == 0\"\n .format(Host = env.host))\n\n if (verbose):\n print(\"{Host:4} | Average load: {Load:3.2f}%\".format(Host=env.host, Load=avg_load))\n\n return avg_load", "def get_load_factor(self):\n # Your code here\n return self.count/len(self.data)", "def _find_average_age():\r\n count, total = 0, 0\r\n for resource in resources:\r\n patient = resource[\"resource\"]\r\n if \"birthDate\" in patient:\r\n count += 1\r\n dob = patient[\"birthDate\"].split(\"-\")\r\n dob = datetime(int(dob[0]), int(dob[1]), int(dob[2]), 0, 0, 0, 0)\r\n if \"deceasedDateTime\" in patient:\r\n death_time = patient[\"deceasedDateTime\"].split(\"T\")[0].split(\r\n \"-\")\r\n death_time = datetime(int(death_time[0]), int(death_time[1]),\r\n int(death_time[2]), 0, 0, 0, 0)\r\n else:\r\n death_time = datetime.now()\r\n age = relativedelta(death_time, dob).years\r\n total += age\r\n if count == 0:\r\n return count, count\r\n return total / count, count", "def average_level(self):\n spl = [utils.dbspl(x) for x in self.load_files()]\n return np.mean(spl), np.std(spl)", "def get_load_avg():\n \n with open('/proc/loadavg') as f:\n line = f.readline()\n \n return [float(x) for x in line.split()[:3]]", "def get_patient_average():\n r = requests.get(\"http://vcm-7474.vm.duke.edu:5000/api/heart_rate/average/2\")\n print(r.text)", "def ram_average(self):\n return _favg(self.ram_samples)", "def average(self):\n return self.properties.get('average')", "def get_fiber_density_average():\n return Global_Module.global_fiber_density_with_average", "def CountRandomLoadRate(self):\n\t\treturn self._get_attribute('countRandomLoadRate')", "def average(self):\n total = 0\n for t in self.memory:\n total += t.reward\n return total/self.__len__()", "def load_list(self):\n import numpy.distutils.proc as numpy_proc\n res = self.apply(numpy_proc.load_avg,())\n return res", "def get_average_age(self):\n return np.mean([agent.age for agent in self.agents])", "def global_efficiency(self, node_list1, node_list2, link_attribute=None):\n local_efficiency = self.local_efficiency(node_list1, node_list2,\n link_attribute)\n return 1/np.mean(local_efficiency)", "def total_experiment_load():\n loads = tempfeeder_exp()\n return total_load_in_experiment_periods(loads, loads.user_ids)", "def DAM(self):\n return self.get_class_average(self.DAM_class_level)", "def average_speed(self):\n return self._average_speed", "def getAvg(self):\r\n\t\treturn self.data['avg']", "def get_system_load(self, interval, time_period, resource):\n\n interval = int(interval)\n time_period = int(time_period)\n stats = []\n\n # get running time in minutes, div by interval plus 1 sec for network baseline\n num_of_polls = int((time_period * 60) / (interval + 1))\n i = 0\n # get the average for minimum for time period, before dropping the 
oldest values\n while i < num_of_polls:\n if resource == 'cpu':\n stats.append(sysmon.get_cpu_utilisation())\n elif resource == 'memory':\n stats.append(sysmon.get_memory_usage())\n elif resource == 'network':\n stats.append(sysmon.get_network_interface_traffic(INTERFACE))\n time.sleep(interval)\n i += 1\n return stats", "def get_leg_average():\n animals = [json.loads(rd.get(key)) for key in rd.keys(\"*\")]\n legs = [animal[\"legs\"] for animal in animals]\n return jsonify(sum(legs) / len(legs))", "def hotaverage( names):\n rs = radioastronomy.Spectrum() # create input and average structures\n nhot = 0\n\n avenames = names # create a list of files to average\n\n # for all input files\n for filename in names:\n\n parts = filename.split('/')\n nparts = len(parts)\n if nparts == 1:\n aname = parts[0]\n else:\n aname = parts[nparts-1]\n\n parts = aname.split('.')\n nparts = len(parts)\n if nparts < 2:\n print 'File is not an astronomy file: ',filename\n continue\n else:\n extension = parts[nparts-1]\n\n extension = extension.upper()\n if extension != 'HOT': # speed up by only looking at hot load files\n continue\n \n rs.read_spec_ast(filename)\n\n if rs.telel > 0: # only working with hot load, skip elevation > 0.\n continue\n\n avenames[nhot] = filename\n nhot = nhot + 1\n # end of for all files loop\n\n nhot, hot = average( avenames[0:nhot]) # now use generic program for averages\n if nhot < 1:\n print 'No hot load files; can not calibrate!'\n exit()\n\n return nhot, hot", "def get_runs_to_average(self):\n\n if Test.performance_params: return int(Test.performance_params[1])\n elif self._check_performance: return self._runs_to_average\n else: return None", "def StepStepLoadRate(self):\n\t\treturn self._get_attribute('stepStepLoadRate')", "def calculate(data, data_top):\n size, intensity, age = np.array([data[\"Size\"]]), np.array([data[\"Intensity\"]]), data_top.iat[1,0]\n size_avg, intensity_avg = np.average(size), np.average(intensity)\n return size_avg, intensity_avg, age", "def calculate_system_performance(self):\n\n self._calculate_high_order_wfe()\n self._calculate_strehl()", "def get_random_baseline_performance(self, iterations=10):\n random_policy = random_tf_policy.RandomTFPolicy(self._train_env.time_step_spec(),\n self._train_env.action_spec())\n\n return compute_avg_return(self._train_env, random_policy, iterations)", "def averageTime(self):\n \n pass", "def InitialStepLoadRate(self):\n\t\treturn self._get_attribute('initialStepLoadRate')", "def weight(self):\n counters = [\n (\"total_mhz\", self.dominfo.vms_online + self.dominfo.cpus_online / 4.0),\n (\"memory\", self.dominfo.vms_online + self.dominfo.ram_online / 4096.0),\n ]\n load_w = sum((self.node[k] / float(v or 1)) / self.node[k] for k, v in counters)\n return load_w * self.srv_weight", "def _get_network_utilization(self):\n options = self.scenario_cfg[\"options\"]\n interval = options.get('interval', 1)\n count = options.get('count', 1)\n\n cmd = \"sudo sar -n DEV %d %d\" % (interval, count)\n\n raw_result = self._execute_command(cmd)\n result = self._filtrate_result(raw_result)\n\n return result", "def avg_num_visits_patient(self):\n pass", "def average_damage(self) -> float:\r\n number_of_dice = int(self.damage.split(\"d\")[0])\r\n damage_of_dice = int(self.damage.split(\"d\")[1])\r\n average_damage = (number_of_dice + number_of_dice * damage_of_dice) / 2\r\n return average_damage", "def discharge_average(self):\n return self._discharge_average", "def getAverage(die, numRolls, numTrials):", "def avg_hops(self):\n return 
self._avg_hops", "def get_average(self):\n self.avg = math.floor((self.maths + self.phy + self.che) / 3, )\n self.assign_grade()\n return self.avg\n # End of method get_average", "def local_efficiency(self, node_list1, node_list2, link_attribute=None):\n path_lengths = self.cross_path_lengths(node_list1, node_list2,\n link_attribute)\n return np.mean(1/path_lengths, axis=1)", "def calcDVavg(supplyvol, demandvol):\n dvavg = (supplyvol - demandvol)/(0.5 * (supplyvol + demandvol))\n return dvavg", "def cf_mean(self):\n return self['capacity_factor'] / 100", "def cf_mean(self):\n return self['capacity_factor'] / 100", "def StepIncrementLoadRate(self):\n\t\treturn self._get_attribute('stepIncrementLoadRate')", "def calculate(self):\n\n rating = 0\n\n props = ['aroma', 'appearance', 'taste', 'palate', 'bottle_style']\n for item in props:\n rating += getattr(self, item, 0)\n\n self.overall = (rating / self.total) / .2", "def avarage_for_group(data: Dict[int, int]) -> float:\n values = data.values()\n summary = sum(values)\n return summary // len(data)", "def test_load_avg_1():\n result = _run_metric('load_avg_1')\n assert result.exit_code == 0", "def scenario_average_price_rule(_m, y, s):\r\n\r\n return m.SCENARIO_REVENUE[y, s] / m.SCENARIO_DEMAND[y, s]", "def theoretical_effective(dataset):\n return float(sum(dataset))/len(dataset)", "def avg_added(self):\n avg = {}\n for path, lines in self.lines_added.items():\n avg[path] = round(statistics.mean(lines))\n\n return avg", "def getavgvel(self):\n if self.total_time:\n return (6.28)/(self.total_time)", "def average(self):\n return (self.current + self.last) / 2.0", "def calculate_dataset_metrics(self):\n pass", "def average(self):\n return self.summation() / self.count()", "def test_load_avg_5():\n result = _run_metric('load_avg_5')\n assert result.exit_code == 0", "def driver_statistics(self):\n df_feature_ = self.df_feature[~self.df_feature[0].values][['LABEL', 1, 2, 6, 7, 11, 12, 16, 17, 21, 22, 26, 27]]\n df_feature_.columns = ['LABEL', 'Speed_Mean', 'Speed_Var', \"Ac_Mean\", \"Ac_Var\", \"Dc_Mean\", \"Dc_Var\", \n 'Steer_Speed_Mean', 'Steer_Speed_Var', \"Steer_Ac_Mean\", \"Steer_Ac_Var\", \"Steer_Dc_Mean\", \"Steer_Dc_var\"]\n df_feature_['DISTANCE'] = self.df[~self.df_feature[0].values]['DISTANCE']\n return df_feature_.groupby('LABEL').mean()", "def calculate_agrigate(self):\n self.total = 0.0\n for rec in self.data:\n self.total = self.total + rec[\"value\"]\n\n self.agrigate_data = {\n \"site\": self.site,\n \"utc\": self.timestamp_utc,\n \"local\": self.timestamp_local,\n \"tag\": \"TOTAL\",\n \"value\": round(self.total, 3)}\n self.data.append(self.agrigate_data)", "def global_average_speed(cars):\n velocities = [car.velocity for car in cars]\n average_speed = sum(velocities)/len(cars)\n return average_speed", "def compute(self):\n rsa = self._session_graph.get_graph_property(self._FACTOR_KEY)\n rsa = rsa if rsa else 0.\n tr = self._session_graph.graph.num_edges()\n tr = tr if tr > 0 else 1\n rs = self._traffic_record['response_size']\n rsa = ((float(rsa) * (float(tr) - 1.)) + float(rs)) / float(tr)\n self.append_graph_factor('float', rsa)\n\n print \"Response Size Average : \", rsa\n pass", "def getLoad(self) -> float:\n return self.load", "def availability(self):\n if len(self.nodes) == 0:\n return 0.0\n values = map(lambda n: n.availability, self.nodes)\n return mean(values)", "def avg_convergence_rate(request):\n t = 
StaticMicrotask.objects.all().filter(scoring_done=True).aggregate(Avg('hop_count'),Max('hop_count'),Min('hop_count'))\n \n avg_hop_count = t['hop_count__avg']\n max_hop_count = t['hop_count__max']\n min_hop_count = t['hop_count__min']\n \n data = {\n 'avg_hop_count':avg_hop_count,\n 'max_hop_count':max_hop_count,\n 'min_hop_count':min_hop_count\n }\n return render_to_response('my_admin_tools/menu/avg_convergence_rate.html',data,context_instance=RequestContext(request))", "def estimate_brightness(self):\n\n intensity = self._get_intensity()\n self.avg_standard_lum = np.sum(intensity) / (self.img_height * self.img_width)\n return self.avg_standard_lum", "def mdAveragePropertiesList(self):\n\t\tpass", "def avg_latency(self):\n return self._avg_latency", "def avg_latency(self):\n return self._avg_latency", "def compute_utilization(self) -> float:\r\n return self._compute_utilization", "def getAvg(self):\r\n\t\tdata = self.pair.data\r\n\t\tif data['avg'] == None:\r\n\t\t\treturn None\r\n\t\treturn 1. / self.pair.data['avg']", "def InitialIncrementLoadRate(self):\n\t\treturn self._get_attribute('initialIncrementLoadRate')", "def calculateAverage(self, data):\n\n nValidTrials = data['nValid'][-1]\n nRewardTrials = data['nRewarded'][-1]\n return float(nRewardTrials)/nValidTrials", "def calculate_average_run_accuracy(self):\n overall_true_rate, true_positive_rate, true_negative_rate, false_positive_rate, false_negative_rate, true_positive_rate_cutoff, true_negative_rate_cutoff, \\\n false_positive_rate_cutoff, false_negative_rate_cutoff, unclassified_cutoff, matthews_correlation_coefficient, brier_score, auc_score, fit_time, hmeasure = [0] * 15\n balanced_accuracy_arr = []\n auc_arr = []\n hmeasure_arr = []\n brier_score_arr = []\n fit_time_arr = []\n mcc_arr = []\n true_positive_arr = []\n true_negative_arr = []\n false_positive_arr = []\n false_negative_arr = []\n\n count = 0\n for result_dictionary in self.errors:\n for z in range(len(result_dictionary[\"balanced_accuracy_arr\"])):\n overall_true_rate += result_dictionary[\"balanced_accuracy_arr\"][z]\n true_positive_rate += result_dictionary[\"true_positive_rate_arr\"][z]\n true_negative_rate += result_dictionary[\"true_negative_rate_arr\"][z]\n false_positive_rate += result_dictionary[\"false_positive_rate_arr\"][z]\n false_negative_rate += result_dictionary[\"false_negative_rate_arr\"][z]\n matthews_correlation_coefficient += result_dictionary[\"mcc_arr\"][z]\n auc_score += result_dictionary[\"auc_arr\"][z]\n brier_score += result_dictionary[\"brier_score_arr\"][z]\n fit_time += result_dictionary[\"fit_time_arr\"][z]\n hmeasure += result_dictionary[\"hmeasure_arr\"][z]\n count += 1\n\n true_positive_rate_cutoff += result_dictionary[\"avg_true_positive_rate_with_prob_cutoff\"]\n true_negative_rate_cutoff += result_dictionary[\"avg_true_negative_rate_with_prob_cutoff\"]\n false_positive_rate_cutoff += result_dictionary[\"avg_false_positive_rate_with_prob_cutoff\"]\n false_negative_rate_cutoff += result_dictionary[\"avg_false_negative_rate_with_prob_cutoff\"]\n unclassified_cutoff += result_dictionary[\"avg_false_negative_rate_with_prob_cutoff\"]\n balanced_accuracy_arr += result_dictionary[\"balanced_accuracy_arr\"]\n hmeasure_arr += result_dictionary[\"hmeasure_arr\"]\n auc_arr += result_dictionary[\"auc_arr\"]\n brier_score_arr += result_dictionary[\"brier_score_arr\"]\n fit_time_arr += result_dictionary[\"fit_time_arr\"]\n mcc_arr += result_dictionary[\"mcc_arr\"]\n true_positive_arr += result_dictionary[\"true_positive_rate_arr\"]\n 
true_negative_arr += result_dictionary[\"true_negative_rate_arr\"]\n false_positive_arr += result_dictionary[\"false_positive_rate_arr\"]\n false_negative_arr += result_dictionary[\"false_negative_rate_arr\"]\n\n avg_run_results = [None] * 31\n avg_run_results[0] = matthews_correlation_coefficient / float(count)\n avg_run_results[1] = brier_score / float(count)\n avg_run_results[2] = overall_true_rate / float(count)\n avg_run_results[3] = true_positive_rate / float(count)\n avg_run_results[4] = true_negative_rate / float(count)\n avg_run_results[5] = false_positive_rate / float(count)\n avg_run_results[6] = false_negative_rate / float(count)\n avg_run_results[7] = true_positive_rate_cutoff / float(len(self.errors))\n avg_run_results[8] = true_negative_rate_cutoff / float(len(self.errors))\n avg_run_results[9] = false_positive_rate_cutoff / float(len(self.errors))\n avg_run_results[10] = false_negative_rate_cutoff / float(len(self.errors))\n avg_run_results[11] = unclassified_cutoff / float(len(self.errors))\n avg_run_results[12] = fit_time / float(count)\n avg_run_results[14] = balanced_accuracy_arr\n avg_run_results[15] = auc_score / float(count)\n avg_run_results[16] = auc_arr\n avg_run_results[17] = brier_score_arr\n avg_run_results[18] = fit_time_arr\n avg_run_results[19] = mcc_arr\n avg_run_results[13] = self.calculate_std_deviation(balanced_accuracy_arr)\n avg_run_results[20] = self.calculate_std_deviation(mcc_arr)\n avg_run_results[21] = self.calculate_std_deviation(brier_score_arr)\n avg_run_results[22] = self.calculate_std_deviation(auc_arr)\n avg_run_results[23] = self.calculate_std_deviation(fit_time_arr)\n avg_run_results[24] = self.calculate_std_deviation(true_positive_arr)\n avg_run_results[25] = self.calculate_std_deviation(true_negative_arr)\n avg_run_results[26] = self.calculate_std_deviation(false_positive_arr)\n avg_run_results[27] = self.calculate_std_deviation(false_negative_arr)\n avg_run_results[28] = hmeasure / float(count)\n avg_run_results[29] = self.calculate_std_deviation(hmeasure_arr)\n avg_run_results[30] = hmeasure_arr\n\n return avg_run_results", "def get_total_n_cpu(self) -> int:", "def effectiveness(self):\n self._effectiveness = 0.20 * self.ANA + 0.20 * self.DAM + 0.20 * self.MOA + 0.20 * self.MFA + 0.20 * self.NOP\n return round(self._effectiveness, 5)", "def test_load_avg_15():\n result = _run_metric('load_avg_15')\n assert result.exit_code == 0", "def total_sdram_requirements(self):", "def LoadRateValue(self):\n\t\treturn self._get_attribute('loadRateValue')", "def get_load_data():\n proc_stat = open(\"/proc/stat\", \"r\")\n ret = []\n #times_since_startup = proc_stat.readline().strip().split()[1:]\n for line in proc_stat:\n line_split = line.strip().split()\n if(not (\"cpu\" in line_split[0])): #we have gone past the CPU lines\n break\n else:\n #everything but the label since we know [0] is overall and after that is per core by index\n ret.append(line_split[1:]) \n proc_stat.close()\n return ret", "def average_distance(self):\r\n total = 0\r\n edges = 0\r\n for code, _list in self.edges.items():\r\n for edge in _list:\r\n total += edge.distance\r\n edges += 1\r\n return total / edges", "def avg_extend_time(self):\r\n if self.total_extended:\r\n return self.total_extend_time/self.total_extended\r\n else: return 0", "def averageDominationCount(leaf):\n averageDominationCount = np.nanmean(leaf.calDominationCount())\n return averageDominationCount", "def class_average(examples):\n averages = np.zeros((1, attribute_count))\n\n # if we have no examples, 
then we'll just end early\n if len(examples) == 0:\n return averages\n\n for ex in examples:\n averages += ex.row\n\n return averages / len(examples)", "def average(self):\n\n x = list(zip(*self.memory))\n states = list(x[0])\n actions = list(x[1])\n \n downsampled_states = resample(states , self.output_size-1)\n downsampled_actions = resample(actions, self.output_size-1)\n\n return downsampled_states, downsampled_actions", "def average_age():\n df = pd.read_csv(config.META_FQN, sep=\"\\t\")\n ages = []\n for _, row in df.iterrows():\n if row[\"asr_test\"]:\n age = row[\"Age_ses1\"]\n if not math.isnan(age):\n ages.append(age)\n\n print(\"------ Age ------\")\n print_stats(ages)", "def calculate_sum_of_all_attributes(self):\n\n sum = 0\n\n for key, val in self.__dict__.items():\n\n if isinstance(val, (int, float)):\n sum += val\n\n return sum", "def averaged_risk(self):\n return self._averaged_risk", "def averaged_risk(self):\n return self._averaged_risk", "def _avg(cls, l):\n\n return sum(l) / float(len(l))", "def cf_mean_ac(self):\n return self['capacity_factor_ac'] / 100", "def _get_mean(self):\n return self._get_conditional_negative_energy()", "def get_average(self, samples=50):\n first = self.layers[0].load_image()\n res = np.zeros(first.shape, dtype=float)\n intervals = len(self.layers)/samples\n for l in self.layers[::int(intervals)]:\n img = l.load_image().astype(float)\n res += img\n l.image = None\n return samples**-1*res", "def _sample_load(proc):\n return 0.01 * _for_process_and_descendants(\n psutil.Process.get_cpu_percent,\n proc,\n )" ]
[ "0.7328244", "0.64413404", "0.6380107", "0.63506234", "0.6312014", "0.6299931", "0.618164", "0.6149521", "0.6113699", "0.608994", "0.6078722", "0.59945375", "0.5977917", "0.5886562", "0.5843336", "0.5759978", "0.5727464", "0.565973", "0.5629968", "0.56079954", "0.5549923", "0.5545292", "0.5543038", "0.5536138", "0.55217427", "0.5501296", "0.5498026", "0.5487671", "0.54786307", "0.5467715", "0.5441438", "0.54408777", "0.5439412", "0.5427769", "0.5423653", "0.5411402", "0.5410965", "0.5407039", "0.540569", "0.53999317", "0.538792", "0.5372807", "0.5372371", "0.53715706", "0.53663224", "0.53570294", "0.5355454", "0.5352177", "0.5351911", "0.5333133", "0.5328123", "0.5328123", "0.5327252", "0.531356", "0.53049105", "0.52697664", "0.52682775", "0.5267318", "0.52655274", "0.5261054", "0.5257406", "0.5257277", "0.52483255", "0.5242019", "0.52391195", "0.523553", "0.52331066", "0.5229411", "0.5229236", "0.52251416", "0.5208578", "0.52084893", "0.520084", "0.52006614", "0.52006614", "0.5197727", "0.51975214", "0.51951295", "0.5195025", "0.5190774", "0.5187825", "0.5184196", "0.5184039", "0.51810706", "0.51790476", "0.5174144", "0.51709247", "0.5166379", "0.51657826", "0.5155754", "0.51434326", "0.51389635", "0.51377845", "0.5127542", "0.5127542", "0.51255226", "0.5125063", "0.511858", "0.5117246", "0.5112517" ]
0.7217842
1
Get values from the community being connected to (second community)
def get_intertie_values (self): #~ print self.new_intertie_data.get_item('community','model as intertie') if self.new_intertie_data is None: raise ValueError, "No community to intertie to" self.connect_to_intertie = \ self.new_intertie_data.get_item('community','model as intertie') self.intertie_generation_efficiency = \ self.new_intertie_data.get_item( 'community', 'diesel generation efficiency' ) it_diesel_prices = self.new_intertie_data.get_item( 'community', 'diesel prices' ) it_diesel_prices.index = it_diesel_prices.index.astype(int) #~ print it_diesel_prices.ix[self.start_year:self.end_year] self.intertie_diesel_prices = \ it_diesel_prices.ix[self.start_year:self.end_year].values.T[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_communities(self):\n return self.__communities", "def get_communities(self):\n return self._communities.values()", "def get_communalities(self):\n df_communalities = pd.DataFrame(self.fa.get_communalities()).set_index(self.df.columns)\n if self.verbose:\n print(f'Communalities\\n{df_communalities}\\n')\n return df_communalities", "def get_communities(browser: RoboBrowser, desired_communities: list):\n browser.open(URL_BASE + '/info/profil/meinetipprunden')\n content = get_kicktipp_content(browser)\n links = content.find_all('a')\n def gethreftext(link): return link.get('href').replace(\"/\", \"\")\n\n def is_community(link):\n hreftext = gethreftext(link)\n if hreftext == link.get_text():\n return True\n else:\n linkdiv = link.find('div', {'class': \"menu-title-mit-tippglocke\"})\n return linkdiv and linkdiv.get_text() == hreftext\n community_list = [gethreftext(link)\n for link in links if is_community(link)]\n if len(desired_communities) > 0:\n return intersection(community_list, desired_communities)\n return community_list", "def community(self):\n return self._community", "def communities(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"communities\")", "def add_communites(self):\n\n query = '''\n MATCH (c1:)-[r:INTERACTS]->(c2:)\n RETURN c1.name, c2.name, r.weight AS weight\n '''\n ig = IGraph.TupleList(self.graph.run(query), weights=True)\n\n clusters = IGraph.community_walktrap(ig, weights=\"weight\").as_clustering()\n\n nodes = [{\"name\": node[\"name\"]} for node in ig.vs]\n for node in nodes:\n idx = ig.vs.find(name=node[\"name\"]).index\n node[\"community\"] = clusters.membership[idx]\n\n write_clusters_query = '''\n UNWIND {nodes} AS n\n MATCH (c:) WHERE c.name = n.name\n SET c.community = toInt(n.community)\n '''\n\n self.graph.run(write_clusters_query, nodes=nodes)", "def find_and_print_network_communities(G, code_dict=None):\n\n comm_dict = partition(G)\n\n comm_members = {}\n for comm in set(comm_dict.values()):\n countries = [node for node in comm_dict if comm_dict[node] == comm]\n if code_dict is not None:\n countries = [code_dict[code] for code in countries]\n\n comm_members[comm] = countries\n\n return comm_members, get_modularity(G, comm_dict)", "def get_community(self, cid: str) -> list:\n\n t, cid = cid.split(\"_\")\n coms = self.get_clustering_at(int(t))\n return coms.communities[int(cid)]", "def assign_communities(graph):\n communities = nx.algorithms.community\\\n .greedy_modularity_communities(nx.Graph(graph))\n for node in graph.nodes:\n graph.nodes[node]['community'] = [i for i,c in enumerate(communities)\n if node in c][0]\n graph.graph['modularity'] = nx.algorithms.community.quality\\\n .modularity(nx.Graph(graph),\n communities)", "def find_local_community(G, seed_node, weight, debug_log=False):\n nodes_in_community = seed_node if isinstance(seed_node, list) else [seed_node]\n modularity = edge_modularity(G, nodes_in_community=nodes_in_community, weight=weight)\n neighbor_edges = get_neighbor_edges(G, nodes_in_community=nodes_in_community)\n if debug_log:\n print('==========\\nInitial community has nodes:', nodes_in_community)\n print('Neighbor edges:', neighbor_edges)\n print('Modularity = %f' % modularity)\n while neighbor_edges:\n # Compute the edge_modularity for each neighbor edge,\n # suppose the neighbor edge is added to the community\n mod_max, c_max, e_max = 0, None, None\n for e in neighbor_edges:\n # edges in the current community\n edges_in_temp_community = list(G.subgraph(nodes_in_community).edges)\n # 
append the candidate edge\n edges_in_temp_community.append(e)\n nodes_in_temp_community = list(G.edge_subgraph(edges_in_temp_community).nodes)\n mod_temp = edge_modularity(G, nodes_in_community=nodes_in_temp_community, weight=weight)\n if mod_temp > mod_max:\n mod_max, c_max, e_max = mod_temp, nodes_in_temp_community, e\n if mod_max > modularity:\n if debug_log:\n print('==========\\nEdge', e_max, 'and node', set(e_max).difference(nodes_in_community), 'are added to the community')\n\n # Update the community and the corresponding neighbor edges\n nodes_in_community = c_max\n modularity = mod_max\n neighbor_edges = get_neighbor_edges(G, nodes_in_community=nodes_in_community)\n\n if debug_log:\n print('The community has nodes:', nodes_in_community)\n print('Modularity = %f' % mod_max)\n print('Neighbor edges:', neighbor_edges)\n else:\n break\n return nodes_in_community, modularity", "def _community(G, u, community):\n node_u = G.node[u]\n try:\n return node_u[community]\n except KeyError:\n raise nx.NetworkXAlgorithmError('No community information')", "def get_secondary_connections(network, user):\n if user not in network:\n return None\n if network[user][0] == []:\n return []\n return [person\n for group in\n [network[connection][0] for connection in network[user][0]]\n for person in group]", "def _get_extended_community(self):\n return self.__extended_community", "def communities(self, config):\n\n raise NotImplementedError", "def getCubes():", "def community_detection(net_G):\r\n if list(nx.isolates(net_G)) == []:\r\n part = community.best_partition(net_G)\r\n #values = [part.get(node) for node in net_G.nodes()]\r\n #nx.draw_spring(net_G, cmap = plt.get_cmap('jet'), node_color = values, node_size=30, with_labels=False)\r\n #plt.show()\r\n else:\r\n net_G = net_G.copy()\r\n net_G.remove_nodes_from(list(nx.isolates(net_G)))\r\n part = community.best_partition(net_G)\r\n list_nodes = []\r\n for com in set(part.values()):\r\n list_nodes.append([nodes for nodes in part.keys() if part[nodes] == com])\r\n num_of_communities = len(list_nodes)\r\n partition_performance = nx.algorithms.community.quality.performance(net_G, list_nodes)\r\n net_communities = [[\"Numbers of communities:\", num_of_communities], \\\r\n [\"Partition performance:\", partition_performance]]\r\n return net_communities", "def GraphToCommunities(Network):\n comm=community.best_partition(Network)\n clusters={}\n for k in comm.keys():\n if clusters.has_key(comm[k])==False:\n clusters[comm[k]]=[]\n clusters[comm[k]].append(k)\n return (clusters)", "def get_connective_values(self):\n values = [[self.truth_value, self.number]]\n if len(self.children) == 1:\n values.extend(self.children[0].get_connective_values())\n elif len(self.children) == 2:\n child = self.children[0].get_connective_values()\n child.extend(values)\n child.extend(self.children[1].get_connective_values())\n values = child\n return values", "def _get_send_community(self):\n return self.__send_community", "def get_local_neighbourhood_composition(self):\n neighbourhood_students = []\n\n #print(\"id, students\",self.unique_id, len(self.neighbourhood_students_indexes))\n neighbourhood_students = self.model.get_households_from_index(self.neighbourhood_students_indexes)\n local_neighbourhood_composition = get_counts_util(neighbourhood_students, self.model)\n #print(\"step \",self.model.schedule.steps,\" neighb students \",len(self.neighbourhood_students))\n\n return (local_neighbourhood_composition)", "def communityGraph(graph):\n\n lapgr = nx.laplacian_matrix(graph)\n\n 
# Get the eigenvalues and eigenvectors of the Laplacian matrix\n evals, evec = np.linalg.eigh(lapgr.todense())\n\n fiedler = evec[1]\n results = []\n ## \"Fiedler\", fiedler\n median = np.median(fiedler, axis=1) # median of the second eigenvalue\n for i in range(0, fiedler.size): # divide the graph nodes into two\n if(fiedler[0, i] < median):\n results.append(0)\n else:\n results.append(1)\n return results, evals, evec", "def node_community_colors(graph, communities):\n colors = nx_helpers.generate_colors(len(communities))\n\n def which_color(node):\n \"\"\"finds which community node is in and returns\n its corresponding color\n \"\"\"\n for i, com in enumerate(communities):\n if node in com:\n return colors[i]\n return nx_helpers.rgb_to_hex((0, 0, 0))\n\n node_colors = [which_color(node) for node in graph.nodes()]\n return node_colors", "def get_local_neighbourhood_composition(self):\n\n local_neighbourhood_composition = get_counts_util(self.neighbourhood_students, self.model)\n #print(\"step \",self.model.schedule.steps,\" neighb students \",len(self.neighbourhood_students))\n\n return (local_neighbourhood_composition)", "def get_connections(network, user):\n if not user in network:\n return None\n if not 'connections' in network[user]:\n return []\n return network[user]['connections']", "def _find_community(root, graph, visited):\n community = [root]\n visited.add(root)\n next_queue = [root]\n while next_queue:\n node = next_queue.pop(0)\n for child in graph[node]:\n if child not in visited:\n next_queue.append(child)\n community.append(child)\n visited.add(child)\n\n return community", "def neighboring_consumers(self, position_list):\n agent_list = []\n #loop over all neighbors\n for position in position_list:\n agents_in_cell = self.model.grid.get_cell_list_contents(position)\n #loop over all agents in the cell to find if agent is present\n for agent in agents_in_cell:\n if type(agent).__name__ == \"Consumer\":\n agent_list.append(agent)\n \n return agent_list", "def get_connections(network, user):\n if user not in network or network[user][0] == []:\n return None\n return network[user][0]", "def get_explicit_community_match(self) -> list:\n return self.matching", "def get_community_member(self, partition, community_dict, node, kind):\r\n comm = community_dict[partition[node]]\r\n return [x for x in comm if x.startswith(kind)]", "def _community_detection(self, kg: KG) -> None:\n nx_graph = nx.Graph()\n\n for vertex in kg._vertices:\n if not vertex.predicate:\n nx_graph.add_node(str(vertex), vertex=vertex)\n\n for vertex in kg._vertices:\n if not vertex.predicate:\n # Neighbors are predicates\n for pred in kg.get_neighbors(vertex):\n for obj in kg.get_neighbors(pred):\n nx_graph.add_edge(\n str(vertex), str(obj), name=str(pred)\n )\n\n # Create a dictionary that maps the URI on a community\n partition = community.best_partition(\n nx_graph, resolution=self.resolution\n )\n self.labels_per_community = defaultdict(list)\n\n self.communities = {}\n vertices = nx.get_node_attributes(nx_graph, \"vertex\")\n for node in partition:\n if node in vertices:\n self.communities[vertices[node]] = partition[node]\n\n for node in self.communities:\n self.labels_per_community[self.communities[node]].append(node)", "def nodes(self, irc, msg, args, channel):\n pie = self.instances[irc.network]\n irc.reply([str(x) for x in pie.graphs[channel].nodes.values()])", "def get_secondary_connections(network, user):\n if user not in network:\n return None\n if network[user]['connections'] != []:\n result = []\n 
for conn in get_connections(network, user):\n for conn_2 in get_connections(network, conn):\n if conn_2 not in result:\n result.append(conn_2)\n return result\n return []", "def get_nodes_and_edges(self, conductor_graph_instance):\n nodes = conductor_graph_instance.concert_clients.values()\n edges = []\n important_states = [\n concert_msgs.ConcertClientState.MISSING,\n concert_msgs.ConcertClientState.AVAILABLE\n ]\n for node in nodes:\n if node.msg.state in important_states: # and node.msg.conn_stats.gateway_available:\n edges.append(Edge(\"conductor\", node.concert_alias, node.link_type))\n return (nodes, edges)", "def _get_nodes(self):\n viewpoint = \"shiva_{}\".format(cherrypy.session[\"id\"])\n messages_db = self.mongo[viewpoint][\"messages\"]\n people_db = self.mongo[viewpoint][\"people\"]\n #\n senders = messages_db.distinct(\"sender\")\n owner_id = cherrypy.session[\"id\"]\n nodes = list()\n for sender in senders:\n person = people_db.find_one({\"id\": sender})\n if person is None:\n name = \"id{}\".format(sender)\n else:\n name = person[\"display_name\"]\n records = list(messages_db.aggregate([{\n \"$match\": {\n \"$or\": [\n {\"sender\": owner_id, \"receiver\": sender},\n {\"sender\": sender, \"receiver\": owner_id}\n ]\n }\n }, {\"$group\": {\"_id\": None, \"count\": {\"$sum\": 1}}}]))\n if not records:\n records = 0\n else:\n records = records[0][\"count\"]\n info = \"Total records: {}\".format(records)\n history_link = \"/vk/read?id={}\".format(sender)\n statistics_link = \"#\"\n if records > 0:\n nodes.append({\n \"id\": sender,\n \"name\": name,\n \"info\": info,\n \"records\": records,\n \"history_link\": history_link,\n \"statistics_link\": statistics_link\n })\n #\n return nodes", "def _get_cluster_components(self):\n print(\"Connecting to cluster...\")\n self.cluster.connect_to_cluster()\n print(\"Connected!\")\n print(\"Collecting information from the cluster...\")\n return self.cluster.get_components()", "def __call__(self) -> list:\n return self.network", "def connecting(node1, node2):\n comp_list = []\n \"\"\":type : list[components.Component]\"\"\"\n if node1 == node2:\n return []\n for comp in node1.connected_comps:\n if comp.neg == node2:\n comp_list.append(comp)\n elif comp.pos == node2:\n comp_list.append(comp)\n return comp_list", "def get_connection_genes(key, config):\n gene1 = ConnectionGene(key, config)\n gene1.enabled = True\n gene1.weight = 0\n gene2 = ConnectionGene(key, config)\n gene2.enabled = False\n gene2.weight = 1\n return gene1, gene2", "def testProtractedCommunityParametersStored(self):\n self.assertEqual([1, 2, 3, 4, 5, 6, 7, 8], self.c.get_community_references())\n self.assertEqual(\n 1, self.c.get_community_reference(0.1, 0.0, False, 0, 0.0, min_speciation_gen=50, max_speciation_gen=100)\n )\n self.assertEqual(\n 2, self.c.get_community_reference(0.2, 0.0, False, 0, 0.0, min_speciation_gen=50, max_speciation_gen=100)\n )\n self.assertEqual(\n 3, self.c.get_community_reference(0.1, 0.0, False, 0, 0.0, min_speciation_gen=25, max_speciation_gen=100)\n )\n self.assertEqual(\n 4, self.c.get_community_reference(0.2, 0.0, False, 0, 0.0, min_speciation_gen=25, max_speciation_gen=100)\n )\n self.assertEqual(\n 5, self.c.get_community_reference(0.1, 0.0, False, 0, 0.0, min_speciation_gen=50, max_speciation_gen=200)\n )\n self.assertEqual(\n 6, self.c.get_community_reference(0.2, 0.0, False, 0, 0.0, min_speciation_gen=50, max_speciation_gen=200)\n )\n self.assertEqual(\n 7, self.c.get_community_reference(0.1, 0.0, False, 0, 0.0, min_speciation_gen=0.0, 
max_speciation_gen=2000)\n )\n self.assertEqual(\n 8, self.c.get_community_reference(0.2, 0.0, False, 0, 0.0, min_speciation_gen=0.0, max_speciation_gen=2000)\n )\n ed1 = {\n \"speciation_rate\": 0.1,\n \"time\": 0.0,\n \"fragments\": 0,\n \"metacommunity_reference\": 0,\n \"min_speciation_gen\": 50,\n \"max_speciation_gen\": 100,\n }\n ed2 = {\n \"speciation_rate\": 0.2,\n \"time\": 0.0,\n \"fragments\": 0,\n \"metacommunity_reference\": 0,\n \"min_speciation_gen\": 50,\n \"max_speciation_gen\": 100,\n }\n ed3 = {\n \"speciation_rate\": 0.1,\n \"time\": 0.0,\n \"fragments\": 0,\n \"metacommunity_reference\": 0,\n \"min_speciation_gen\": 25,\n \"max_speciation_gen\": 100,\n }\n ed4 = {\n \"speciation_rate\": 0.2,\n \"time\": 0.0,\n \"fragments\": 0,\n \"metacommunity_reference\": 0,\n \"min_speciation_gen\": 25,\n \"max_speciation_gen\": 100,\n }\n ed5 = {\n \"speciation_rate\": 0.1,\n \"time\": 0.0,\n \"fragments\": 0,\n \"metacommunity_reference\": 0,\n \"min_speciation_gen\": 50,\n \"max_speciation_gen\": 200,\n }\n ed6 = {\n \"speciation_rate\": 0.2,\n \"time\": 0.0,\n \"fragments\": 0,\n \"metacommunity_reference\": 0,\n \"min_speciation_gen\": 50,\n \"max_speciation_gen\": 200,\n }\n ed7 = {\n \"speciation_rate\": 0.1,\n \"time\": 0.0,\n \"fragments\": 0,\n \"metacommunity_reference\": 0,\n \"min_speciation_gen\": 0.0,\n \"max_speciation_gen\": 2000,\n }\n ed8 = {\n \"speciation_rate\": 0.2,\n \"time\": 0.0,\n \"fragments\": 0,\n \"metacommunity_reference\": 0,\n \"min_speciation_gen\": 0.0,\n \"max_speciation_gen\": 2000,\n }\n com1_dict = self.c.get_community_parameters(1)\n com2_dict = self.c.get_community_parameters(2)\n com3_dict = self.c.get_community_parameters(3)\n com4_dict = self.c.get_community_parameters(4)\n com5_dict = self.c.get_community_parameters(5)\n com6_dict = self.c.get_community_parameters(6)\n com7_dict = self.c.get_community_parameters(7)\n com8_dict = self.c.get_community_parameters(8)\n self.assertEqual(ed1, com1_dict)\n self.assertEqual(ed2, com2_dict)\n self.assertEqual(ed3, com3_dict)\n self.assertEqual(ed4, com4_dict)\n self.assertEqual(ed5, com5_dict)\n self.assertEqual(ed6, com6_dict)\n self.assertEqual(ed7, com7_dict)\n self.assertEqual(ed8, com8_dict)", "def testProtractedCommunityParametersStored(self):\n self.assertEqual([1, 2, 3, 4, 5, 6, 7, 8], self.c.get_community_references())\n self.assertEqual(\n 1, self.c.get_community_reference(0.1, 0.0, False, 0, 0.0, min_speciation_gen=50, max_speciation_gen=100)\n )\n self.assertEqual(\n 2, self.c.get_community_reference(0.2, 0.0, False, 0, 0.0, min_speciation_gen=50, max_speciation_gen=100)\n )\n self.assertEqual(\n 3, self.c.get_community_reference(0.1, 0.0, False, 0, 0.0, min_speciation_gen=25, max_speciation_gen=100)\n )\n self.assertEqual(\n 4, self.c.get_community_reference(0.2, 0.0, False, 0, 0.0, min_speciation_gen=25, max_speciation_gen=100)\n )\n self.assertEqual(\n 5, self.c.get_community_reference(0.1, 0.0, False, 0, 0.0, min_speciation_gen=50, max_speciation_gen=200)\n )\n self.assertEqual(\n 6, self.c.get_community_reference(0.2, 0.0, False, 0, 0.0, min_speciation_gen=50, max_speciation_gen=200)\n )\n self.assertEqual(\n 7, self.c.get_community_reference(0.1, 0.0, False, 0, 0.0, min_speciation_gen=0.0, max_speciation_gen=2000)\n )\n self.assertEqual(\n 8, self.c.get_community_reference(0.2, 0.0, False, 0, 0.0, min_speciation_gen=0.0, max_speciation_gen=2000)\n )\n ed1 = {\n \"speciation_rate\": 0.1,\n \"time\": 0.0,\n \"fragments\": 0,\n \"metacommunity_reference\": 0,\n 
\"min_speciation_gen\": 50,\n \"max_speciation_gen\": 100,\n }\n ed2 = {\n \"speciation_rate\": 0.2,\n \"time\": 0.0,\n \"fragments\": 0,\n \"metacommunity_reference\": 0,\n \"min_speciation_gen\": 50,\n \"max_speciation_gen\": 100,\n }\n ed3 = {\n \"speciation_rate\": 0.1,\n \"time\": 0.0,\n \"fragments\": 0,\n \"metacommunity_reference\": 0,\n \"min_speciation_gen\": 25,\n \"max_speciation_gen\": 100,\n }\n ed4 = {\n \"speciation_rate\": 0.2,\n \"time\": 0.0,\n \"fragments\": 0,\n \"metacommunity_reference\": 0,\n \"min_speciation_gen\": 25,\n \"max_speciation_gen\": 100,\n }\n ed5 = {\n \"speciation_rate\": 0.1,\n \"time\": 0.0,\n \"fragments\": 0,\n \"metacommunity_reference\": 0,\n \"min_speciation_gen\": 50,\n \"max_speciation_gen\": 200,\n }\n ed6 = {\n \"speciation_rate\": 0.2,\n \"time\": 0.0,\n \"fragments\": 0,\n \"metacommunity_reference\": 0,\n \"min_speciation_gen\": 50,\n \"max_speciation_gen\": 200,\n }\n ed7 = {\n \"speciation_rate\": 0.1,\n \"time\": 0.0,\n \"fragments\": 0,\n \"metacommunity_reference\": 0,\n \"min_speciation_gen\": 0.0,\n \"max_speciation_gen\": 2000,\n }\n ed8 = {\n \"speciation_rate\": 0.2,\n \"time\": 0.0,\n \"fragments\": 0,\n \"metacommunity_reference\": 0,\n \"min_speciation_gen\": 0.0,\n \"max_speciation_gen\": 2000,\n }\n com1_dict = self.c.get_community_parameters(1)\n com2_dict = self.c.get_community_parameters(2)\n com3_dict = self.c.get_community_parameters(3)\n com4_dict = self.c.get_community_parameters(4)\n com5_dict = self.c.get_community_parameters(5)\n com6_dict = self.c.get_community_parameters(6)\n com7_dict = self.c.get_community_parameters(7)\n com8_dict = self.c.get_community_parameters(8)\n self.assertEqual(ed1, com1_dict)\n self.assertEqual(ed2, com2_dict)\n self.assertEqual(ed3, com3_dict)\n self.assertEqual(ed4, com4_dict)\n self.assertEqual(ed5, com5_dict)\n self.assertEqual(ed6, com6_dict)\n self.assertEqual(ed7, com7_dict)\n self.assertEqual(ed8, com8_dict)", "def testProtractedCommunityParametersStored(self):\n self.assertEqual([1, 2, 3, 4, 5, 6, 7, 8], self.c.get_community_references())\n self.assertEqual(\n 1, self.c.get_community_reference(0.1, 0.0, False, 0, 0.0, min_speciation_gen=50, max_speciation_gen=100)\n )\n self.assertEqual(\n 2, self.c.get_community_reference(0.2, 0.0, False, 0, 0.0, min_speciation_gen=50, max_speciation_gen=100)\n )\n self.assertEqual(\n 3, self.c.get_community_reference(0.1, 0.0, False, 0, 0.0, min_speciation_gen=25, max_speciation_gen=100)\n )\n self.assertEqual(\n 4, self.c.get_community_reference(0.2, 0.0, False, 0, 0.0, min_speciation_gen=25, max_speciation_gen=100)\n )\n self.assertEqual(\n 5, self.c.get_community_reference(0.1, 0.0, False, 0, 0.0, min_speciation_gen=50, max_speciation_gen=200)\n )\n self.assertEqual(\n 6, self.c.get_community_reference(0.2, 0.0, False, 0, 0.0, min_speciation_gen=50, max_speciation_gen=200)\n )\n self.assertEqual(\n 7, self.c.get_community_reference(0.1, 0.0, False, 0, 0.0, min_speciation_gen=0.0, max_speciation_gen=2000)\n )\n self.assertEqual(\n 8, self.c.get_community_reference(0.2, 0.0, False, 0, 0.0, min_speciation_gen=0.0, max_speciation_gen=2000)\n )\n ed1 = {\n \"speciation_rate\": 0.1,\n \"time\": 0.0,\n \"fragments\": 0,\n \"metacommunity_reference\": 0,\n \"min_speciation_gen\": 50,\n \"max_speciation_gen\": 100,\n }\n ed2 = {\n \"speciation_rate\": 0.2,\n \"time\": 0.0,\n \"fragments\": 0,\n \"metacommunity_reference\": 0,\n \"min_speciation_gen\": 50,\n \"max_speciation_gen\": 100,\n }\n ed3 = {\n \"speciation_rate\": 0.1,\n \"time\": 
0.0,\n \"fragments\": 0,\n \"metacommunity_reference\": 0,\n \"min_speciation_gen\": 25,\n \"max_speciation_gen\": 100,\n }\n ed4 = {\n \"speciation_rate\": 0.2,\n \"time\": 0.0,\n \"fragments\": 0,\n \"metacommunity_reference\": 0,\n \"min_speciation_gen\": 25,\n \"max_speciation_gen\": 100,\n }\n ed5 = {\n \"speciation_rate\": 0.1,\n \"time\": 0.0,\n \"fragments\": 0,\n \"metacommunity_reference\": 0,\n \"min_speciation_gen\": 50,\n \"max_speciation_gen\": 200,\n }\n ed6 = {\n \"speciation_rate\": 0.2,\n \"time\": 0.0,\n \"fragments\": 0,\n \"metacommunity_reference\": 0,\n \"min_speciation_gen\": 50,\n \"max_speciation_gen\": 200,\n }\n ed7 = {\n \"speciation_rate\": 0.1,\n \"time\": 0.0,\n \"fragments\": 0,\n \"metacommunity_reference\": 0,\n \"min_speciation_gen\": 0.0,\n \"max_speciation_gen\": 2000,\n }\n ed8 = {\n \"speciation_rate\": 0.2,\n \"time\": 0.0,\n \"fragments\": 0,\n \"metacommunity_reference\": 0,\n \"min_speciation_gen\": 0.0,\n \"max_speciation_gen\": 2000,\n }\n com1_dict = self.c.get_community_parameters(1)\n com2_dict = self.c.get_community_parameters(2)\n com3_dict = self.c.get_community_parameters(3)\n com4_dict = self.c.get_community_parameters(4)\n com5_dict = self.c.get_community_parameters(5)\n com6_dict = self.c.get_community_parameters(6)\n com7_dict = self.c.get_community_parameters(7)\n com8_dict = self.c.get_community_parameters(8)\n self.assertEqual(ed1, com1_dict)\n self.assertEqual(ed2, com2_dict)\n self.assertEqual(ed3, com3_dict)\n self.assertEqual(ed4, com4_dict)\n self.assertEqual(ed5, com5_dict)\n self.assertEqual(ed6, com6_dict)\n self.assertEqual(ed7, com7_dict)\n self.assertEqual(ed8, com8_dict)", "def get_relevant(self, msg):\n nick = msg.GetNick()\n net = msg.GetNetwork()\n data = {\"body\": msg.GetText(),\n \"network\": net.GetName(),\n \"away\": net.IsIRCAway(),\n \"client_count\": len(net.GetClients()),\n \"nick\": nick.GetNick(),\n \"ident\": nick.GetIdent(),\n \"host\": nick.GetHost(),\n \"hostmask\": nick.GetHostMask()}\n chan = msg.GetChan()\n if chan:\n data[\"context\"] = data[\"channel\"] = chan.GetName()\n data[\"detached\"] = chan.IsDetached()\n else:\n data[\"context\"] = data[\"nick\"]\n return data", "def get_mn_info(self):\n\t\treturn self._infoCommonMuscleConnections, self._infoSpecialConnections", "def sinks(self):\n if self.rank < self.midpoint:\n partner = self.midpoint + (self.rank - self.left)\n if partner == self.right:\n partner -= 1\n else:\n partner = self.left + (self.rank - self.midpoint)\n if partner == self.midpoint:\n partner -= 1\n\n return {partner}", "def get_snmp_information(self):\n\n snmp_output = self._send_command('/snmp print')\n snmp_community_output = self._send_command(\n '/snmp community print terse')\n\n snmp = parse_output(snmp_output)\n community_list = parse_terse_output(snmp_community_output)\n\n community = {}\n\n for item in community_list:\n community.setdefault(item.get('name'), {\n 'acl': item.get('addresses'),\n 'mode': u'rw' if item.get('write-access') == 'yes' else u'ro'\n })\n\n return {\n 'contact': snmp.get('contact'),\n 'location': snmp.get('location'),\n 'community': community,\n 'chassis_id': ''\n }", "def get_source_and_sink_comps(\n graph_client: GremlinClient, topology_id: str, topology_ref: str\n) -> Dict[str, List[str]]:\n\n sgt: GraphTraversalSource = graph_client.topology_subgraph(\n topology_id, topology_ref\n )\n\n sources: List[str] = sgt.V().where(\n in_(\"logically_connected\").count().is_(0)\n ).values(\"component\").dedup().toList()\n\n sinks: List[str] = 
sgt.V().where(out(\"logically_connected\").count().is_(0)).values(\n \"component\"\n ).dedup().toList()\n\n return {\"sources\": sources, \"sinks\": sinks}", "def getSlaveVocabComuni(master2):\n results=[]\n lista=[]\n tupla=()\n for a in mapDisplayList(EV.allProvince()):\n if master2 == a[1]:\n results = [a for a in EV.comuni4provincia(a[0])]\n for result in results:\n tupla = (result.comune)\n lista.append(tupla)\n return lista", "def cluster_connectivity(G, weight='weight'):\n\t# 1) indexing the edges by community\n\tsum_edges_dic = { com : {} for com in range(G.nb_communities)}\n\tfor node1, node2 in G.edges():\n\t\tcomm1 = G.nodes[node1]['community']\n\t\tcomm2 = G.nodes[node2]['community']\n\t\tif comm2 not in sum_edges_dic[comm1]:\n\t\t\tsum_edges_dic[comm1][comm2] = 0\n\t\t\tsum_edges_dic[comm2][comm1] = 0\n\t\telse:\n\t\t\tif weight is None:\n\t\t\t\tsum_edges_dic[comm1][comm2] += 1\n\t\t\t\tsum_edges_dic[comm2][comm1] += 1\n\t\t\telse:\t\n\t\t\t\tsum_edges_dic[comm1][comm2] += G.edges[node1, node2][weight]\n\t\t\t\tsum_edges_dic[comm2][comm1] += G.edges[node1, node2][weight]\n\tc_connectivity = {}\n\t# 2) computing the connectivity\n\tfor com in sum_edges_dic:\n\t\tin_out_edges = sum(sum_edges_dic[com].values())\n\t\tc_connectivity[com] = round(- np.log2(sum_edges_dic[com][com] / in_out_edges),3) \n\treturn c_connectivity", "def get_connections_out(self) -> dict:\n return self.__ni_out", "def datasets() -> Community:\n\n global _COMMUNITY # noqa: PLW0603\n\n if _COMMUNITY is None:\n _COMMUNITY = Community.from_id(Configuration().community)\n\n return _COMMUNITY", "def __get_accident_com(self):\n accident_communes = (\n self.__raw_caracteristiques.com.value_counts().rename_axis(\"insee_com\").reset_index(name=\"nb_accident\")\n )\n merge = pd.merge(accident_communes, self.communes_info, on=\"insee_com\")\n merge[\"type\"] = [\"commune\" for string in range(len(merge.index))]\n return merge", "def populate_community():\n\t\tfor i in range(1,11):\n\t\t\tSimulation.community[\"person\"+str(i)] = Person(\"person\"+str(i))", "def connected_component(self):\n t1 = datetime.datetime.now()\n nodes = set(x.hex for x in self.agents)\n result = []\n while nodes:\n node = nodes.pop()\n # This set will contain the next group of nodes connected to each other.\n group = {node}\n # Build a queue with this node in it.\n queue = [node]\n # Iterate the queue.\n # When it's empty, we finished visiting a group of connected nodes.\n while queue:\n # Consume the next item from the queue.\n node = queue.pop(0)\n # Fetch the neighbors.\n neighbors = set(x for x in node.fon if x.is_occupied == 1)\n # Remove the neighbors we already visited.\n neighbors.difference_update(group)\n # Remove the remaining nodes from the global set.\n nodes.difference_update(neighbors)\n # Add them to the group of connected nodes.\n group.update(neighbors)\n # Add them to the queue, so we visit them in the next iterations.\n queue.extend(neighbors)\n\n # Add the group to the list of groups.\n result.append(len(group))\n td = datetime.datetime.now() - t1\n print(\"calculated {} connected components in {} seconds\".format(len(result),td.total_seconds()))\n return len(result), np.histogram(result, self.cluster_hist_breaks)[0]", "def getNeighbor(self, neighborID):", "def make_communities(community_side, communities_per_side):\n community_size = community_side * community_side\n communities = []\n seed_node = 0\n for i in range(communities_per_side):\n for j in range(communities_per_side):\n community = []\n for k in 
range(community_side):\n for z in range(community_side):\n _id = (\n communities_per_side * community_size * i\n + community_side * j\n + z\n + k * (communities_per_side * community_side)\n )\n # print(f\"{_id} \", end=\"\")\n community.append(_id)\n # print(\"- \", end=\"\")\n communities.append(community)\n #print()\n return communities", "def create_network(self, community_detection, wt_steps, n_clust, network_from, neighbors, top):\n \n if network_from == 'top_n':\n sort_by_scores = []\n\n for pair, score in scores_update.items():\n sort_by_scores.append([pair[0], pair[1], score[2]])\n top_n = sorted(sort_by_scores, reverse=False, key=lambda x: x[2])[:top]\n\n # Convert from distance to similarity for edge\n for score in top_n: \n c = 1/(1 + score[2])\n score[2] = c\n\n flat = [tuple(pair) for pair in top_n]\n\n elif network_from == 'knn': \n flat = []\n projection_knn = nearest_neighbors(neighbors=neighbors)\n\n for projection, knn in projection_knn.items():\n for n in knn:\n flat.append((projection, n[0], abs(n[3]))) # p1, p2, score\n\n clusters = {}\n g = Graph.TupleList(flat, weights=True)\n\n if community_detection == 'walktrap':\n try:\n wt = Graph.community_walktrap(g, weights='weight', steps=wt_steps)\n cluster_dendrogram = wt.as_clustering(n_clust)\n except:\n self.show_cluster_fail()\n elif community_detection == 'betweenness':\n try:\n ebs = Graph.community_edge_betweenness(g, weights='weight', directed=True)\n cluster_dendrogram = ebs.as_clustering(n_clust)\n except:\n self.show_cluster_fail()\n\n for community, projection in enumerate(cluster_dendrogram.subgraphs()):\n clusters[community] = projection.vs['name']\n\n #convert node IDs back to ints\n for cluster, nodes in clusters.items():\n clusters[cluster] = sorted([int(node) for node in nodes])\n \n remove_outliers(clusters)\n\n clustered = []\n for cluster, nodes in clusters.items():\n for n in nodes:\n clustered.append(n)\n\n clusters['singles'] = [] # Add singles to clusters if not in top n scores\n clusters['removed'] = []\n \n for node in projection_2D:\n if node not in clustered and node not in drop:\n clusters['singles'].append(node)\n elif node in drop:\n clusters['removed'].append(node)\n \n G = nx.Graph()\n\n for pair in flat:\n G.add_edge(int(pair[0]), int(pair[1]), weight=pair[2])\n\n #if you want to see directionality in the networkx plot\n #G = nx.MultiDiGraph(G)\n\n #adds singles if not in top n scores\n for node_key in projection_2D:\n if node_key not in G.nodes:\n G.add_node(node_key)\n\n return flat, clusters, G", "def components_graph(geo, stereo=True):\n return automol.graph.connected_components(graph(geo, stereo=stereo))", "def collect(self):\n import ray\n return ray.get([shard.get_data.remote() for shard in self.shard_list])", "def get_connections_in(self) -> dict:\n return self.__ni_in", "def connected_component(self, id1: int) -> list:\n list1 = []\n list2 = []\n if id1 in self.dw_graph.nodes:\n list1 = self.bfs(id1, False)\n list2 = self.bfs(id1, True)\n\n list3 = []\n temp=set(list2)\n for value in list1 :\n if value in temp:\n list3.append(value)\n self.dw_graph.nodes[value].distance=-10\n list3.sort()\n return list3", "def find_relation(sensors):\n sensor1name = \"humidity\"\n sensor2name = \"roadsurfacetemperature1\"\n sensor1val = {}\n sensor2val = {}\n\n for sensor in sensors:\n if sensor1name == sensor[\"name\"]:\n sensor1val.update({sensor[\"id\"]:sensor[\"value\"]})\n sensor1unit = sensor[\"unit\"]\n elif sensor2name == sensor[\"name\"]:\n 
sensor2val.update({sensor[\"id\"]:sensor[\"value\"]})\n sensor2unit = sensor[\"unit\"]\n\n var1 = []\n var2 = []\n\n for key in set(sensor1val).intersection(set(sensor2val)):\n var1.append(sensor1val[key])\n var2.append(sensor2val[key])\n\n plt.scatter(var1, var2)\n plt.title(sensor1name + \" vs \" + sensor2name)\n plt.xlabel(sensor1name + \" ( \" + sensor1unit + \" )\")\n plt.ylabel(sensor2name + \" ( \" + sensor2unit + \" )\")\n plt.show()\n\n return var1, var2", "def getNodes(self):\n data = self.connect('get','nodes',None)\n return data", "def detection_algorithm(G, edge_weight):\n Gc = G.copy()\n set_node_attributes(Gc, attr_name='k-index')\n seed_node2communities = {}\n\n from operator import itemgetter\n while Gc.number_of_nodes() > 0:\n seed_node = max(list(Gc.nodes(data='k-index')), key=itemgetter(1))[0]\n nodes_in_community, modularity = find_local_community(Gc, seed_node=seed_node, weight=edge_weight)\n seed_node2communities[seed_node] = (nodes_in_community, modularity)\n Gc.remove_nodes_from(nodes_in_community)\n return seed_node2communities", "def community_changes(self, community):\n pass", "def connected_components(self):\n return [_connected_components.remote(self.rows)]", "def get_outgoing_connections(self, comp):\n return self.connections.get(comp.id, [])", "def get_connected(self, env, s):\n raise NotImplementedError", "def testCommunityParameters(self):\n references = self.tree.get_community_references()\n self.assertListEqual([1, 2, 3, 4, 5, 6], references)\n reference1_dict = {\"speciation_rate\": 0.5, \"time\": 0.0, \"fragments\": 0, \"metacommunity_reference\": 0}\n reference5_dict = {\"speciation_rate\": 0.7, \"time\": 0.5, \"fragments\": 0, \"metacommunity_reference\": 0}\n self.assertDictEqual(reference1_dict, self.tree.get_community_parameters(reference=1))\n self.assertDictEqual(reference5_dict, self.tree.get_community_parameters(reference=6))", "def E(self) -> list:\n res = []\n for v in self.V():\n res.extend([(v.name, i) for i in v.get_connections().keys()])\n return res", "def get_members(self, community):\n return set(member for cid, member in self._associations if community.cid == cid)", "def learn_connectome(self):\n episode_nodes = [node for node in self.container.nodes if node.is_episode]\n if len(episode_nodes) < 2:\n return\n connections_counter = {}\n for node in episode_nodes:\n self._collect_episode_callout_stats(node, connections_counter)\n\n pair_list = [(key, connections_counter[key]) for key in connections_counter]\n pair_list.sort(key=lambda item: item[1], reverse=True)\n top_count = pair_list[0][1]\n if top_count < 4:\n return\n # make connections for the top half of pairs\n for pair, cnt in pair_list:\n if cnt > top_count // 2:\n self._make_connection_for_pair(pair)", "def Results(self):\n client_data_list = list()\n for client in self.client_list:\n client_data_list.append(client.data)\n server_data_list = list()\n for server in self.server_list:\n server_data_list.append(server.data)\n return (server_data_list, client_data_list)", "def getConnectionsBetweenSuperPeers(self):\r\n raise NotImplementedError()", "def transition(self):\n for index, row in self.community_table.iterrows():\n\n # CANOPY BASED SUCCESSION\n if row.succession_code == 1:\n self.ecocommunities[(self.ecocommunities == index) &\n (self.canopy > row['max_canopy'])] = row.to_ID\n\n # AGE BASED SUCCESSION\n elif row.succession_code == 2:\n self.ecocommunities = np.where((self.ecocommunities == index) &\n (self.forest_age > row['age_out']),\n self.climax_communities, 
self.ecocommunities)", "def test_intercommunalitys_get(self):\n pass", "def getMembers():", "def getMembers():", "def getMembers():", "def getMembers():", "def parse_community_cards(state):\n community_cards = list()\n community = state['game']['community']\n if community and 'card' in community:\n for card in community['card']:\n new_card = robopoker.entities.Card(card['@rank'], card['@suit'])\n community_cards.append(new_card)\n return community_cards", "def get_neighbours(self, value):\n\t\tnode = self.get_node(value)\n\t\tneighbours = [key.value for key in node.edges.keys()]\n\t\treturn neighbours", "def get_connection_info(self):\n return [(c.fullname, [u[1] for u in c.objects])\n for c in self._connections]", "def remote_connections(self):\r\n\r\n self.remote = self.newest_connections[~((self.newest_connections['remote_address'] == '0.0.0.0') | (self.newest_connections['remote_address'] == '127.0.0.1'))]\r\n return self.remote", "def connections(self, src=False, dst=True, params=True): \n conns = []\n if params:\n if src:\n #grab the node params that this node is a src to\n edges = self.parent.graph.out_edges(self, data=True) \n conns.extend([ edge[2][\"dst_param\"] for edge in edges ])\n if dst:\n #grab the node param that this node is a dst to\n edges = self.parent.graph.in_edges(self, data=True) \n conns.extend([ edge[2][\"src_param\"] for edge in edges ])\n else: \n if src:\n conns.extend(self.parent.graph.successors(self))\n if dst:\n conns.extend(self.parent.graph.predecessors(self))\n \n return conns", "def listCollaborators(self, collector, data=False):\n if data==False: return list(self.neighbors(collector))\n elif data==True: return [(n,self.node[n]) for n in self.neighbors(collector)]\n else: return [(n,self.node[n].get(data)) for n in self.neighbors(collector)]", "def find_communities(graph):\n visited = set()\n communities = []\n for node in graph:\n if node not in visited:\n community = _find_community(node, graph, visited)\n communities.append(community)\n\n return communities", "def get_connected_user():\n usernames = clients.keys()\n data = json.dumps(usernames)\n emit('on_client_list_received', data)", "def get_conn_matrix_vector(self):\n\n vect = []\n for line in sorted(self.connection_matrix):\n for item in self.connection_matrix[line]:\n vect.append(item)\n\n return vect", "def cluster(self):\n logger.debug(\"Beginning feature based clustering on %d clusters.\" % len(self.c2b))\n # Merge the two nearest clusters until we can't.\n #\n while self.mergeNearestClusters():\n pass\n logger.debug(\"After clustering, there are now %d clusters remaining.\" % len(self.c2b))\n return self.c2b.values()", "def getNodeNetworks(self,node):\n data = self.connect('get','nodes/%s/network' % (node),None)\n return data", "def pull(self):\n \n data = self.s.recv(1024)\n if data:\n info = ET.fromstring(data)\n info = { info[0].tag : info[0].text, info[1].tag : info[1].text}\n #print(info)\n \n return info.get(\"topic\"), info.get(\"value\")\n pass", "def connectedComponents(catalog):\n catalog[\"components\"] = scc.KosarajuSCC(catalog[\"connections\"])\n return scc.connectedComponents(catalog[\"components\"])", "def edges(self, irc, msg, args, channel):\n pie = self.instances[irc.network]\n irc.reply([str(x) for x in pie.graphs[channel].edges.values()])", "def community_matching(\n self, method: Callable[[set, set], float], two_sided: bool = False\n ) -> list:\n\n if self.matching is not None:\n return self.matching\n\n lifecycle = []\n\n for i in range(self.current_observation - 
1):\n c_i = self.clusterings[i]\n c_j = self.clusterings[i + 1]\n for name_i, com_i in c_i.named_communities.items():\n\n # name_i = f\"{self.obs_to_time[i]}_{cid_i}\"\n best_match = []\n best_score = 0\n\n for name_j, com_j in c_j.named_communities.items():\n # name_j = f\"{self.obs_to_time[i+1]}_{cid_j}\"\n\n match = method(com_i, com_j)\n if match > best_score:\n best_match = [name_j]\n best_score = match\n elif match == best_score:\n best_match.append(name_j)\n\n for j in best_match:\n lifecycle.append((name_i, j, best_score))\n\n if two_sided:\n\n for i in range(self.current_observation - 1, 0, -1):\n c_i = self.clusterings[i]\n c_j = self.clusterings[i - 1]\n\n for name_i, com_i in c_i.named_communities.items():\n # name_i = f\"{self.obs_to_time[i]}_{cid_i}\"\n best_match = []\n best_score = 0\n\n for name_j, com_j in c_j.named_communities.items():\n # name_j = f\"{self.obs_to_time[i-1]}_{cid_j}\"\n\n match = method(com_i, com_j)\n if match > best_score:\n best_match = [name_j]\n best_score = match\n elif match == best_score:\n best_match.append(name_j)\n\n for j in best_match:\n lifecycle.append((j, name_i, best_score))\n\n self.matched = lifecycle\n\n return lifecycle", "def show_neighbours(self):\n if self.connected_to:\n s = \"\"\n for connection in self.connected_to:\n s += f\"{connection.get_name()} \"\n return s\n return \"No neighbours\"", "def _get_cluster_list(self):\n return self.__cluster_list", "def fetch_community_crime_data(dpath='/tmp/glm-tools'):\n if os.path.exists(dpath):\n shutil.rmtree(dpath)\n os.mkdir(dpath)\n\n fname = os.path.join(dpath, 'communities.csv')\n base_url = (\"http://archive.ics.uci.edu/ml/machine-learning-databases\")\n url = os.path.join(base_url, \"communities/communities.data\")\n urllib.urlretrieve(url, fname)\n\n # Read in the file\n df = pd.read_csv('/tmp/glm-tools/communities.csv', header=None)\n\n # Remove missing values\n df.replace('?', np.nan, inplace=True)\n df.dropna(inplace=True, axis=1)\n df.dropna(inplace=True, axis=0)\n df.reset_index(inplace=True, drop=True)\n\n # Extract predictors and target from data frame\n X = np.array(df[df.keys()[range(3, 102)]])\n y = np.array(df[127])\n\n return X, y", "def show_networks():\n return get_networks()", "def construct_graph_connection(coord_list, radie):\n\n connection_distance = []\n connection = []\n for j, data in enumerate(coord_list):\n '''Calculate the relative distance of the nodes'''\n distance = np.hypot(coord_list[:,0]-data[0], coord_list[:,1]-data[1])\n '''save nodes which are in range'''\n #for i, data in enumerate(distance):\n for i in range(j+1, len(distance)):\n data = distance[i]\n if data < radie:\n connection.append([j, i])\n connection_distance.append(data)\n\n\n connection_distance = np.array(connection_distance)\n connection = np.array(connection)\n return connection, connection_distance", "def neighbors(node, topology):\n return [n for n in topology[node]]" ]
[ "0.69425935", "0.636551", "0.6130402", "0.6092634", "0.60580534", "0.5994856", "0.58466315", "0.57885724", "0.5787836", "0.5702516", "0.5695605", "0.56887186", "0.5679699", "0.5653863", "0.56507945", "0.5644417", "0.5626879", "0.560844", "0.5607573", "0.55997175", "0.55808896", "0.55255866", "0.55242157", "0.54324555", "0.5429693", "0.5427714", "0.54099387", "0.53904706", "0.53792864", "0.53459084", "0.5343128", "0.5322994", "0.5313734", "0.5312173", "0.5310703", "0.5299429", "0.529213", "0.5286844", "0.5273329", "0.52727574", "0.52727574", "0.52727574", "0.52643096", "0.5256684", "0.52561444", "0.5236377", "0.52244264", "0.522402", "0.52022004", "0.5199084", "0.51899624", "0.5172805", "0.517165", "0.51638705", "0.5161408", "0.5122769", "0.5115852", "0.5113638", "0.51111656", "0.5104079", "0.508571", "0.50846606", "0.5061703", "0.50576764", "0.5056763", "0.5054502", "0.5054291", "0.50506836", "0.5050157", "0.5049307", "0.5033273", "0.5033039", "0.5028384", "0.50260437", "0.50209177", "0.5016104", "0.5015927", "0.5015927", "0.5015927", "0.5015927", "0.5014737", "0.5013366", "0.5000811", "0.4996538", "0.49927503", "0.4990112", "0.4986362", "0.49712697", "0.49584067", "0.49489146", "0.49464127", "0.49308446", "0.4929158", "0.49284247", "0.4920901", "0.4912276", "0.4910731", "0.49074814", "0.49067724", "0.48990446", "0.48877472" ]
0.0
-1
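The document field in the row above (`get_intertie_values`) reads intertie settings from a second community's configuration and slices that community's diesel prices down to the model's year range. The following is only a minimal, hedged sketch of that pattern, not part of the dataset: the config layout, function name, and numbers are assumptions for illustration, with a plain nested dict standing in for the original `new_intertie_data` object and `.loc` used in place of the deprecated `.ix`.

# Hedged sketch: assumed structure mirroring the get_intertie_values row above.
import pandas as pd

def intertie_values_sketch(new_intertie_data, start_year, end_year):
    # new_intertie_data stands in for the original CommunityData-style object;
    # here it is just a nested dict exposing the same keys the row's code reads.
    if new_intertie_data is None:
        raise ValueError("No community to intertie to")
    community = new_intertie_data['community']
    connect_to_intertie = community['model as intertie']
    generation_efficiency = community['diesel generation efficiency']
    # Coerce the price index to integer years, then slice to the model window.
    prices = pd.Series(community['diesel prices'])
    prices.index = prices.index.astype(int)
    diesel_prices = prices.loc[start_year:end_year].to_numpy()
    return connect_to_intertie, generation_efficiency, diesel_prices

# Example with made-up values:
cfg = {'community': {'model as intertie': True,
                     'diesel generation efficiency': 12.5,
                     'diesel prices': {2017: 3.10, 2018: 3.25, 2019: 3.40}}}
print(intertie_values_sketch(cfg, 2017, 2018))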
Calculate the generation offset by connecting a transmission line to the community to connect to. Attributes
def calc_intertie_offset_generation (self): self.generation = \ self.forecast.get_generation(self.start_year,self.end_year) dist = self.comp_specs['distance to community'] self.annual_transmission_loss = \ 1 - ( (1- (self.comp_specs['transmission loss per mile']/ 100.0)) ** dist) self.intertie_offset_generation = \ self.generation * (1 + self.annual_transmission_loss) gen_eff = self.intertie_generation_efficiency self.intertie_offset_generation_fuel_used = \ self.intertie_offset_generation / gen_eff #~ print 'self.proposed_generation',self.proposed_generation #~ print con
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_and_set_propagation_distances(self):\n\n self.l_edge = self.calculate_distance_edge()\n self.l_int = self.calculate_distance_interaction()", "def calculate_module_offsets(self):\n \n # These aren't for instantiating, but we use them to get the dimensions\n self.poly_contact_offset = vector(0.5*contact.poly.width,0.5*contact.poly.height)\n\n # M1/M2 routing pitch is based on contacted pitch\n self.m1_pitch = max(contact.m1m2.width,contact.m1m2.height) + max(self.m1_space,self.m2_space)\n self.m2_pitch = max(contact.m2m3.width,contact.m2m3.height) + max(self.m2_space,self.m3_space)\n \n # This corrects the offset pitch difference between M2 and M1\n self.offset_fix = vector(0.5*(self.m2_width-self.m1_width),0)\n\n # delay chain will be rotated 90, so move it over a width\n # we move it up a inv height just for some routing room\n self.rbl_inv_offset = vector(self.delay_chain.height, self.inv.width)\n # access TX goes right on top of inverter, leave space for an inverter which is\n # about the same as a TX. We'll need to add rails though.\n self.access_tx_offset = vector(1.25*self.inv.height,self.rbl_inv_offset.y) + vector(0,2.5*self.inv.width)\n self.delay_chain_offset = self.rbl_inv_offset + vector(0,4*self.inv.width)\n\n # Replica bitline and such are not rotated, but they must be placed far enough\n # away from the delay chain/inverter with space for three M2 tracks\n self.bitcell_offset = self.rbl_inv_offset + vector(2*self.m2_pitch, 0) + vector(0, self.bitcell.height + self.inv.width)\n\n self.rbl_offset = self.bitcell_offset\n\n \n self.height = self.rbl_offset.y + self.rbl.height + self.m2_pitch\n self.width = self.rbl_offset.x + self.bitcell.width", "def get_propagation_time(self):\n return 0.0 # self.get_distance_to_gateway() / (3 * pow(10,8))", "def calculate_propagation(self):\n pass", "def chain_offset(self):\n return self._chain_offset", "def calc_net_generation_wind (self):\n self.net_generation_wind = self.generation_wind_proposed - \\\n self.transmission_losses -\\\n self.excess_energy\n #~ print 'self.net_generation_wind',self.net_generation_wind", "def relativize_coordinates(self):\n if len(self.nodes) + len(self.connecting) < 1:\n return\n smallest_c = (self.nodes+self.connecting)[0].c\n for node in self.nodes+self.connecting:\n if node.c < smallest_c:\n smallest_c = node.c\n for node in self.nodes+self.connecting:\n node.c = node.c - smallest_c", "def lidar_relative(self):\n return self.distance", "def get_shapeOffset(self):\n try:\n _str_func = ' get_shapeOffset'.format(self)\n log.debug(\"|{0}| >> ... 
[{1}]\".format(_str_func,self)+ '-'*80)\n \n ml_check = self.getBlockParents()\n ml_check.insert(0,self)\n \n for mBlock in ml_check:\n l_attrs = ['controlOffset','skinOffset']\n for a in l_attrs:\n if mBlock.hasAttr(a):\n v = mBlock.getMayaAttr(a)\n log.debug(\"|{0}| >> {1} attr found on rigBlock: {2} | {3}\".format(_str_func,a,v,mBlock.mNode)) \n return v \n return 1\n except Exception,err:cgmGEN.cgmExceptCB(Exception,err,msg=vars())", "def get_alignment_offset(self):\n\n return 0", "def position_of_transmission(self, transmission):\n if transmission <= 0:\n transmission = 1e-6\n OD = -log10(transmission)\n\n if OD >= self.OD_max:\n return self.motor_max\n if OD <= self.OD_min:\n return self.motor_min\n\n p_min = self.motor_range[0]\n p_max = self.motor_range[1]\n angle = p_min + (OD - self.OD_range[0]) / self.OD_range[1] * (p_max - p_min)\n # Assume the transmission is flat outside the angular range\n if angle < min(p_min, p_max):\n angle = min(p_min, p_max)\n if angle > max(p_min, p_max):\n angle = max(p_min, p_max)\n return angle", "def _position_to_offset(self, position: Position) -> int:\n return self._line_offsets[position.line] + position.character", "def offset_graph():\n pylon_graph = graph.graph()\n base = square(ORIGIN, LENGTH)\n base_ids = pylon_graph.add_nodes(base, \"base\")\n pylon_graph.connect_neighbours(base_ids, LENGTH)\n all_ids = []\n for i in range(LEVELS):\n level = offset(base, LENGTH * i, \"z\")\n level_ids = pylon_graph.add_nodes(level, \"level\" + str(i))\n all_ids.extend(level_ids)\n pylon_graph.connect_neighbours(all_ids, LENGTH)\n return pylon_graph", "def offset(self):\n return self.__offset", "def offset(self):\n return self.__offset", "def assign_lengths(G):\r\n for u, v, d in G.edges(data=True):\r\n posA = nx.get_node_attributes(G, 'pos')[u]\r\n posB = nx.get_node_attributes(G, 'pos')[v]\r\n\r\n dist = np.linalg.norm(np.subtract(posA, posB))\r\n d['distance'] = dist\r\n return G", "def relative_rate(self):\n return _spacegrant_swig.ax25_udp_pdu_gen_sptr_relative_rate(self)", "def calculate_distance_line(\n r_packet, comov_nu, is_last_line, nu_line, time_explosion\n):\n\n nu = r_packet.nu\n\n if is_last_line:\n return MISS_DISTANCE\n\n nu_diff = comov_nu - nu_line\n\n # for numerical reasons, if line is too close, we set the distance to 0.\n if r_packet.is_close_line:\n nu_diff = 0.0\n r_packet.is_close_line = False\n\n if nu_diff >= 0:\n distance = (nu_diff / nu) * C_SPEED_OF_LIGHT * time_explosion\n else:\n print(\"WARNING: nu difference is less than 0.0\")\n raise MonteCarloException(\n \"nu difference is less than 0.0; for more\"\n \" information, see print statement beforehand\"\n )\n\n if numba_config.ENABLE_FULL_RELATIVITY:\n return calculate_distance_line_full_relativity(\n nu_line, nu, time_explosion, r_packet\n )\n return distance", "def pad_instance(line):\n \n # split the line and extract attributes\n attributes = line.split(\",\")\n seq = attributes[0].strip()\n inc = int(attributes[1])\n out = int(attributes[2])\n lifetime = float(attributes[3])\n classify = attributes[4]\n inc_50 = int(attributes[5])\n out_50 = int(attributes[6])\n\n # how many cells were sent/received before any padding\n initial_num_cells = inc + out\n\n # the ratio of outgoing cells to incoming cells\n out_in_ratio = float(out)/float(inc)\n new_seq, orig_seq_length, inc_added, out_added = pad_sequence(seq)\n \n # account for added beginning sequence padding in overall total\n inc += inc_added\n out += out_added\n\n # account for added beginning sequence padding in first 
50 or so cells\n inc_50 += inc_added\n out_50 += out_added\n\n out_padding = 0\n in_padding = 0\n \n # flip a coin\n coin = random.randint(1, 9)\n \n # if the circuit has more incoming cells than outgoing cells \n # (typical of Client-RP)\n if classify != \"noise\" and out_in_ratio < 0.98:\n \n # pad the outgoing cells to bring the ratios closer\n if coin <= 4:\n out_padding = int(out / out_in_ratio * 0.85)\n else:\n out_padding = int(out / out_in_ratio * 1.05)\n \n # if there are more outgoing than incoming cells \n # (typical of HS-RP)\n elif classify != \"noise\" and out_in_ratio > 1.02:\n \n # pad the incoming cells to bring the ratios closer\n if coin <= 4:\n in_padding = int(inc * out_in_ratio * 0.9)\n else:\n in_padding = int(inc * out_in_ratio * 1.05)\n\n # add the appropriate padding to the overall totals\n inc += in_padding\n out += out_padding\n\n # we have to account for how padding would affect the first 50 or so cells\n first_cells = inc_50 + out_50\n first_ratio = float(inc_50)/first_cells\n if first_cells > 50:\n first_cells = 50\n \n # the first 50 cells should have a similar ratio to the padding\n new_inc_percent = float(inc) / (inc + out)\n \n # add a bit of randomness to the first 50 if they are not noise\n first_random = random.randint(1, 201) / 1000.0\n flip = random.randint(1, 11)\n if flip % 2 == 0:\n if new_inc_percent + new_inc_percent * first_random < 1:\n new_inc_percent += new_inc_percent * first_random\n else:\n if new_inc_percent - new_inc_percent * first_random < 1:\n new_inc_percent -= new_inc_percent * first_random\n\n general = False\n # don't mess with the ratio if we didn't pad the whole thing\n if classify == \"noise\":\n general = True\n new_inc_percent = first_ratio\n\n # the first 50 cells should follow the padded ratio\n inc_50 = int(new_inc_percent * first_cells)\n out_50 = first_cells - inc_50\n\n # the padded instance for the new file\n padded_instance = new_seq + \",\" + str(inc) + \",\" + str(out) + \",\" \\\n + str(lifetime) + \",\" + classify + \",\" + str(inc_50) + \",\" + str(out_50)\n\n num_cells_with_padding = inc + out\n\n # return the padded instance, the initial number of cells for the circuit,\n # and the number of cells after padding, because we need to know\n # how much overhead the padding adds\n return padded_instance, initial_num_cells, num_cells_with_padding, general", "def calculateRelations(self, nPlayer, nTarget):\n\t\t\n\t\tif (nPlayer != nTarget and gc.getTeam(gc.getPlayer(nPlayer).getTeam()).isHasMet(gc.getPlayer(nTarget).getTeam())):\n\t\t\tnAttitude = 0\n\t\t\tszAttitude = CyGameTextMgr().getAttitudeString(nPlayer, nTarget)\n\t\t\tltPlusAndMinuses = re.findall (\"[-+][0-9]+\\s?: \", szAttitude)\n\t\t\tfor i in range (len (ltPlusAndMinuses)):\n\t\t\t\tnAttitude += int (ltPlusAndMinuses[i][:-2])\n\t\t\treturn nAttitude\n\t\telse:\n\t\t\treturn 0", "def Offset(self) -> int:", "def Offset(self) -> int:", "def Offset(self) -> int:", "def __init__(self, owner1: 'ln.LightningNode', owner2: 'ln.LightningNode'):\n self.address = ''.join(random.choices(string.ascii_uppercase + string.digits, k=8))\n self.owner1 = owner1\n self.owner2 = owner2\n self.total_msat = 0 # will be changed as owners deposit funds.", "def rel_position_um(soma, d):\n \n return soma['wParamsNum'][26:29] - d['wParamsNum'][26:29]", "def process_laneOffset(self):\n center_line = np.poly1d(np.mean([self.line_l.get_LinePoly().coeffs, self.line_r.get_LinePoly().coeffs], axis=0))\n # store the center line polynomial\n self.center_poly = center_line\n center_point 
= IMAGE_WIDTH/2 - center_line(709)\n offset_from_center =center_point* self.line_l.x_pxm\n self.lane_offset = offset_from_center\n return center_point", "def offset(self):\n return self._offset", "def offset(self):\n return self._offset", "def offset(self):\n return self._offset", "def offset(self):\n return self._offset", "def offset(self):\n return self._offset", "def offset(self):\n return self._offset", "def offset(self):\n return self._offset", "def get_line_distance(self, p):\n\n y = 1000 * p.y\n R = 1000 * self.geometry.R\n x = copysign(sqrt(y ** 2 + (R - sqrt(R ** 2 - y ** 2))), y)\n x = 2 * R * asin(x / (2 * R))\n #x=y\n b = -x / sqrt(R ** 2 - x ** 2)\n theta = atan(b) # grating tangent angle\n print b, theta\n d = 0\n for n, a in enumerate(self.an):\n d += a * x ** n\n d *= cos(theta)\n return 1e-3 / d", "def line(self) -> int:", "def __init__(self):\n self.x = 0.0\n self.y = 0.0\n self.theta = 0.0\n self.total_distance_covered = 0.0", "def calc_transmission_losses (self):\n #~ print self.generation_wind_proposed, self.cd['line losses']\n self.transmission_losses = self.generation_wind_proposed * \\\n (self.cd['line losses'] / 100.0)\n #~ print 'self.transmission_losses',self.transmission_losses", "def offset(self):\n\n return self._offset", "def _distance_covered(self):\n\n # Calculation of distance traveled compared to the previous point\n self.gap = math.sqrt((self.x - self.x_ant)**2\n + (self.y - self.y_ant)**2)\n\n self.x_ant = self.x\n self.y_ant = self.y\n\n return self.gap", "def getOffsetLine(self, distance, side=c.INSIDE):\n StartA = np.array([self.start.x, self.start.y])\n EndA = np.array([self.end.x, self.end.y])\n r = StartA - EndA #The slope vector of self\n rn = np.array([-r[c.Y], r[c.X]]) #flip x and y and inverse y to get the normal vector of the slope\n rn = rn/np.linalg.norm(rn)*distance #normalize by dividing by its magnitude and multipy by distance to get the correct length\n \n if side == c.INSIDE:\n return self.translate(-rn[c.X], -rn[c.Y]) #the \"minus\" side line is the left side which is inside.\n \n return self.translate(rn[c.X], rn[c.Y]) #the \"Plus\" side of the line is the right side which is outside.", "def marginal(self):\n m = np.zeros(len(self.domain))\n for fnode in self.neighbors:\n m += self.received[fnode]\n return np.exp(normalize(m))", "def lineOffset(self):\n if self.__lineOffset is None:\n self.__lineOffset = self.__offset - self.__source.rfind(\"\\n\", 0, self.__offset) - 1\n\n return self.__lineOffset", "def offset(self):\r\n return self._get_instantiation()[3]", "def get_line_to(self,target):\n\n m = (target.y - self.y) / (target.x - self.x)\n\n b = self.y - m * self.x\n\n return (m,b)", "def relative_rate(self):\n return _spacegrant_swig.ax25_udp_pdu_receiver_sptr_relative_rate(self)", "def origin_dist(self):\n if self._origin_dist is None:\n dist_columns = [\"latest\", \"ultimate\", \"reserve\"]\n self._origin_dist = self.reserve_dist.groupby(\n [\"sim\", \"origin\"], as_index=False)[dist_columns].sum()\n return(self._origin_dist)", "def __calculateOffset(self):\n #if len(self.__XValue) > 0:\n # print(\"GPSBearing: \"+str(round(self.__GPSBearing[-1]))+\", heading: \"+str(round(self.value))+\n # \", x: \"+str(round(self.__XValue[-1]))+\", stdev: \"+str(round(np.std(self.__GPSBearing),5))+\n # \", offset: \"+str(self.__offset))\n if len(self.__GPSBearing) == self.__GPSBearing.maxlen and np.std(self.__GPSBearing) < 0.2 and self.speed > 7:\n self.__offset = (np.mean(self.__GPSBearing) + 360 - np.mean(self.__XValue)) % 360\n #print(\"new 
offset: \"+str(self.__offset)) ", "def get_drawing_offset(self) -> Tuple2IntType:\n return self._drawing_offset", "def _generate_coordinates(self):\n a0 = +0.2969\n a1 = -0.1260\n a2 = -0.3516\n a3 = +0.2843\n a4 = -0.1036 # zero thickness TE\n\n x = np.linspace(0.0, 1.0, num=self.n_points)\n\n if len(self.digits) == 4:\n # Returns n+1 points in [0 1] for the given 4-digits NACA string\n m = float(self.digits[0]) / 100.0\n p = float(self.digits[1]) / 10.0\n t = float(self.digits[2:]) / 100.0\n\n # half-thickness distribution\n yt = 5 * t * (a0 * np.sqrt(x) + a1 * x + a2 * np.power(x, 2) +\n a3 * np.power(x, 3) + a4 * np.power(x, 4))\n\n if p == 0:\n # Symmetric foil\n self.xup_coordinates = np.linspace(0.0, 1.0, num=self.n_points)\n self.yup_coordinates = yt\n self.xdown_coordinates = np.linspace(\n 0.0, 1.0, num=self.n_points)\n self.ydown_coordinates = -yt\n else:\n # Cambered foil\n xc1 = np.asarray([xx for xx in x if xx <= p])\n xc2 = np.asarray([xx for xx in x if xx > p])\n yc1 = m / np.power(p, 2) * xc1 * (2 * p - xc1)\n yc2 = m / np.power(1 - p, 2) * (1 - 2 * p + xc2) * (1 - xc2)\n # Y-coordinates of camber line\n yc = np.append(yc1, yc2)\n\n if self.cosine_spacing:\n # points are generated according to cosine distribution of\n # the X-coordinates of the chord\n dyc1_dx = m / np.power(p, 2) * (2 * p - 2 * xc1)\n dyc2_dx = m / np.power(1 - p, 2) * (2 * p - 2 * xc2)\n dyc_dx = np.append(dyc1_dx, dyc2_dx)\n theta = np.arctan(dyc_dx)\n self.xup_coordinates = x - yt * np.sin(theta)\n self.yup_coordinates = yc + yt * np.cos(theta)\n self.xdown_coordinates = x + yt * np.sin(theta)\n self.ydown_coordinates = yc - yt * np.cos(theta)\n else:\n # Linear spacing distribution of the foil coordinates\n self.xup_coordinates = np.linspace(\n 0.0, 1.0, num=self.n_points)\n self.xdown_coordinates = np.linspace(\n 0.0, 1.0, num=self.n_points)\n self.yup_coordinates = yc + yt\n self.ydown_coordinates = yc - yt\n\n elif len(self.digits) == 5:\n # Returns n+1 points in [0 1] for the given 5-digits NACA string\n cld = float(self.digits[0]) * 0.15\n p = 5.0 * float(self.digits[1]) / 100.0\n s = float(self.digits[2])\n t = float(self.digits[3:]) / 100.0\n\n # half-thickness distribution\n yt = 5 * t * (a0 * np.sqrt(x) + a1 * x + a2 * np.power(x, 2) +\n a3 * np.power(x, 3) + a4 * np.power(x, 4))\n\n if s == 1:\n # Relfex camber\n P = np.array([0.1, 0.15, 0.2, 0.25])\n M = np.array([0.13, 0.2170, 0.318, 0.441])\n K = np.array([51.99, 15.793, 6.520, 3.191])\n elif s == 0:\n # Standard camber\n P = np.array([0.05, 0.1, 0.15, 0.2, 0.25])\n M = np.array([0.0580, 0.1260, 0.2025, 0.2900, 0.3910])\n K = np.array([361.4, 51.64, 15.957, 6.643, 3.230])\n else:\n raise ValueError(\n 'For NACA \"LPSTT\" the value of \"S\" can be either 0 or 1.')\n\n if p == 0:\n # Symmetric foil\n self.xup_coordinates = np.linspace(0.0, 1.0, num=self.n_points)\n self.yup_coordinates = yt\n self.xdown_coordinates = np.linspace(\n 0.0, 1.0, num=self.n_points)\n self.ydown_coordinates = -yt\n else:\n # Cambered foil\n spl_m = splrep(P, M)\n spl_k = splrep(M, K)\n m = splev(p, spl_m)\n k1 = splev(m, spl_k)\n xc1 = np.asarray([xx for xx in x if xx <= m])\n xc2 = np.asarray([xx for xx in x if xx > m])\n yc1 = k1 / 6.0 * (np.power(xc1, 3) - 3 * m * np.power(xc1, 2) +\n np.power(m, 2) * (3 - m) * xc1)\n yc2 = k1 / 6.0 * np.power(m, 3) * (1 - xc2)\n yc = np.append(yc1, yc2)\n\n if self.cosine_spacing:\n # points are generated according to cosine distribution of\n # the X-coordinates of the chord\n zc = cld / 0.3 * yc\n dyc1_dx = 1.0 / 6.0 * k1 * 
(\n 3 * np.power(xc1, 2) - 6 * m * xc1 + np.power(m, 2) *\n (3 - m))\n dyc2_dx = np.tile(-1.0 / 6.0 * k1 * np.power(m, 3),\n len(xc2))\n dyc_dx = np.append(dyc1_dx, dyc2_dx)\n theta = np.arctan(dyc_dx)\n self.xup_coordinates = x - yt * np.sin(theta)\n self.yup_coordinates = zc + yt * np.cos(theta)\n self.xdown_coordinates = x + yt * np.sin(theta)\n self.ydown_coordinates = zc - yt * np.cos(theta)\n else:\n # Linear spacing distribution of the foil coordinates\n self.xup_coordinates = np.linspace(\n 0.0, 1.0, num=self.n_points)\n self.xdown_coordinates = np.linspace(\n 0.0, 1.0, num=self.n_points)\n self.yup_coordinates = yc + yt\n self.ydown_coordinates = yc - yt\n\n else:\n raise Exception", "def get_grating_lines_per_mm(self):\r\n msg = struct.pack('>2B', 56, 2)\r\n response = self.query(msg)\r\n return struct.unpack('>H', response[:2])[0]", "def __init__(self):\n\t\tself.theta = 0.8\t\t\t# Theta value, the constant of the line which x+y is.(1.2 is best)\n\t\tself.numberOfInput = 0\t\t# The number of Input\n\t\tself.weight = []\t\t\t# The list of weight.", "def _get_next_offset(self):\n return self.__offset", "def get_offset(self):\n return self.offset", "def get_distance(self):\n print(\"voici la distance à l'obstacle\")", "def distance(self) -> float:\n return self._dist_two_wire() # at this time we only support 2-wire meausre", "def calculate_distance_edge(self):\n if self.mu > 0:\n # right interface is intersected next\n dx = self.cell_xr - self.x\n self.next_cell_index = self.cell_index + 1\n else:\n # left interface is intersected next\n dx = self.cell_xl - self.x\n self.next_cell_index = self.cell_index - 1\n\n return dx / self.mu", "def calc_distance(self, observation):\n actual_obs = observation[0]\n scrn_player = actual_obs.observation.feature_screen.player_relative\n scrn_select = actual_obs.observation.feature_screen.selected\n scrn_density = actual_obs.observation.feature_screen.unit_density\n\n state_added = scrn_select + scrn_density\n\n marine_center = np.mean(self.xy_locs(scrn_player == 1), axis=0).round()\n\n # first step\n if np.sum(scrn_select) == 0:\n marine_center = np.mean(self.xy_locs(scrn_player == 1), axis=0).round()\n # marine behind beacon\n if isinstance(marine_center, float):\n marine_center = np.mean(self.xy_locs(state_added == 2), axis=0).round()\n else:\n # normal navigation\n marine_center = np.mean(self.xy_locs(state_added == 2), axis=0).round()\n if isinstance(marine_center, float):\n marine_center = np.mean(self.xy_locs(state_added == 3), axis=0).round()\n\n beacon_center = np.mean(self.xy_locs(scrn_player == 3), axis=0).round()\n #\n # print(state_added)\n # print(\"---- Marine {} | {} Beacon ----\".format(marine_center, beacon_center))\n # time.sleep(0.2)\n distance = math.hypot(beacon_center[0] - marine_center[0],\n beacon_center[1] - marine_center[1])\n\n return beacon_center, marine_center, distance", "def update(self):\n return_code, pos_new = sim.simxGetObjectPosition(self.clientID, self.from_handle, self.to_handle, sim.simx_opmode_streaming)\n self.distance = np.linalg.norm(np.asarray(pos_new))", "def _add_offsets_to_token_nodes(self):\n for edge_index in self._textual_relation_ids:\n token_node_index = self.edges[edge_index].source\n self.nodes[token_node_index].onset = self.edges[edge_index].onset\n self.nodes[token_node_index].offset = self.edges[edge_index].offset", "def calculation_of_propagation(self): \n \n prop = PopulationPropagator(world.time, rate_matrix=world.KK)\n \n pop_ini = numpy.array([1.0, 0.0])\n \n pop_t = 
prop.propagate(pop_ini)\n \n sta = world.subtime\n \n U = prop.get_PropagationMatrix(sta)\n \n pop_sub = numpy.zeros((2,sta.length))\n \n for i in range(sta.length):\n pop_sub[:,i] = numpy.dot(U[:,:,i],pop_ini) \n \n world.pop_t = pop_t\n world.pop_sub = pop_sub", "def offset(self) -> Tuple[int, int]:\n return (self.ioffset[0].to_pixels(self.parent.width),\n self.ioffset[1].to_pixels(self.parent.height))", "def calculate_distance_edge(self):\n mu_star = -np.sqrt(1. - (self.cell_xl / self.x)**2)\n\n if self.mu <= mu_star:\n\n l_edge = (-self.mu * self.x -\n np.sqrt(self.mu**2 * self.x**2 -\n self.x**2 + self.cell_xl**2))\n self.next_cell_index = self.cell_index - 1\n\n else:\n\n l_edge = (-self.mu * self.x +\n np.sqrt(self.mu**2 * self.x**2 -\n self.x**2 + self.cell_xr**2))\n self.next_cell_index = self.cell_index + 1\n\n return l_edge", "def setup_layout_constants(self):\n # determines the spacing between the edge and nmos (rail to active\n # metal or poly_to_poly spacing)\n half_gate_to_gate = 0.5 * (drc[\"poly_to_poly\"] - drc[\"minwidth_metal1\"])\n edge_to_nmos = max(drc[\"metal1_to_metal1\"] - self.nmos.active_contact_positions[0].y,\n half_gate_to_gate - self.nmos.poly_positions[0].y)\n\n # determine the position of the first transistor from the left\n self.nmos_position1 = vector(0,\n 0.5 * drc[\"minwidth_metal1\"] + edge_to_nmos)\n offset = self.nmos_position1 + vector(0,self.nmos.height)\n\n x = vector(self.nmos.active_width - self.nmos.active_contact.width, 0)\n self.nmos_position2 = x + self.nmos_position1.scale(0,1)\n\n # determines the spacing between the edge and pmos\n edge_to_pmos = max(drc[\"metal1_to_metal1\"] - self.pmos.active_contact_positions[0].y,\n half_gate_to_gate - self.pmos.poly_positions[0].y)\n self.pmos_position1 = vector(0,\n self.height - 0.5 * drc[\"minwidth_metal1\"]\n - edge_to_pmos - self.pmos.height)\n self.pmos_position2 = self.pmos_position1 + vector(self.pmos.width,0)\n\n self.well_width = max(self.pmos_position2.x + self.pmos.active_position.x\n + self.pmos.active_width\n + drc[\"active_to_body_active\"] + self.nwell_contact.width \n + drc[\"well_enclosure_active\"],\n self.nmos_position2.x + self.nmos.active_position.x \n + self.nmos.active_width \n + drc[\"active_to_body_active\"] + drc[\"well_enclosure_active\"])\n self.width = self.well_width", "def lineLength(node1, node2):\n return ((node2[1] - node1[1])**2 + (node2[0] - node1[0])**2)**(1/2)", "def edit_distance(self):\n\n edit_dist = 0\n misaligned = False\n\n try:\n with open(self.output_file, 'r') as output_file, open(self.gt_file, 'r') as gt_file:\n\n out_lines = output_file.readlines()\n gt_lines = [g.strip() for g in gt_file.readlines()]\n\n num_symbols = 0\n bd = 0\n # Go through all lines (for polyphony)\n for i in range(len(out_lines)):\n # Skip comparing sequence staff line\n if 'Sequence staff' in gt_lines[i]:\n continue\n\n out_split = out_lines[i].split()\n gt_split = gt_lines[i].split()\n\n #print('Out:',out_split)\n #print('Gt:',gt_split)\n\n num_symbols += len(gt_split) # for calculating symbol error rate\n misaligned = 'misaligned' in out_lines[i] # for ensembling\n\n _a = [symbol for symbol in out_split if symbol != '\\n' and symbol != -1]\n _b = [symbol for symbol in gt_split if symbol != '\\n' and symbol != -1]\n\n ed = self.levenshtein(_a,_b)\n \n # Account for barline at end (don't use when checking CRNN output)\n #if ed == 1 and out_split[-1] == 'barline' and gt_split[-1] != 'barline':\n # ed = 0\n \n edit_dist += ed\n \n staff_num = (i + 1) // 2\n \n if ed == 1:\n 
pass\n #print(self.output_file)\n #print('Edit dist (staff #%d): %d' % (staff_num, ed))\n \n if _a[-1] == 'barline' and _b[-1] != 'barline' or \\\n _a[-1] != 'barline' and _b[-1] == 'barline':\n #print('Barline diff') \n # print(self.output_file)\n bd = 1\n #print(_a)\n #print(_b)\n \n\n '''\n if len(out_split) != len(gt_split):\n return 0\n\n for j in range(len(out_split)):\n # Treat slur and tie as equivalent\n if out_split[j] != gt_split[j] and\\\n ('slur' not in out_split[j] and 'tie' not in out_split[j]) and\\\n ('slur' not in gt_split[j] and 'tie' not in gt_split[j]):\n return 0\n '''\n except FileNotFoundError:\n print('Missing:',self.output_file, self.gt_file)\n return -1, 1, 0, False\n #print('Found:',self.output_file, self.gt_file)\n return edit_dist, num_symbols, bd, misaligned", "def intercept(self):\n points = 0\n for asteroid in range(len(self.asteroid_type) - 1, -1, -1):\n if self.distance(self.Main_Ship, self.asteroid_type[asteroid]) < (self.spaceship_radius + self.asteroid_r[asteroid]):\n points += (self.asteroid_r[asteroid] + self.asteroid_speed[asteroid])\n self.del_asteroid(asteroid)\n return points", "def __compute_connection_strengths(G, induced_ordering, previous_vertex):\n # for edges incident to the previous vertex added to the induced ordering\n for edge in previous_vertex.get_incident_edges():\n other_node = edge.get_other_node(previous_vertex.get_name()) # get the other node object\n if other_node.get_name() not in induced_ordering: # if the other node is not in the ordering\n other_node.set_attribute_value(\n StoerWagner.CONNECTION_STRENGTH_ATTRIBUTE,\n other_node.get_attribute_value(StoerWagner.CONNECTION_STRENGTH_ATTRIBUTE) + edge.get_weight()\n ) # add to the connection strength, the weight of the edge\n return G # return the graph with the connection strength information", "def layer_offsets(self):\n ...", "def offset_at_position(self, position: Position) -> int:\n lines = self.lines\n pos = position_from_utf16(lines, position)\n row, col = pos.line, pos.character\n return col + sum(utf16_num_units(line) for line in lines[:row])", "def min_offset(self):\n return self.offset", "def get_rel_pos(gRNA, min_anchor_length):\n if gRNA['cassette_label'] == 'Orphan':\n return -gRNA['gene_rel_start']\n\n if gRNA['strand'] == 'coding':\n rel_pos = gRNA['circle_start']-gRNA['forward_end']-gRNA['gene_rel_start']\n else:\n rel_pos = gRNA['reverse_start']-gRNA['gene_rel_start']-gRNA['circle_end']\n if rel_pos is pd.NA:\n rel_pos = 0\n\n if rel_pos < 0:\n # find position of first non-WC in pairing\n # If the resulting shortening of the alignment causes the anchor to be\n # less than the min anchor length, make rel_pos just past the non-WC bp\n match = mm_regex.search(gRNA['pairing'][::-1])\n mm_dist = match.start(0)\n if mm_dist + rel_pos < min_anchor_length:\n rel_pos = -(mm_dist+1)\n return rel_pos", "def getLineFollowValue() -> int:\n pass", "def Nodelocation(self, Tract_pop, Tractx, Tracty, longitude, latitude, cnum):\n import annealsimulation\n \n self.latl, self.lonl = [], []\n \n while(len(self.latl) != self.nodenum):\n lat = np.random.randint(len(self.Geoy) - 1)\n lon = np.random.randint(len(self.Geox) - 1)\n if(lat not in self.latl or lon not in self.lonl):\n self.latl.append(lat)\n self.lonl.append(lon) \n \n self.latl, self.lonl = np.array(self.latl), np.array(self.lonl)\n \n self.demandlat, self.demandlon = self.latl[self.demandseries], self.lonl[self.demandseries]\n self.tranlat, self.tranlon = self.latl[self.transeries], self.lonl[self.transeries]\n 
self.supplylat, self.supplylon = self.latl[self.supplyseries], self.lonl[self.supplyseries]\n \n self.demandloc = np.stack((self.demandlat, self.demandlon)).transpose()\n self.tranloc = np.stack((self.tranlat, self.tranlon)).transpose()\n self.supplyloc = np.stack((self.supplylat, self.supplylon)).transpose()\n \n #Demand node\n Geox1 = sf.FeatureScaling(self.Geox)\n Geoy1 = sf.FeatureScaling(self.Geoy)\n Tract_pop1 = sf.FeatureScaling(Tract_pop)\n Tractx1 = sf.FeatureScaling(Tractx)\n Tracty1 = sf.FeatureScaling(Tracty)\n \n self.demandloc, self.demandc, self.popuassign = ans.anneal2(self.demandloc, 'Population', Geox1, Geoy1, Tract_pop1, Tractx1, Tracty1, Tract_pop, cnum)\n self.demandy1 = Geoy1[self.demandloc[:, 0]]\n self.demandx1 = Geox1[self.demandloc[:, 1]]\n self.demandy = self.Geoy[self.demandloc[:, 0]]\n self.demandx = self.Geox[self.demandloc[:, 1]]\n #Transmission node\n self.tranloc, self.tranc, temp = ans.anneal2(self.tranloc, 'Facility', Geox1, Geoy1, Tract_pop1, self.demandx1, self.demandy1, Tract_pop, cnum)\n self.trany1 = Geoy1[self.tranloc[:, 0]]\n self.tranx1 = Geox1[self.tranloc[:, 1]]\n self.trany = self.Geoy[self.tranloc[:, 0]]\n self.tranx = self.Geox[self.tranloc[:, 1]]\n\n #Supply node\n self.supplyloc, self.supplyc, temp = ans.anneal2(self.supplyloc, 'Facility', Geox1, Geoy1, Tract_pop1, self.tranx1, self.trany1, Tract_pop, cnum)\n self.supplyy1 = Geoy1[self.supplyloc[:, 0]]\n self.supplyx1 = Geox1[self.supplyloc[:, 1]] \n self.supplyy = self.Geoy[self.supplyloc[:, 0]]\n self.supplyx = self.Geox[self.supplyloc[:, 1]]\n \n ##Coordinates of nodes\n self.y = np.concatenate((self.supplyy, self.trany, self.demandy))\n self.x = np.concatenate((self.supplyx, self.tranx, self.demandx))\n \n ##Latitudes and longitudes of nodes\n self.demandlatitude, self.demandlongitude = latitude[self.demandloc[:, 0]], longitude[self.demandloc[:, 1]]\n self.tranlatitude, self.tranlongitude = latitude[self.tranloc[:, 0]], longitude[self.tranloc[:, 1]]\n self.supplylatitude, self.supplylongitude = latitude[self.supplyloc[:, 0]], longitude[self.supplyloc[:, 1]]\n \n self.latitude = np.concatenate((self.supplylatitude, self.tranlatitude, self.demandlatitude))\n self.longitude = np.concatenate((self.supplylongitude, self.tranlongitude, self.demandlongitude))", "def Compute_Offset( self, Bearing, Distance ):\n xoff = random.uniform( 0, Distance )\n yoff = random.uniform( 0, Distance )\n if( (Bearing >= 0) & (Bearing <= 90 ) ):\n return(0+xoff, 0+yoff )\n elif( (Bearing > 90) & (Bearing <= 180) ):\n return(0+xoff, 0-yoff )\n elif( (Bearing > 180) & (Bearing <= 270) ):\n return(0-xoff, 0-yoff )\n elif( (Bearing > 270) & (Bearing <= 360) ):\n return(0-xoff, 0+yoff )\n else:\n return(0, 0)", "def offsetline(linen, pattern_result):\n\n if \"nlines\" in pattern_result:\n nlines = pattern_result[\"nlines\"]\n else:\n nlines = 0\n new_linen = linen - nlines - 1\n if new_linen < 0:\n return 0\n else:\n return new_linen", "def set_sg_offset():\n offset = request.params.get(\"offset\", 0, type=float)\n output = request.params.get(\"output\", 1, type=int)\n retval = RP_LIB.rp_GenOffset(output, ctypes.c_float(offset))\n if retval != 0:\n LOG.error(\"Failed to set signal generator offset. 
Error code: %s\", ERROR_CODES[retval])", "def node_pos(self, spacing, type, layer, node):\n width = (self.num_hidden_layers + 3) * spacing * 2\n values = [self.num_input_nodes, self.num_hidden_nodes, self.num_output_nodes]\n values.sort(reverse=True)\n height = (values[0] + 1) * spacing\n h_percentile = height - spacing\n w_percentile = (width - (spacing * 2)) / (self.num_hidden_layers + 2)\n if type == 'input':\n pos_y = h_percentile / (self.num_input_nodes + 1)\n pos_y *= (node + 1)\n pos_x = w_percentile\n return (pos_x, pos_y)\n elif type == 'hidden':\n pos_y = h_percentile / (self.num_hidden_nodes + 1)\n pos_y *= (node + 1)\n pos_x = w_percentile * (layer + 2)\n return (pos_x, pos_y)\n elif type == 'output':\n pos_y = h_percentile / (self.num_output_nodes + 1)\n pos_y *= (node + 1)\n pos_x = w_percentile * (self.num_hidden_layers + 2)\n return (pos_x, pos_y)\n else:\n print(\"Invalid argument: type\")\n return 1", "def byDistance(graph, distance, edgeAttrs=[]):\n \n #==============================================================================\n # fromNode, toNode, {Ind, coordinates, distance, center}\n # fromNode (tuple): coordinate of start node\n # toNode (tuple): coordinate of end node\n # Ind (int): identifier\n # coordinates (doubl list): a list of coordinates of vertices of the segment\n # distnace (float): distance of the segment\n # center (list): coordinate of center of the segment\n #==============================================================================\n \n splitGraph = nx.DiGraph()\n \n nodeList = split._splitNodes(graph, distance)\n \n splitGraph.add_nodes_from(nodeList)\n \n attrsDict = {}\n \n splitEdgeInd = 0\n for edgeInd, edge in enumerate(graph.edges(data=True)):\n vertexDict = {}\n prevVertex = None\n cumulDistance = 0\n \n # for each eage, locate the nodes between the corrseponding vertices\n for ind, vertex in enumerate(edge[2]['coordinates']):\n if ind == 0:\n prevVertex = vertex\n \n # add the first vertex in the vertex list\n vertexDict[cumulDistance] = prevVertex\n else:\n currVertex = vertex\n \n \n # find the nodes which are between the prevVertex and currVertex\n m, b, d = geometry.lineSpec(prevVertex, currVertex)\n \n for node in nodeList:\n if m == True: # parallel to y axis\n if node[0] == b and \\\n (((prevVertex[1] <= node[1]) and (node[1] <= currVertex[1])) or\\\n ((currVertex[1] <= node[1]) and (node[1] <= prevVertex[1]))):\n # if the node is between the vertices, \n # calculate the length between the prevVertex and the node\n seglen = geometry.lineLength(prevVertex, node)\n # add the node into the dictionary\n vertexDict[seglen+cumulDistance] = node\n \n elif m == False:\n if node[1] == b and \\\n (((prevVertex[0] <= node[0]) and (node[0] <= currVertex[0])) or\\\n ((currVertex[0] <= node[0]) and (node[0] <= prevVertex[0]))):\n # if the node is between the vertices, \n # calculate the length between the prevVertex and the node\n seglen = geometry.lineLength(prevVertex, node)\n # add the node into the dictionary\n vertexDict[seglen+cumulDistance] = node\n \n else:\n if(abs(node[1] - (m*node[0] + b)) < 0.05) and \\\n (((prevVertex[0] <= node[0]) and (node[0] <= currVertex[0])) or\\\n ((currVertex[0] <= node[0]) and (node[0] <= prevVertex[0]))) and\\\n (((prevVertex[1] <= node[1]) and (node[1] <= currVertex[1])) or\\\n ((currVertex[1] <= node[1]) and (node[1] <= prevVertex[1]))):\n # if the node is between the vertices, \n # calculate the length between the prevVertex and the node\n seglen = geometry.lineLength(prevVertex, node)\n # add the 
node into the dictionary\n vertexDict[seglen+cumulDistance] = node\n \n cumulDistance += geometry.lineLength(prevVertex, currVertex)\n vertexDict[cumulDistance] = currVertex\n \n prevVertex = currVertex\n \n \n orderedVertexList = []\n for key in sorted(vertexDict.keys()):\n orderedVertexList.append(list(vertexDict[key]))\n \n startInd1 = 0\n for ind1, vertex1 in enumerate(orderedVertexList):\n # check whether vertex1 is in nodelist\n endInd1 = 0\n for splitNode in nodeList:\n if tuple(vertex1) == splitNode:\n endInd1 = ind1\n \n coordList = orderedVertexList[startInd1:endInd1 + 1]\n startVertex = tuple(orderedVertexList[startInd1])\n endVertex = tuple(orderedVertexList[endInd1])\n \n if startVertex != endVertex:\n splitGraph.add_edge(startVertex, endVertex, Ind=splitEdgeInd, coordinates = coordList)\n \n attrsDict[(startVertex, endVertex)] = {}\n for edgeAttr in edgeAttrs:\n attrsDict[(startVertex, endVertex)][edgeAttr] = edge[2][edgeAttr]\n \n startInd1 = endInd1\n splitEdgeInd+=1\n# print(edgeAttrs)\n# if edgeAttrs != []:\n# for attr in edgeAttrs:\n# print(attr)\n# attrDict = {key1:val1[attr] for key1, val1 in attrsDict.items()}\n# print(attrDict)\n# nx.set_edge_attributes(splitGraph, attr, attrDict)\n \n for edge in splitGraph.edges(data=True):\n for attr in edgeAttrs:\n edge[2][attr] = attrsDict[(edge[0], edge[1])][attr]\n \n \n graphCalculate.addDistance(splitGraph)\n graphCalculate.addCenter(splitGraph)\n return splitGraph", "def relative_rate(self):\n return _spacegrant_swig.udp_debug_sptr_relative_rate(self)", "def compute_node_positions(self):\n pass", "def getTelescopeCoords(self):\n return self.header['ANT_X'],self.header['ANT_Y'],self.header['ANT_Z']", "def attention(self):\n center_of_attention = 0\n distance = 10000\n for person in self.peoples:\n if person is not None:\n if person.X < distance: #person's depth is now their X position in edwin frame\n center_of_attention = person.ID\n distance = person.X\n\n if center_of_attention != 0:\n return center_of_attention", "def offset(self):\n self._fetch_if_needed()\n return self._offset", "def distance_modulus(self):\n return 5*np.log10(self.parallax.to(u.pc, u.parallax())/10*u.pc)", "def wm_offset(self):\n return self.get_par(\"offset\")", "def NPL(self):\n self.edge = np.zeros((np.sum(self.Adjmatrix), 3))\n Temp = 0\n for i in range(self.nodenum):\n for j in range(self.nodenum):\n if(self.Adjmatrix[i, j] == 1):\n self.edge[Temp, 0], self.edge[Temp, 1], self.edge[Temp, 2] = i, j, self.Dismatrix[i, j]\n Temp += 1\n \n self.Totallength = ((np.max(self.Geox) - np.min(self.Geox))**2 + (np.max(self.Geoy) - np.min(self.Geoy))**2)**0.5\n self.norm_edge = self.edge[:, 2]/self.Totallength", "def relative_rate(self):\n return _spacegrant_swig.ax25_pdu_packer_sptr_relative_rate(self)", "def position_in_network(self, l, n):\n\n pos = n\n for i in range(l):\n pos += self.get_layer(i).num_nodes\n\n return pos", "def mc_update_xy(self):\n i = random.randint(0,self.N-1)\n return self.mc_update_fixed(i,xy = True)", "def setup_propagator(self):\n self.propagator = create_propagator_matrix(self.A, self.args.alpha, self.args.model)\n if self.args.model==\"exact\":\n self.propagator = self.propagator.to(self.device)\n else:\n self.edge_indices = self.propagator[\"indices\"].to(self.device)\n self.edge_weights = self.propagator[\"values\"].to(self.device)", "def setup_propagator(self):\n self.propagator = create_propagator_matrix(self.A, self.args.alpha, self.args.model)\n if self.args.model==\"exact\":\n self.propagator = 
self.propagator.to(self.device)\n else:\n self.edge_indices = self.propagator[\"indices\"].to(self.device)\n self.edge_weights = self.propagator[\"values\"].to(self.device)", "def setup_propagator(self):\n self.propagator = create_propagator_matrix(self.A, self.args.alpha, self.args.model)\n if self.args.model==\"exact\":\n self.propagator = self.propagator.to(self.device)\n else:\n self.edge_indices = self.propagator[\"indices\"].to(self.device)\n self.edge_weights = self.propagator[\"values\"].to(self.device)", "def undirected_diameter(self) -> int:\n return nx.diameter(self.to_undirected())", "def origins_and_displacements(self):\n origin0 = self.nodes[0]\n disp0 = self.nodes[1] - self.nodes[0]\n origin1 = self.nodes[1]\n disp1 = self.nodes[0] - self.nodes[0]\n return origin0, disp0, origin1, disp1", "def generateOffset(self, offset, max_dist):\n settings.space_by_type['offset'] = 1000000\n self._set_sections(offset, max_dist)\n self.offset = True", "def calc_generation_wind_proposed (self):\n if self.comp_specs['proposed capacity'] != UNKNOWN:\n self.load_offset_proposed = \\\n self.comp_specs['proposed capacity']\n self.generation_wind_proposed = \\\n self.comp_specs['proposed generation']\n\n if self.generation_wind_proposed == UNKNOWN:\n self.generation_wind_proposed = self.load_offset_proposed *\\\n float(self.comp_specs\\\n ['capacity factor'])*\\\n constants.hours_per_year\n\n return\n\n self.load_offset_proposed = 0\n\n offset = self.average_load*\\\n (self.comp_specs['percent generation to offset'] / 100.0)\n #~ print self.forecast.generation['generation hydro'].sum()\n\n # removed on purpose\n #~ hydro = \\\n #~ self.forecast.generation['generation hydro'].fillna(0).sum()\n #~ if hydro > 0:\n #~ offset *= 2\n\n # existing very variable RE\n existing_RE = \\\n int(float(self.cd['wind capacity'])) + \\\n int(float(self.cd['solar capacity']))\n\n if existing_RE < (round(offset/25) * 25): # ???\n #~ print \"True\"\n self.load_offset_proposed = round(offset/25) * 25 - existing_RE\n\n\n\n # not needed for now\n #~ self.total_wind_generation = self.generation_load_proposed + \\\n #~ int(self.comp_specs['wind capacity'])\n\n self.generation_wind_proposed = self.load_offset_proposed * \\\n float(self.comp_specs['capacity factor'])*\\\n constants.hours_per_year\n #~ print 'self.load_offset_proposed',self.load_offset_proposed\n #~ print 'self.generation_wind_proposed',self.generation_wind_proposed", "def get_position(self):\n return self._I85_msg_from_device(self.node.sdo[0x6064].phys) # rad", "def relative_rate(self):\n return _spacegrant_swig.ax25_pdu_unpacker_sptr_relative_rate(self)", "def determine_measure_position(self):\n green_probs = []\n net_size = len(self.net)\n #Belief propagation:\n #Analyzes each position's probability of obtaining\n #green when measuring at a time t+1.\n for i in range(0, net_size):\n accum = 0\n for j in range(0, net_size):\n distance = self.__get_distance(i, j)\n if distance == 0: #Probability of measure green at distance 0 from 'i'.\n accum += self.net[i].value * self.ct[0][0]\n elif distance == 1: #Probability of measure green at distance 1 from 'i'.\n accum += self.net[i].value * self.ct[1][0]\n elif distance == 2: #Probability of measure green at distance 2 from 'i'.\n accum += self.net[i].value * self.ct[2][0]\n elif distance == 3: #Probability of measure green at distance 3 from 'i'.\n accum += self.net[i].value * self.ct[3][0]\n else: #Probability of measure green at a distance >= 4 from 'i'.\n accum += self.net[i].value * self.ct[4][0]\n 
green_probs.append(accum)\n #Returns the position in which the probability of\n #obtaining green when measuring is the highest.\n return self.net[np.argmax(green_probs)].id", "def _calculate_label_offset(port):\n offset_position = np.array((-cos(pi/180 * port.orientation),\n -sin(pi/180 * port.orientation)))\n offset_position *= port.width * 0.05\n return offset_position" ]
[ "0.5927528", "0.57913303", "0.5498828", "0.54520005", "0.5265909", "0.52382195", "0.520508", "0.5108444", "0.50968987", "0.5064629", "0.5039651", "0.5012671", "0.4999943", "0.4968546", "0.4968546", "0.49591216", "0.4952447", "0.49499637", "0.49423927", "0.49011195", "0.4898525", "0.4898525", "0.4898525", "0.4897764", "0.48889238", "0.488882", "0.48787028", "0.48787028", "0.48787028", "0.48787028", "0.48787028", "0.48787028", "0.48787028", "0.48578855", "0.48480916", "0.48445654", "0.48158583", "0.48079413", "0.48023248", "0.4794563", "0.478763", "0.47749957", "0.47678623", "0.4758064", "0.4748903", "0.47396564", "0.4732899", "0.47257838", "0.47213024", "0.4712898", "0.47063842", "0.47034943", "0.47000676", "0.46959653", "0.4690535", "0.4686829", "0.46815136", "0.465522", "0.46501195", "0.46448347", "0.46444118", "0.46401024", "0.462977", "0.4625979", "0.4621821", "0.46119294", "0.460731", "0.46053815", "0.4605316", "0.45996234", "0.45953733", "0.459355", "0.45867005", "0.45829", "0.45795026", "0.4575103", "0.45724723", "0.4568673", "0.45586953", "0.45582786", "0.45515618", "0.4548036", "0.45417774", "0.45353037", "0.45348966", "0.4525395", "0.45179138", "0.45175982", "0.45152384", "0.4503776", "0.4503776", "0.4503776", "0.4501092", "0.4492558", "0.44888088", "0.44838703", "0.44767216", "0.44755062", "0.44741276", "0.44735387" ]
0.62144107
0
Calculate the status quo generation in the community . Attributes
def calc_pre_intertie_generation (self): self.pre_intertie_generation = \ self.forecast.get_generation(self.start_year,self.end_year) gen_eff = self.cd["diesel generation efficiency"] self.pre_intertie_generation_fuel_used = \ self.pre_intertie_generation / gen_eff #~ print 'self.baseline_generatio',self.baseline_generation
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_total_shield(self,obs):", "def advancedStats():", "def calc_stat_values(self):", "def setMyStatus(self):\n self.clearMyStatus()\n for id, myComponent in self.components.iteritems():\n self.currentComps += 1\n if myComponent.type != '':\n compData = self.componentdata[myComponent.type]\n if myComponent.currentHP == myComponent.myComponentData.maxHP:\n # regular component set quad Attributes\n if compData.typeAP != '':\n self.typeAP = compData.typeAP\n elif compData.assault > 0:\n self.maxAssault += compData.assault\n self.maxAP += compData.maxAP\n self.maxSP += compData.maxSP\n self.genSP += compData.genSP\n self.maxPower += compData.power\n self.maxBattery += compData.battery\n self.thrust += compData.engine\n self.rotation += compData.rotate\n self.radar += compData.radar\n self.jamming += compData.jamming\n self.repair += compData.repair\n self.target += compData.target\n self.mass += compData.mass\n \n # tell weapons in quad to recalc their status\n for id, myWeapon in self.weapons.iteritems():\n myWeapon.setMyStatus()", "def stats(self):", "def summary(self,attr='raw'):\n g = {}\n g['gid'] = map(lambda x : x.gid, self.taxonomies)\n g['sp'] = map(lambda x : x.presences.species , self.taxonomies)\n \n g['gns'] = map(lambda x : x.presences.genera , self.taxonomies) \n g['fam'] = map(lambda x : x.presences.families , self.taxonomies)\n g['ord'] = map(lambda x : x.presences.orders , self.taxonomies)\n g['cls'] = map(lambda x : x.presences.classes , self.taxonomies)\n g['phy'] = map(lambda x : x.presences.phyla , self.taxonomies)\n g['kng'] = map(lambda x : x.presences.kingdoms , self.taxonomies)\n #g['all'] = map(lambda x : (x.gid,int(x.presences.species),int(x.genera),int(x.families),int(x.orders),int(x.classes),int(x.phyla),int(x.kingdoms)),self.taxonomies)\n keys = settings.TAXONOMIC_TREE_KEYS\n if attr == 'int':\n for key in keys:\n g[key] = map(lambda p : int(p) ,g[key])\n elif attr == 'str':\n for key in keys:\n g[key] = map(lambda p : str(p) ,g[key]) \n elif attr == 'list':\n for key in keys:\n g[key] = map(lambda p : p.list ,g[key]) \n elif attr == 'mapping':\n for key in keys:\n g[key] = map(lambda p : p.map ,g[key]) \n elif attr == 'raw':\n return g\n else:\n logger.error(\"Wrong attribute selection\")\n return None\n \n return g", "def compute_statistics(self):", "def get_status(self):\n\n status = \"\"\"\n-----------------GENERAL USER INFO-----------------\n Name: {name}\n Age: {age}\n Age Range: {age_range}\n Height: {height}\n Weight: {weight}\n Pregnancy Status: {pregnancy_status}\n Breastfeeding Status: {breastfeeding_status}\n\n-----------------USER'S RECOMMENDED DAILY VITAMIN AND MINERAL COUNT-------------\n (in milligrams, mg)\n Vitamin A: {user_vitamin_a}\n Vitamin B6: {user_vitamin_b_6}\n Vitamin B12: {user_vitamin_b_12}\n Vitamin C: {user_vitamin_c}\n Vitamin D: {user_vitamin_d}\n Vitamin E: {user_vitamin_e}\n \n Calcium: {user_calcium}\n Iron: {user_iron}\n Magnesium: {user_magnesium}\n Potassium: {user_potassium}\n Zinc: {user_zinc}\n \"\"\"\n return status.format(name=self.name, age=self.age, age_range=self.age_range, height=self.height, weight=self.weight, pregnancy_status=self.pregnancy_status, \n breastfeeding_status=self.breastfeeding_status, user_vitamin_a=self.user_vitamin_a,\n user_vitamin_b_6=self.user_vitamin_b_6, user_vitamin_b_12=self.user_vitamin_b_12, user_vitamin_c=self.user_vitamin_c, user_vitamin_d=self.user_vitamin_d, user_vitamin_e=self.user_vitamin_e,\n user_calcium=self.user_calcium, user_iron=self.user_iron, 
user_magnesium=self.user_magnesium, user_potassium=self.uesr_potassium, user_zinc=self.user_zinc)", "def status(self):", "def setMyStatus(self):\n self.clearMyStatus()\n self.mass = self.myShipHull.mass\n for position, myQuad in self.quads.iteritems():\n self.maxBattery += myQuad.maxBattery\n self.currentPower += myQuad.maxPower\n self.thrust += myQuad.thrust\n self.rotation += myQuad.rotation\n self.radar += myQuad.radar\n self.jamming += myQuad.jamming\n self.repair += myQuad.repair\n self.mass += myQuad.mass\n self.maxAssault += myQuad.maxAssault\n\n # scale back attributes if internal structure has been hit\n ratio = self.currentISP/self.myShipHull.maxISP\n self.currentPower = self.currentPower * ratio\n self.thrust = self.thrust * ratio\n self.rotation = self.rotation * ratio\n\n self.accel = self.myDesign.getAccel(self.thrust, self.mass)\n self.accel = self.accel\n\n self.rotation = self.myDesign.getRotation(self.rotation, self.mass)\n self.rotation = self.rotation\n self.setMyStrength()\n self.setWeaponStatus()\n self.setRange()\n self.setAssaultStrength(ratio)", "def getQValue(self, state, action):\n #print \"getQValue in ApproximateQAgent\"\n\n \"*** YOUR CODE HERE ***\"\n weights = self.getWeights()\n features = self.featExtractor.getFeatures(state, action, self)\n\n value = 0\n\n #print \"FEATURES: \", features\n #print \"WEIGHTS: \", weights\n\n for feature in features:\n value += features[feature]*weights[feature]\n return value\n #util.raiseNotDefined()", "def getQValue(self, state, action):\n \"*** YOUR CODE HERE ***\"\n qvalue = 0.0\n for feature_name, value in self.featExtractor.getFeatures(state, action).iteritems():\n qvalue += value * self.weights[feature_name]\n return qvalue", "def getQValue(self, state, action):\n \"*** YOUR CODE HERE ***\"\n qvalue = 0.0\n for feature_name, value in self.featExtractor.getFeatures(state, action).iteritems():\n qvalue += value * self.weights[feature_name]\n return qvalue", "def calc_resources(self):\n self.popula = self.energy = self.popula_used = self.energy_used = 0\n self.cnt_public = self.cnt_shop = self.cnt_1 = self.cnt_2 = self.cnt_3 = self.cnt_4 = self.cnt_5 = self.cnt_office = 0\n self.popula += self.extra_pop\n for i in range(20):\n b = self.b[i]\n if b == 'T':\n self.popula += self.f[i] * 2\n self.energy_used += 1\n elif b == 'O':\n self.popula_used += 1\n self.energy_used += 1\n self.cnt_office += self.f[i]\n elif b == 'U':\n self.popula_used += 1\n self.cnt_public += 1\n elif b == 'S':\n self.energy_used += 1\n self.cnt_shop += 1\n elif b == '1':\n self.popula += 1\n self.energy += 1\n self.popula_used += 1\n self.cnt_1 += 1\n elif b == '2':\n self.popula_used += 1\n self.cnt_2 += 1\n elif b == '3':\n self.popula_used += 1\n self.cnt_3 += 1\n elif b == '4':\n self.popula += 2\n self.popula_used += 1\n self.cnt_4 += 1\n elif b == '5':\n self.energy += 2\n self.popula_used += 1\n self.cnt_5 += 1\n elif b == 'A':\n self.energy += 2\n self.popula_used += 1\n elif b == 'F':\n self.energy += 3\n self.popula_used += 1\n elif b == 'G':\n self.popula += 1\n if 'tvst' in args.exp:\n self.popula += self.cnt_shop\n if 'ward' in args.exp:\n self.popula += 3\n if 'elec' in args.exp:\n self.energy += 3\n if 'capi' in args.exp:\n self.popula_used += 2\n if 'fire' in args.exp:\n self.popula_used += 1\n if 'park' in args.exp:\n self.popula_used += 1", "def produce(self, request, meta, raven_variables, dispatch, t, level=None):\n #balance = defaultdict(float)\n interaction = self.get_interaction()\n balance, meta = 
interaction.produce(request, meta, raven_variables, dispatch, t, level)\n #for resource, quantity in int_balance.items():\n # balance[resource] += quantity\n return balance, meta", "def healthcare():", "def getQValue(self, state, action):\n \"*** YOUR CODE HERE ***\"\n qvalue = 0\n features = self.featExtractor.getFeatures(state, action)\n #Each feature is in the form of dictionary {((3, 3), 'east'): 1.0}. Each key is a combination of coordinate and direction. Each value represents the old qvalue.\n for feature in features.keys():\n qvalue += features[feature] * self.weights[feature]\n return qvalue", "def _compute_wo(self):\n self.uom_id = self.wo_id.uom_id.id\n self.final_product_id = self.wo_id.final_product_id.id\n if not self.supplier_id:\n self.service_ids = [x.product_id.id for x in self.wo_id.consumed_service_ids if x.product_id]\n else:\n service_ids = [x.product_id.id for x in self.wo_id.consumed_service_ids if x.product_id]\n if service_ids:\n product_supp_rcs = self.env['product.supplierinfo'].search([('partner_id', '=', self.supplier_id.id), ('product_id', 'in', service_ids)])\n if product_supp_rcs:\n product_ids = [x.product_id.id for x in product_supp_rcs if x.product_id]\n else:\n product_ids = []\n else:\n product_ids = [] \n \n self.service_ids = product_ids\n \n # Permet de savoir si l'OT suivant est de sous-traitance et qu'il y a qu'un seul OT suivant\n no_direct_fp = True\n if self.wo_id.next_wo_ids and len(self.wo_id.next_wo_ids) == 1 and self.wo_id.next_wo_ids[0].is_subcontracting and self.wo_id.next_wo_ids[0].consumed_service_ids:\n no_direct_fp = False\n \n self.no_direct_fp = no_direct_fp", "def update(self):\n self.data.update()\n stats = self.data.stats\n ticker = self.data.ticker\n\n if self.type == \"exchangerate\":\n self._attr_state = ticker[self._currency].p15min\n self._attr_unit_of_measurement = self._currency\n elif self.type == \"trade_volume_btc\":\n self._attr_state = f\"{stats.trade_volume_btc:.1f}\"\n elif self.type == \"miners_revenue_usd\":\n self._attr_state = f\"{stats.miners_revenue_usd:.0f}\"\n elif self.type == \"btc_mined\":\n self._attr_state = str(stats.btc_mined * 0.00000001)\n elif self.type == \"trade_volume_usd\":\n self._attr_state = f\"{stats.trade_volume_usd:.1f}\"\n elif self.type == \"difficulty\":\n self._attr_state = f\"{stats.difficulty:.0f}\"\n elif self.type == \"minutes_between_blocks\":\n self._attr_state = f\"{stats.minutes_between_blocks:.2f}\"\n elif self.type == \"number_of_transactions\":\n self._attr_state = str(stats.number_of_transactions)\n elif self.type == \"hash_rate\":\n self._attr_state = f\"{stats.hash_rate * 0.000001:.1f}\"\n elif self.type == \"timestamp\":\n self._attr_state = stats.timestamp\n elif self.type == \"mined_blocks\":\n self._attr_state = str(stats.mined_blocks)\n elif self.type == \"blocks_size\":\n self._attr_state = f\"{stats.blocks_size:.1f}\"\n elif self.type == \"total_fees_btc\":\n self._attr_state = f\"{stats.total_fees_btc * 0.00000001:.2f}\"\n elif self.type == \"total_btc_sent\":\n self._attr_state = f\"{stats.total_btc_sent * 0.00000001:.2f}\"\n elif self.type == \"estimated_btc_sent\":\n self._attr_state = f\"{stats.estimated_btc_sent * 0.00000001:.2f}\"\n elif self.type == \"total_btc\":\n self._attr_state = f\"{stats.total_btc * 0.00000001:.2f}\"\n elif self.type == \"total_blocks\":\n self._attr_state = f\"{stats.total_blocks:.0f}\"\n elif self.type == \"next_retarget\":\n self._attr_state = f\"{stats.next_retarget:.2f}\"\n elif self.type == 
\"estimated_transaction_volume_usd\":\n self._attr_state = f\"{stats.estimated_transaction_volume_usd:.2f}\"\n elif self.type == \"miners_revenue_btc\":\n self._attr_state = f\"{stats.miners_revenue_btc * 0.00000001:.1f}\"\n elif self.type == \"market_price_usd\":\n self._attr_state = f\"{stats.market_price_usd:.2f}\"", "def getQValue(self, state, action):\n \"*** YOUR CODE HERE ***\"\n # print \"getQValue\"\n features = self.featExtractor.getFeatures(state, self.index)#.values()\n #weights = self.weights.values()\n #dotProduct = reduce( (lambda x, y: x*y), map( (lambda x, y: x+y), self.weights, features))\n #return dotProduct\n score = 0\n for key in features.keys():\n score += features[key]*self.weights[key]\n return score", "def recode_RelGQAttr():\n LIVE_ALONE = \"LIVE_ALONE\"\n RELSHIP = \"RELSHIP\"\n RTYPE = \"RTYPE\"\n GQTYPE = \"GQTYPE\"\n \n sql = ['case']\n # live alone / not live alone\n sql += [f\"when {LIVE_ALONE} = '1' and {RELSHIP} = '20' and {RTYPE} = '3' and {GQTYPE} = '000' then '0'\"]\n sql += [f\"when {LIVE_ALONE} = '2' and {RELSHIP} = '20' and {RTYPE} = '3' and {GQTYPE} = '000' then '1'\"]\n # relationship\n sql += [f\"when {LIVE_ALONE} = '2' and {RELSHIP} = '21' and {RTYPE} = '3' and {GQTYPE} = '000' then '2'\"]\n sql += [f\"when {LIVE_ALONE} = '2' and {RELSHIP} = '22' and {RTYPE} = '3' and {GQTYPE} = '000' then '3'\"]\n sql += [f\"when {LIVE_ALONE} = '2' and {RELSHIP} = '23' and {RTYPE} = '3' and {GQTYPE} = '000' then '4'\"]\n sql += [f\"when {LIVE_ALONE} = '2' and {RELSHIP} = '24' and {RTYPE} = '3' and {GQTYPE} = '000' then '5'\"]\n sql += [f\"when {LIVE_ALONE} = '2' and {RELSHIP} = '25' and {RTYPE} = '3' and {GQTYPE} = '000' then '6'\"]\n sql += [f\"when {LIVE_ALONE} = '2' and {RELSHIP} = '26' and {RTYPE} = '3' and {GQTYPE} = '000' then '7'\"]\n sql += [f\"when {LIVE_ALONE} = '2' and {RELSHIP} = '27' and {RTYPE} = '3' and {GQTYPE} = '000' then '8'\"]\n sql += [f\"when {LIVE_ALONE} = '2' and {RELSHIP} = '28' and {RTYPE} = '3' and {GQTYPE} = '000' then '9'\"]\n sql += [f\"when {LIVE_ALONE} = '2' and {RELSHIP} = '29' and {RTYPE} = '3' and {GQTYPE} = '000' then '10'\"]\n sql += [f\"when {LIVE_ALONE} = '2' and {RELSHIP} = '30' and {RTYPE} = '3' and {GQTYPE} = '000' then '11'\"]\n sql += [f\"when {LIVE_ALONE} = '2' and {RELSHIP} = '31' and {RTYPE} = '3' and {GQTYPE} = '000' then '12'\"]\n sql += [f\"when {LIVE_ALONE} = '2' and {RELSHIP} = '32' and {RTYPE} = '3' and {GQTYPE} = '000' then '13'\"]\n sql += [f\"when {LIVE_ALONE} = '2' and {RELSHIP} = '33' and {RTYPE} = '3' and {GQTYPE} = '000' then '14'\"]\n sql += [f\"when {LIVE_ALONE} = '2' and {RELSHIP} = '34' and {RTYPE} = '3' and {GQTYPE} = '000' then '15'\"]\n sql += [f\"when {LIVE_ALONE} = '2' and {RELSHIP} = '35' and {RTYPE} = '3' and {GQTYPE} = '000' then '16'\"]\n sql += [f\"when {LIVE_ALONE} = '2' and {RELSHIP} = '36' and {RTYPE} = '3' and {GQTYPE} = '000' then '17'\"]\n # gq types\n # institutional\n # 100s\n sql += [f\"when {LIVE_ALONE} = '0' and {RELSHIP} = '37' and {RTYPE} = '5' and {GQTYPE} = '101' then '18'\"]\n sql += [f\"when {LIVE_ALONE} = '0' and {RELSHIP} = '37' and {RTYPE} = '5' and {GQTYPE} = '102' then '19'\"]\n sql += [f\"when {LIVE_ALONE} = '0' and {RELSHIP} = '37' and {RTYPE} = '5' and {GQTYPE} = '103' then '20'\"]\n sql += [f\"when {LIVE_ALONE} = '0' and {RELSHIP} = '37' and {RTYPE} = '5' and {GQTYPE} = '104' then '21'\"]\n sql += [f\"when {LIVE_ALONE} = '0' and {RELSHIP} = '37' and {RTYPE} = '5' and {GQTYPE} = '105' then '22'\"]\n sql += [f\"when {LIVE_ALONE} = '0' and {RELSHIP} = '37' and 
{RTYPE} = '5' and {GQTYPE} = '106' then '23'\"]\n # 200s\n sql += [f\"when {LIVE_ALONE} = '0' and {RELSHIP} = '37' and {RTYPE} = '5' and {GQTYPE} = '201' then '24'\"]\n sql += [f\"when {LIVE_ALONE} = '0' and {RELSHIP} = '37' and {RTYPE} = '5' and {GQTYPE} = '202' then '25'\"]\n sql += [f\"when {LIVE_ALONE} = '0' and {RELSHIP} = '37' and {RTYPE} = '5' and {GQTYPE} = '203' then '26'\"]\n # 300s\n sql += [f\"when {LIVE_ALONE} = '0' and {RELSHIP} = '37' and {RTYPE} = '5' and {GQTYPE} = '301' then '27'\"]\n # 400s\n sql += [f\"when {LIVE_ALONE} = '0' and {RELSHIP} = '37' and {RTYPE} = '5' and {GQTYPE} = '401' then '28'\"]\n sql += [f\"when {LIVE_ALONE} = '0' and {RELSHIP} = '37' and {RTYPE} = '5' and {GQTYPE} = '402' then '29'\"]\n sql += [f\"when {LIVE_ALONE} = '0' and {RELSHIP} = '37' and {RTYPE} = '5' and {GQTYPE} = '403' then '30'\"]\n sql += [f\"when {LIVE_ALONE} = '0' and {RELSHIP} = '37' and {RTYPE} = '5' and {GQTYPE} = '404' then '31'\"]\n sql += [f\"when {LIVE_ALONE} = '0' and {RELSHIP} = '37' and {RTYPE} = '5' and {GQTYPE} = '405' then '32'\"]\n # non-institutional\n # 500s\n sql += [f\"when {LIVE_ALONE} = '0' and {RELSHIP} = '38' and {RTYPE} = '5' and {GQTYPE} = '501' then '33'\"]\n # 600s\n sql += [f\"when {LIVE_ALONE} = '0' and {RELSHIP} = '38' and {RTYPE} = '5' and {GQTYPE} = '601' then '34'\"]\n sql += [f\"when {LIVE_ALONE} = '0' and {RELSHIP} = '38' and {RTYPE} = '5' and {GQTYPE} = '602' then '35'\"]\n # 700s\n sql += [f\"when {LIVE_ALONE} = '0' and {RELSHIP} = '38' and {RTYPE} = '5' and {GQTYPE} = '701' then '36'\"]\n # 800s\n sql += [f\"when {LIVE_ALONE} = '0' and {RELSHIP} = '38' and {RTYPE} = '5' and {GQTYPE} = '801' then '37'\"]\n sql += [f\"when {LIVE_ALONE} = '0' and {RELSHIP} = '38' and {RTYPE} = '5' and {GQTYPE} = '802' then '38'\"]\n # 900s\n sql += [f\"when {LIVE_ALONE} = '0' and {RELSHIP} = '38' and {RTYPE} = '5' and {GQTYPE} = '900' then '39'\"]\n sql += [f\"when {LIVE_ALONE} = '0' and {RELSHIP} = '38' and {RTYPE} = '5' and {GQTYPE} = '901' then '40'\"]\n sql += [f\"when {LIVE_ALONE} = '0' and {RELSHIP} = '38' and {RTYPE} = '5' and {GQTYPE} = '997' then '41'\"]\n\n sql += ['else -1']\n sql += ['end']\n sql = \"\\n\".join(sql)\n\n sqldict = { schemamaker.RELGQ.getName(): sql }\n return sqldict", "def getQValue(self, state, action):\n \"*** YOUR CODE HERE ***\"\n feature_dictionary = self.featExtractor.getFeatures(state, action)\n\n QValue = 0\n for feature in feature_dictionary:\n QValue += self.weights[feature] * feature_dictionary[feature]\n return QValue", "def getQValue(self, state, action):\n \"*** YOUR CODE HERE ***\"\n feature_dictionary = self.featExtractor.getFeatures(state, action)\n\n QValue = 0\n for feature in feature_dictionary:\n QValue += self.weights[feature] * feature_dictionary[feature]\n return QValue", "def getQValue(self, state, action):\n \"\"\"Description:\n [Enter a description of what you did here.]\n Use first equation in slide 71 of MDP to compute q-value depond on weights and current features.\n \n !! But I think what I did is not work for IdentityExtractor. 
Because feature of IdentityExtrator always return 1,\n it did not change even a ghost is closing.\n \"\"\"\n \"\"\" YOUR CODE HERE \"\"\"\n # if weight is empty, then weight will need to initial to 1 for all features\n # According to which Extractor user choose, weight counter will have equal number of keys.\n if len(self.weight) == 0:\n feat = self.featExtractor.getFeatures(state, action)\n self.weight.incrementAll(feat.keys(), 1)\n \n qValue = self.weight * self.featExtractor.getFeatures(state,action)\n return qValue\n \"\"\" END CODE \"\"\"", "def getStatus(self, includeVotes=False):\n #1) Calculate beliefs\n #TODO want to cache beliefs and make incremental updates\n print \"Calculating beliefs\"\n beliefs = self.calculateBeliefs()\n\n #2) Get POMDP data for each question\n print \"Getting POMDP decision for each question\"\n out = {}\n for (q_id,belief) in beliefs.iteritems():\n print \"q_id\", q_id\n\n #get POMDP action reward pairs\n action_rewards = {str(a):r for a,r in self.policy.get_action_rewards(belief).iteritems()}\n\n #which action has best expected reward\n best_action, best_expected_reward = self.policy.get_best_action(belief)\n\n #get best action as readable string (submit-true, etc.)\n best_action_str = self.pomdp_var.actions[best_action]\n\n out[q_id] = dict(best_action=best_action,\n best_expected_reward=best_expected_reward,\n best_action_str=best_action_str,\n action_rewards=action_rewards)\n\n #optional get all votes on this question as JSON\n #XXX NOTE SLOW! Has to access database\n if includeVotes:\n answers = self.getQuestionCompletedAnswers(q_id)\n votes = []\n for answer in answers:\n q_name = answer.question.name\n w_id = str(answer.worker.id)\n w_platform_id = str(answer.worker.platform_id)\n w_skill = answer.worker.inference_results['EM']['skill']\n value = answer.value\n vote = {\"worker_id\": w_id, \"worker_platform_id\":w_platform_id, \"est_skill\":w_skill, \"value\":value}\n votes.append(vote)\n\n out[q_id]['votes'] = votes\n\n return out", "def stateQualtityScore(roomba):\n return 0", "def getQValue(self, state, action):\n \"*** YOUR CODE HERE ***\"\n Q_Value = 0 #initializing q value\n\n feat_Extractor = self.featExtractor\n\n weight = self.weights #To get the weight to control exploration and exploitation\n\n features = feat_Extractor.getFeatures(state,action) #to get all the features associated with (state,action) pair\n\n for each_feature in features:\n #refer to README_Reinforcement.txt for the formula at line 11\n temp_Qvalue = weight[each_feature] * features[each_feature] #Q(state,action) = w * featureVector where * is the dotProduct operator\n Q_Value = Q_Value + temp_Qvalue\n\n return Q_Value #Returns final qvalue\n #util.raiseNotDefined()", "def __init__(self):\n self.processingMethod = \"Au\" \n self.processingLoss = 0.10\n self.refiningTake = 0.10\n self.processingPower = []", "def doParametersOfInterest(self):\n ''' ref : physicsmodel -> rvf\n self.modelBuilder.out.var(\"MH\").setRange(float(self.mHRange[0]),float(self.mHRange[1]))\n self.modelBuilder.out.var(\"MH\").setConstant(False)\n '''\n\n self.modelBuilder.doVar(\"mu[0,0,1000]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doVar(\"Fvbf[0,0,1]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doSet(\"POI\",\"mu,Fvbf\")\n self.modelBuilder.doVar(\"\")\n self.modelBuilder.factory_('expr::ggH_s_func(\"(@0-sqrt(@0))*(1.-@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 
'expr::ggH_b_func(\"(1-sqrt(@0))*(1.-@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::ggH_sbi_func(\"sqrt(@0)*(1.-@1)\", mu,Fvbf)')\n\n self.modelBuilder.factory_('expr::vbfH_s_func(\"(@0-sqrt(@0))*(@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::vbfH_b_func(\"(1-sqrt(@0))*(@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::vbfH_sbi_func(\"sqrt(@0)*(@1)\", mu,Fvbf)')", "def Mutation(self, state):\n changed = False;\n #-------------------------------------------------------\n # MUTATE CONDITION\n #-------------------------------------------------------\n for att in range(cons.env.format_data.numb_attributes): #Each condition specifies different attributes, so we need to go through all attributes in the dataset.\n att_info = cons.env.format_data.attribute_info[att]\n if random.random() < cons.mu and state[att] != cons.label_missing_data:\n #MUTATION--------------------------------------------------------------------------------------------------------------\n if att not in self.specified_attributes: #Attribute not yet specified\n self.specified_attributes.append(att)\n self.condition.append(self.buildMatch(att, state)) #buildMatch handles both discrete and continuous attributes\n changed = True\n\n elif att in self.specified_attributes: #Attribute already specified\n i = self.specified_attributes.index(att) #reference to the position of the attribute in the rule representation\n #-------------------------------------------------------\n # DISCRETE OR CONTINUOUS ATTRIBUTE - remove attribute specification with 50% chance if we have continuous attribute, or 100% if discrete attribute.\n #-------------------------------------------------------\n if not att_info[0] or random.random() > 0.5:\n self.specified_attributes.remove(att)\n self.condition.pop(i) #buildMatch handles both discrete and continuous attributes\n changed = True\n #-------------------------------------------------------\n # CONTINUOUS ATTRIBUTE - (mutate range with 50% probability vs. 
removing specification of this attribute all together)\n #-------------------------------------------------------\n else:\n #Mutate continuous range - based on Bacardit 2009 - Select one bound with uniform probability and add or subtract a randomly generated offset to bound, of size between 0 and 50% of att domain.\n attribute_range = float(att_info[1][1]) - float(att_info[1][0])\n mutate_range = random.random()*0.5*attribute_range\n if random.random() > 0.5: #Mutate minimum\n if random.random() > 0.5: #Add\n self.condition[i][0] += mutate_range\n else: #Subtract\n self.condition[i][0] -= mutate_range\n else: #Mutate maximum\n if random.random() > 0.5: #Add\n self.condition[i][1] += mutate_range\n else: #Subtract\n self.condition[i][1] -= mutate_range\n\n #Repair range - such that min specified first, and max second.\n self.condition[i].sort()\n changed = True\n #-------------------------------------------------------\n # NO MUTATION OCCURS\n #-------------------------------------------------------\n else:\n pass\n #-------------------------------------------------------\n # MUTATE PHENOTYPE\n #-------------------------------------------------------\n if cons.env.format_data.discrete_action:\n nowchanged = self.discreteActionMutation()\n #else:\n # nowChanged = self.continuousPhenotypeMutation(phenotype)\n\n if changed or nowchanged:\n return True", "def make_pipeline(context):\n \n # Base universe set to the Q1500US\n base_universe = Q500US()\n \n #Get all industry codes\n industry=morningstar.asset_classification.morningstar_industry_code.latest\n #Get all sector codes\n sector = Sector()\n \n # Create filters (to be used as masks) of different industries/sectors \n # This is the mask that should exclude the most stocks. \n # Note that these may need to be even further filtered to exclude securities outside of a \n # similar range of volumes/size. For instance, the defense sector stock provides stocks as large as # LMT but also small defense companies. Although this shouldn't matter due to the second filter of \n # crosscorrelation, this may be unnecassary computational expense. \n pipe=Pipeline()\n #Below forms a \"sentiment screen\" that takes only stocks that have been rated a certain number of times and of those ratings there are at least 2.85 times as many bull scored messages as there are bear scored messages. 
\n pipe.add(st.bull_scored_messages .latest, 'bull_scored_messages')\n pipe.add(st.bear_scored_messages .latest, 'bear_scored_messages')\n sentimentScreen=(((st.bull_scored_messages.latest) > (context.Sentiment_multiplier*st.bear_scored_messages.latest)) & (st.bear_scored_messages.latest > 5))\n \n dFilt=sector.eq(310) #Indicates aerospace/defense sector\n dFilt2=industry.eq(31052107) #Indicates aerospace/defense industry\n tFilt=sector.eq(311) #Indicates consumer electronics sector\n tFilt2=industry.eq(31167138) #Indicates consumer electronics industry \n cFilt=sector.eq(101) #Chemical sector\n cFilt2=industry.eq(10103003)\n aFilt=sector.eq(102)\n aFilt2=industry.eq(10209017) #Auto manufacturing industry\n depFilt2=industry.eq(10217034) #Department store industry\n #dFilt2,tFilt2,cFilt2,aFilt2=True,True,True,True #Remove industry requirement\n defenseFilt= dFilt & dFilt2 #Combination of filters\n techFilt= tFilt & tFilt2\n chemFilt = cFilt & cFilt2 \n autoFilt = aFilt & aFilt2 \n tradable=base_universe & (defenseFilt | techFilt | chemFilt | autoFilt | depFilt2) & sentimentScreen\n \n \n pipe.set_screen(tradable)\n pipe.add(defenseFilt,'defenseFilt')\n pipe.add(techFilt,'techFilt')\n pipe.add(chemFilt,'chemFilt')\n pipe.add(autoFilt,'autoFilt')\n pipe.add(depFilt2,'depFilt')\n \n \n \n #TODO: May also want to return stock sentiment data and further filter tuple couples by only accepting couples with sentiment data in a similar range (further attributing to the validity of the calculated cross-correlation)\n \n return pipe", "def _status(self):\n self._total_audio_text = urwid.Text('', align='right')\n self._audio_length_text = urwid.Text('', align='left')\n self._saved_text = urwid.Text('', align='center')\n status = urwid.Columns([\n ('weight', 1, self._audio_length_text),\n ('weight', 1, self._saved_text),\n ('weight', 1, self._total_audio_text),\n ])\n return urwid.AttrMap(status, 'status')", "def __init__(self):\n self.label = \"Calculate response\"\n self.description = \"Use this tool to combine the evidence weighted by their associated generalization in the weights-of-evidence table. 
This tool calculates the posterior probability, standard deviation (uncertainty) due to weights, variance (uncertainty) due to missing data, and the total standard deviation (uncertainty) based on the evidence and how the evidence is generalized in the associated weights-of-evidence tables.The calculations use the Weight and W_Std in the weights table from Calculate Weights.\"\n self.canRunInBackground = False\n self.category = \"Weights of Evidence\"", "def evaluate_design(self): # to update the pr object", "def attributes(c):\n global cfg # pylint: disable=global-variable-not-assigned\n if int(c['xp01']) >= cfg['card']['xp_limit']:\n return 'evolve'\n else:\n return 'level'", "def objective(self):\n pass", "def get_stat(self, code):\n total = 0\n for x in self.equipment:\n total += x.get_stat(code)\n return total", "def _obj_func(self):\n if self.session.stone_summons.sum() == SUMMONS_PER_SESSION:\n next_stone_cost = 1 # hack to make the comparison work\n else:\n next_stone_cost = stone_cost(self.session.stone_summons.sum())\n\n return (\n -(self.event.orbs_spent + next_stone_cost),\n self.session.stone_presences.sum(),\n )", "def calculate(self):", "def get_properties(self):\n assert self.kekulize, '#ERROR: u need to get explicit BOs for amon generation'\n self.vs = np.array([ ai.GetTotalValence() for ai in self.m0.GetAtoms() ], np.int)\n #self.update_bom()\n self.ias_heav = self.ias[ self.zs > 1 ]\n bom_heav = self.bom[ self.ias_heav, : ][ :, self.ias_heav ]\n self.vs_heav = bom_heav.sum(axis=0)\n self.cns_heav = ( bom_heav > 0 ).sum(axis=0)\n self.nhs = self.vs[:self.nheav] - self.vs_heav - self.chgs[:self.nheav]\n self.dvs = self.vs_heav - self.cns_heav\n self.hybs = np.array([ _hyb[ai.GetHybridization()] for ai in self.m.GetAtoms() ])", "def calc_ros(self, *args):\n return 0", "def stats(self):\n return {attr: getattr(self, attr) for attr in ['cash', 'rawg_quantity', 'rawg_demand', 'rawg_price', 'rig_quantity', 'rig_supply', 'rig_price']}", "def create_competition_stat_object():\n\n pga_northerntrust_2015 = {\"name\": \"pga_northerntrust_2015\", \"date\": [\"2015-02-16\", \"2015-02-22\"]}\n pga_ohlclassic_2015 = {\"name\": \"pga_ohlclassic_2015\", \"date\": [\"2014-11-10\", \"2014-11-16\"]}\n pga_pebblebeach_2015 = {\"name\": \"pga_pebblebeach_2015\", \"date\": [\"2015-02-09\", \"2015-02-15\"]}\n pga_arnoldpalmer_2015 = {\"name\": \"pga_arnoldpalmer_2015\", \"date\": [\"2015-03-16\", \"2015-03-22\"]}\n pga_pgachampionship_2015 = {\"name\": \"pga_pgachampionship_2015\", \"date\": [\"2015-08-10\", \"2015-08-16\"]}\n pga_barclays_2015 = {\"name\": \"pga_barclays_2015\", \"date\": [\"2015-08-24\", \"2015-08-30\"]}\n pga_playerschampions_2015 = {\"name\": \"pga_playerschampions_2015\", \"date\": [\"2015-05-04\", \"2015-05-10\"]}\n pga_bmw_2015 = {\"name\": \"pga_bmw_2015\", \"date\": [\"2015-09-14\", \"2015-09-20\"]}\n pga_puertoricoopen_2015 = {\"name\": \"pga_puertoricoopen_2015\", \"date\": [\"2015-03-02\", \"2015-03-08\"]}\n pga_bridgestone_2015 = {\"name\": \"pga_bridgestone_2015\", \"date\": [\"2015-08-03\", \"2015-08-09\"]}\n pga_quickenloans_2015 = {\"name\": \"pga_quickenloans_2015\", \"date\": [\"2015-07-27\", \"2015-08-02\"]}\n pga_cadillacchampionship_2015 = {\"name\": \"pga_cadillacchampionship_2015\", \"date\": [\"2015-03-02\", \"2015-03-08\"]}\n pga_rbccanadianopen_2015 = {\"name\": \"pga_rbccanadianopen_2015\", \"date\": [\"2015-07-20\", \"2015-07-26\"]}\n pga_cimbclassic_2015 = {\"name\": \"pga_cimbclassic_2015\", \"date\": [\"2014-10-20\", \"2014-10-26\"]}\n 
pga_rbcheritage_2015 = {\"name\": \"pga_rbcheritage_2015\", \"date\": [\"2015-04-13\", \"2015-04-19\"]}\n pga_farmersinsurance_2015 = {\"name\": \"pga_farmersinsurance_2015\", \"date\": [\"2015-02-02\", \"2015-02-08\"]}\n pga_cocacola_2015 = {\"name\": \"pga_cocacola_2015\", \"date\": [\"2015-09-21\", \"2015-09-27\"]}\n pga_sanderson_2015 = {\"name\": \"pga_sanderson_2015\", \"date\": [\"2014-11-03\", \"2014-11-09\"]}\n pga_deutchebank_2015 = {\"name\": \"pga_deutchebank_2015\", \"date\": [\"2015-08-31\", \"2015-09-07\"]}\n pga_shriners_2015 = {\"name\": \"pga_shriners_2015\", \"date\": [\"2014-10-13\", \"2014-10-19\"]}\n pga_frys_2015 = {\"name\": \"pga_frys_2015\", \"date\": [\"2014-10-06\", \"2014-10-12\"]}\n pga_sonyopen_2015 = {\"name\": \"pga_sonyopen_2015\", \"date\": [\"2015-01-12\", \"2015-01-18\"]}\n pga_greenbrier_2015 = {\"name\": \"pga_greenbrier_2015\", \"date\": [\"2015-06-29\", \"2015-07-05\"]}\n pga_thememorial_2015 = {\"name\": \"pga_thememorial_2015\", \"date\": [\"2015-06-01\", \"2015-06-07\"]}\n pga_hondaclassic_2015 = {\"name\": \"pga_hondaclassic_2015\", \"date\": [\"2015-02-23\", \"2015-03-01\"]}\n pga_theopen_2015 = {\"name\": \"pga_theopen_2015\", \"date\": [\"2015-07-13\", \"2015-07-19\"]}\n pga_houstonopen_2015 = {\"name\": \"pga_houstonopen_2015\", \"date\": [\"2015-03-30\", \"2015-04-05\"]}\n pga_usopen_2015 = {\"name\": \"pga_usopen_2015\", \"date\": [\"2015-06-15\", \"2015-06-21\"]}\n pga_hsbcchampions_2015 = {\"name\": \"pga_hsbcchampions_2015\", \"date\": [\"2014-11-03\", \"2014-11-09\"]}\n pga_valspar_2015 = {\"name\": \"pga_valspar_2015\", \"date\": [\"2015-03-09\", \"2015-03-15\"]}\n pga_humana_2015 = {\"name\": \"pga_humana_2015\", \"date\": [\"2015-01-19\", \"2015-01-25\"]}\n pga_wastemanagement_2015 = {\"name\": \"pga_wastemanagement_2015\", \"date\": [\"2015-01-26\", \"2015-02-01\"]}\n pga_hyundai_2015 = {\"name\": \"pga_hyundai_2015\", \"date\": [\"2015-01-05\", \"2015-01-12\"]}\n pga_wellsfargo_2015 = {\"name\": \"pga_wellsfargo_2015\", \"date\": [\"2015-05-11\", \"2015-05-17\"]}\n pga_johndeere_2015 = {\"name\": \"pga_johndeere_2015\", \"date\": [\"2015-07-06\", \"2015-07-12\"]}\n pga_wyndham_2015 = {\"name\": \"pga_wyndham_2015\", \"date\": [\"2015-08-17\", \"2015-08-23\"]}\n pga_masters_2015 = {\"name\": \"pga_masters_2015\", \"date\": [\"2015-04-16\", \"2015-04-12\"]}\n pga_zurich_2015 = {\"name\": \"pga_zurich_2015\", \"date\": [\"2015-04-20\", \"2015-04-26\"]}\n pga_valero_2015 = {\"name\": \"pga_valero_2015\", \"date\": [\"2015-03-23\", \"2015-03-29\"]}\n pga_crowneplaza_2015 = {\"name\": \"pga_crowneplaza_2015\", \"date\": [\"2015-05-18\", \"2015-05-24\"]}\n pga_attbyronnelson_2015 = {\"name\": \"pga_attbyronnelson_2015\", \"date\": [\"2015-05-25\", \"2015-05-31\"]}\n pga_fedexstjude_2015 = {\"name\": \"pga_fedexstjude_2015\", \"date\": [\"2015-06-08\", \"2015-06-14\"]}\n\n dates_1415 = [pga_northerntrust_2015 ,pga_ohlclassic_2015 ,pga_pebblebeach_2015 ,pga_arnoldpalmer_2015 ,\n pga_pgachampionship_2015 ,pga_barclays_2015 ,pga_playerschampions_2015 ,pga_bmw_2015 ,\n pga_puertoricoopen_2015 ,pga_bridgestone_2015 ,pga_quickenloans_2015 ,pga_cadillacchampionship_2015 ,\n pga_rbccanadianopen_2015 ,pga_cimbclassic_2015 ,pga_rbcheritage_2015 ,pga_farmersinsurance_2015 ,\n pga_cocacola_2015 ,pga_sanderson_2015 ,pga_deutchebank_2015 ,pga_shriners_2015 ,pga_frys_2015 ,\n pga_sonyopen_2015 ,pga_greenbrier_2015 ,pga_thememorial_2015 ,pga_hondaclassic_2015 ,pga_theopen_2015 ,\n pga_houstonopen_2015 ,pga_usopen_2015 ,pga_hsbcchampions_2015 
,pga_valspar_2015 ,pga_humana_2015 ,\n pga_wastemanagement_2015 ,pga_hyundai_2015 ,pga_wellsfargo_2015 ,pga_johndeere_2015 ,pga_wyndham_2015 ,\n pga_masters_2015 ,pga_zurich_2015 ,pga_valero_2015 ,pga_crowneplaza_2015 ,pga_attbyronnelson_2015 ,\n pga_fedexstjude_2015 ]\n\n return dates_1415", "def getStatus():", "def quantities():\n # publish the modules\n return (\n SI,\n angle, area, energy, force, length, mass, power, pressure,\n speed, substance, temperature, time, volume\n )", "def adm_prepare_status_message():\n # Create choice list for possible values in additional info. Choice list will\n # have name as <state_chart_name>States\n # This will referenced by additional info with name same as state chart on\n # acm object\n try:\n utils_obj = FIntegrationUtils.FIntegrationUtils()\n\n cmdty_delivery_details = [{'name':'CFR',\t'description':'Cost and Freight'},\n {'name':'CIF',\t'description':'Cost, Insurance, and Freight'},\n {'name':'CIP',\t'description':'Carriage and Insurance Paid'},\n {'name':'CPT',\t'description':'Carriage Paid To'},\n {'name':'DAF',\t'description':'Delivered At Frontier'},\n {'name':'DDP',\t'description':'Delivered Duty Paid'},\n {'name':'DDU',\t'description':'Delivered Duty Unpaid'},\n {'name':'DEQ',\t'description':'Delivered Ex Quay'},\n {'name':'DES',\t'description':'Delivered Ex Ship'},\n {'name':'DTD',\t'description':'Door To Door'},\n {'name':'EXW',\t'description':'EX Works'},\n {'name':'FAS',\t'description':'Free Alongside Ship'},\n {'name':'FCA',\t'description':'Free Carrier'},\n {'name':'FOB',\t'description':'Free On Board'},\n {'name':'LOC',\t'description':'LOCO London'},\n {'name':'OTH',\t'description':'Other'}\n ]\n\n # Create ChoiceList for all Commodity Delivery Details\n for vals in cmdty_delivery_details:\n try:\n utils_obj.create_choice_list('CmdtyDlvryDtls', [vals])\n except FIntegrationUtils.ChoiceListAlreadyExist as e:\n print((\"Choice List <%s> already exists\" % (vals['name'])))\n except Exception as e:\n print((\"Exception in creating Choice List : %s\" % str(e)))\n\n add_info_spec_cmdty_dlvry_dtls = {\n 'FieldName': 'CommodityDlvryDtls',\n 'Description': 'CmdtyDlvryDtls',\n 'Default': '',\n 'TypeGroup': 'RecordRef',\n 'Type': 'ChoiceList',\n 'Table': 'Trade',\n }\n\n add_info_spec_cmdty_alloc_dtls = {\n 'FieldName':'CommodityAllocated',\n 'Description':'Commodity Allocated',\n 'Default':'',\n 'TypeGroup':'Standard',\n 'Type':'Boolean',\n 'Table':'Trade'\n }\n\n FSwiftMLUtils.create_add_info()\n\n for each in [add_info_spec_cmdty_dlvry_dtls, add_info_spec_cmdty_alloc_dtls]:\n try:\n utils_obj.create_additional_info_spec(each)\n except FIntegrationUtils.AddInfoSpecAlreadyExits as e:\n print((\"Additional Info <%s> already exists on table <%s>\"%(each['FieldName'], each['Table'])))\n except Exception as e:\n print((\"Exception in creating Additional info : %s\"%str(e)))\n\n except Exception as e:\n print((\"Exception in adm_prepare_security_sett_conf : %s\"%str(e)))", "def coverage(self):\r\n return 0, 1", "def get_result_data(self):\n\n if self.catalog[\"wetlands_within_25m\"]:\n wetland_status = \"inside\"\n elif self.catalog[\"wetlands_within_100m\"]:\n wetland_status = \"close_to\"\n elif self.catalog[\"potential_wetlands_within_0m\"]:\n wetland_status = \"inside_potential\"\n else:\n wetland_status = \"outside\"\n\n if self.catalog[\"created_surface\"] >= 1000:\n project_size = \"big\"\n elif self.catalog[\"created_surface\"] >= 700:\n project_size = \"medium\"\n else:\n project_size = \"small\"\n\n return wetland_status, project_size", 
"def mezclar_bolsa(self):", "def test_attributes(self):\n result = self.plugin_instance.create_probability_cube(\n self.percentiles_cube, self.orography_cube)\n self.assertEqual(result.units, \"1\")\n self.assertEqual(result.name(), self.new_name)\n self.assertEqual(result.attributes['relative_to_threshold'], 'below')\n self.assertEqual(result.attributes['thresholded_using'],\n 'surface_altitude')", "def update(self, arm, reward, alpha=0.05, l=0.05):\n\n # Get context\n context = self.context.iloc[self.t, :]\n\n\n # Add price\n price_dict = {}\n productid_dict = {}\n \n for var in context.keys():\n price_dict[var + '_price'] = context[var] * self.df_arm_dummies.ix[arm, 'price']\n\n for i in range(10, 26):\n productid_dict[var + '_productid_' + str(i)] = context[var] * \\\n self.df_arm_dummies.ix[arm, 'productid_' + str(i)]\n\n print(\"Price dict is\")\n print(price_dict)\n print(productid_dict)\n \n\n#Age_price = context.Age * self.df_arm_dummies.ix[arm, 'price']\n#Agent_Linux_price = self.df_arm_dummies.ix[arm, 'price'] * context.Agent_Linux\n#Agent_OSX_price = self.df_arm_dummies.ix[arm, 'price'] * context.Agent_OSX\n#Agent_Windows_price = self.df_arm_dummies.ix[arm, 'price'] * context.Agent_Windows\n#Agent_mobile_price = self.df_arm_dummies.ix[arm, 'price'] * context.Agent_mobile\n#\n#\n#Language_EN_price = self.df_arm_dummies.ix[arm, 'price'] * context.Language_EN\n#Language_GE_price = self.df_arm_dummies.ix[arm, 'price'] * context.Language_GE\n#Language_NL_price = self.df_arm_dummies.ix[arm, 'price'] * context.Language_NL\n#Referer_Bing_price = self.df_arm_dummies.ix[arm, 'price'] * context.Referer_Bing\n#Referer_Google_price = self.df_arm_dummies.ix[arm, 'price'] * context.Referer_Google\n#\n\n combined = np.append(context, self.df_arm_dummies.iloc[arm, :])#.reshape(-1, 1)\n\n prices = prict_dict.items()\n\n # Combine with arm\n combined = np.append(combined,\n [Age_price,\n Agent_Linux_price,\n Agent_OSX_price,\n Agent_Windows_price,\n Agent_mobile_price,\n Language_EN_price,\n Language_GE_price,\n Language_NL_price,\n Referer_Bing_price,\n Referer_Google_price\n ]).reshape(-1, 1)\n \n if reward > 0:\n reward = 1\n else:\n reward = -1\n\n # Bayes\n self.B = self.B + np.dot(context, context)\n \n self.f = self.f + combined * reward\n\n self.mu_hat = np.dot(np.linalg.inv(self.B), self.f)\n\n self.mu = min(5, self.mu + 0.1 * (-0.5 + int(bool(reward))))\n\n # Update time step\n self.t += 1", "def calculate_conservation(self):\n for pchain in self.system.ProteinList:\n ConservationTools.fetch_msq_conservation(pchain)\n ConservationTools.calculate_conservation(self.system)\n self.rebuild_color_menu()", "def __init__(self):\n self.label = \"Grand WOFE\"\n self.description = \"From list of Evidence layers generate weights tables and output rasters from Calculate Respons and Logistic Regression.\"\n self.canRunInBackground = False\n self.category = \"Weights of Evidence\"", "def build_opt(self):\n student_taken = self.student.student_hist[['FULL','DESCR.y']]\n\n requiments = self.major_map.cleaned_major_data[['REQID','L','MA','CS','HU','SB','SQ','SG','C','G','H','Honor']]\n\n requiments.is_copy = False\n requiments['REQID'] = requiments['REQID'].astype('str')\n\n requirements_to = pd.merge(self.graph, requiments, how='inner', left_on='REQ', right_on='REQID')\n \n # requirements_to = pd.merge(self.graph, requiments, how='left', left_on='REQ', right_on='REQID')\n classes_from = pd.merge(self.graph, student_taken, how='left', left_on='CLS', right_on='FULL')\n\n return classes_from, 
requirements_to", "def main(attr, condition, val):\r\n data=dict()\r\n listOfData=list()\r\n PREFIX = \"\"\"\r\n PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\r\n PREFIX owl: <http://www.w3.org/2002/07/owl#>\r\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\r\n PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>\r\n PREFIX : <http://www.semanticweb.org/user/ontologies/2020/7/untitled-ontology-32#>\r\n \"\"\"\r\n FEATURES = [\"Age\", \"BS Fast\", \"BS pp\", \"Plasma R\", \"Plasma F\", \"HbAlc\"]\r\n\r\n request = PREFIX + \"\"\"\r\n SELECT ?age ?bsf ?bsp ?plasmaR ?plasmaF ?hbalc\r\n WHERE {{ \r\n ?subject :hasID ?id; \r\n :hasAge ?age;\r\n :hasBloodSugar ?bsf;\r\n :hasBloodPressure ?bsp;\r\n :hasPlasmaR ?plasmaR;\r\n :hasPlasmaF ?plasmaF;\r\n :hasHBALC ?hbalc.\r\n FILTER({attr} {condition} {val}).\r\n }}\r\n \"\"\".format(attr=attr, condition=condition, val=val)\r\n\r\n graph = connectOntology(os.path.join(settings.BASE_DIR, \"dbo3.owl\"))\r\n if graph:\r\n results = getData(graph, request)\r\n else:\r\n print(\"Data could not be fetched\")\r\n\r\n for result in results:\r\n for feature, row in zip(FEATURES, result):\r\n data[feature]=float(stripData(row))\r\n #print(\"{}: {}\".format(feature, stripData(row)))\r\n listOfData.append(data)\r\n\r\n #print((listOfData))\r\n return listOfData", "def total_organic_compound(self):\n return self.indoor_air_quality[1]", "def award_status_populator():\n award_status_list = funding_data[\"Project Status:\"].unique()\n return [{'label': i, 'value': i} for i in award_status_list]", "def computation_attributes(initial_condition, flux): \n \n group_path = initial_condition + \"/\" + flux \n \n computations_database = h5py.File(database_path, \"r\")\n attributes = {}\n attributes[\"a\"] = computations_database[group_path].attrs[\"a\"]\n attributes[\"T\"] = computations_database[group_path].attrs[\"T\"]\n attributes[\"CFL\"] = computations_database[group_path].attrs[\"CFL\"]\n computations_database.close()\n \n return attributes", "def stats(self):\n\t\t\n\t\tx = [self.authorities[i] for i in self.authorities if self.authorities[i] != 1]\n\t\txx = sorted(x)\n\t\tl = float(len(xx))\n\t\tprint \"-----------\"\n\t\tprint \"Population : \" + str(l)\n\t\tprint \"-----------\"\n\t\tprint \"Q1 = \" + str(xx[int(l/4)])\n\t\tprint \"Q3 = \" + str(xx[int(float(l/4)*3)])\n\t\tprint \"-----------\"\n\t\tprint \"01/08 = \" + str(xx[int(l/8)])\n\t\tprint \"07/08 = \" + str(xx[int(float(l/8)*7)])\n\t\tprint \"-----------\"\n\t\tprint \"01/16 = \" + str(xx[int(l/16)])\n\t\tprint \"15/16 = \" + str(xx[int(float(l/16)*15)])\n\t\tprint \"-----------\"\n\t\tprint \"01/32 = \" + str(xx[int(l/32)])\n\t\tprint \"31/32 = \" + str(xx[int(float(l/32)*31)])\n\t\tprint \"-----------\"\n\t\tprint \"01/64 = \" + str(xx[int(l/64)])\n\t\tprint \"63/64 = \" + str(xx[int(float(l/64)*63)])\n\t\tprint \"-----------\"\n\t\tprint \"01/128 = \" + str(xx[int(l/128)])\n\t\tprint \"127/128 = \" + str(xx[int(float(l/128)*127)])\n\t\tprint \"-----------\"\n\t\tprint \"01/256 = \" + str(xx[int(l/256)])\n\t\tprint \"255/256 = \" + str(xx[int(float(l/256)*255)])\n\t\tprint \"-----------\"\n\t\tprint \"01/512 = \" + str(xx[int(l/512)])\n\t\tprint \"511/512 = \" + str(xx[int(float(l/512)*511)])\n\t\tprint \"-----------\"", "def _compute_wo(self):\n self.uom_id = self.wo_id.uom_id.id\n self.final_product_id = self.wo_id.final_product_id.id\n self.service_ids = [x.product_id.id for x in self.wo_id.consumed_service_ids if x.product_id]", "def 
generate_huawei_2g_cell_level_discrepancies(self):\n engine = create_engine('postgresql://bodastage:password@database/bts')\n vendor_pk = 2\n tech_pk = 1\n schema_name = 'hua_cm_2g'\n\n conn = psycopg2.connect(\"dbname=bts user=bodastage password=password host=database\")\n conn.autocommit = True\n cur = conn.cursor()\n\n # Get MO\n sql = \"\"\"\n SELECT DISTINCT\n t3.name as mo,\n t3.pk as pk,\n t3.affect_level\n FROM \n live_network.base_line_values t1\n INNER JOIN vendor_parameters t2 on t2.pk = t1.parameter_pk\n INNER JOIN managedobjects t3 on t3.pk = t2.parent_pk \n AND t3.vendor_pk = {} AND t3.tech_pk = {}\n AND t3.affect_level = 1\n \"\"\".format(vendor_pk, tech_pk)\n cur.execute(sql)\n mo_list = cur.fetchall()\n\n for mo in mo_list:\n mo_name, mo_pk, mo_affect_level = mo\n\n # Get parameters\n sql = \"\"\"\n SELECT \n t2.name as pname,\n t2.pk as pk\n FROM \n live_network.base_line_values t1\n INNER JOIN vendor_parameters t2 on t2.pk = t1.parameter_pk\n INNER JOIN managedobjects t3 on t3.pk = t2.parent_pk \n INNER JOIN network_entities t4 on t4.pk = t3.affect_level\n AND t3.vendor_pk = {} AND t3.tech_pk = {}\n WHERE\n t3.name = '{}'\n \"\"\".format(vendor_pk, tech_pk, mo_name)\n cur.execute(sql)\n\n parameters = cur.fetchall()\n\n attr_list = [p[0] for p in parameters]\n\n str_param_values = \",\".join([\"t_mo.{0}{1}{0}\".format('\"', p) for p in attr_list])\n str_param_names = \",\".join([\"{0}{1}{0}\".format('\\'', p) for p in attr_list])\n\n # Join all cell level mos with the primary cell mo i.e. GCELL\n cell_level_join = \"\"\" INNER JOIN {0}.GCELL gcell ON gcell.\"CELLID\" = t_mo.\"CELLID\" AND gcell.neid = t_mo.neid \n AND gcell.module_type = t_mo.module_type \"\"\".format(schema_name)\n\n # Add new entries\n sql = \"\"\"\n INSERT INTO network_audit.network_baseline \n (node, site, cellname, mo, parameter, bvalue, nvalue, vendor, technology, age, modified_by, added_by, date_added, date_modified)\n SELECT TT1.* FROM (\n SELECT\n t8.name as node,\n t7.name as site,\n t4.cellname,\n t3.name as mo,\n t2.name as parameter,\n t1.value as bvalue,\n TRIM(t4.pvalue) as nvalue,\n t9.name as vendor,\n t10.name as technology,\n 1 as age,\n 0 as modified_by,\n 0 as added_by,\n date_time as date_added,\n date_time as date_modified\n from live_network.base_line_values t1\n INNER JOIN vendor_parameters t2 on t2.pk = t1.parameter_pk\n INNER JOIN managedobjects t3 on t3.pk = t2.parent_pk\n INNER JOIN live_network.baseline_parameter_config t5 on t5.mo_pk = t3.pk AND t5.parameter_pk = t2.pk\n INNER JOIN (\n SELECT * FROM (\n SELECT\n '{2}' as \"MO\",\n gcell.\"CELLNAME\" as cellname,\n gcell.\"varDateTime\" as date_time,\n unnest(array[{0}]) AS pname,\n unnest(array[{1}]) AS pvalue\n FROM\n hua_cm_2g.{2} t_mo\n {3}\n WHERE\n t_mo.module_type = 'Radio'\n ) TT\n ) t4 on t4.pname = t2.name AND trim(t4.pvalue) != t1.value\n INNER JOIN live_network.cells t6 on t6.name = t4.cellname\n INNER JOIN live_network.sites t7 on t7.pk = t6.site_pk\n INNER JOIN live_network.nodes t8 on t8.pk = t7.node_pk\n INNER JOIN vendors t9 on t9.pk = t6.vendor_pk\n INNER JOIN technologies t10 ON t10.pk = t6.tech_pk\n ) TT1\n LEFT JOIN network_audit.network_baseline TT2 on TT2.node = TT1.node\n AND TT2.site = TT1.site \n AND TT2.cellname = TT1.cellname\n AND TT2.mo = TT1.mo\n AND TT2.parameter = TT1.parameter\n AND TT2.bvalue = TT1.bvalue\n AND TT2.nvalue = TT1.nvalue\n WHERE\n TT2.cellname is NULL\n \"\"\".format(str_param_names, str_param_values, mo_name, cell_level_join)\n print(sql)\n cur.execute(sql)\n\n # Delete old 
entries\n sql = \"\"\"\n WITH rd AS (\n SELECT TT2.* FROM \n network_audit.network_baseline TT2\n LEFT JOIN \n (\n select\n t8.name as node,\n t7.name as site,\n t4.cellname,\n t3.name as mo,\n t2.name as parameter,\n t1.value as bvalue,\n TRIM(t4.pvalue) as nvalue,\n t9.name as vendor,\n t10.name as technology,\n 0 as modified_by,\n 0 as added_by,\n date_time as date_added,\n date_time as date_modified\n from live_network.base_line_values t1\n INNER JOIN vendor_parameters t2 on t2.pk = t1.parameter_pk\n INNER JOIN managedobjects t3 on t3.pk = t2.parent_pk\n INNER JOIN live_network.baseline_parameter_config t5 on t5.mo_pk = t3.pk AND t5.parameter_pk = t2.pk\n INNER JOIN (\n SELECT * FROM (\n SELECT\n '{2}' as \"MO\",\n gcell.\"CELLNAME\" as cellname,\n gcell.\"varDateTime\" as date_time,\n unnest(array[{0}]) AS pname,\n unnest(array[{1}]) AS pvalue\n FROM\n hua_cm_2g.{2} t_mo\n {3}\n WHERE\n t_mo.module_type = 'Radio'\n ) TT\n ) t4 on t4.pname = t2.name AND trim(t4.pvalue) != t1.value\n INNER JOIN live_network.cells t6 on t6.name = t4.cellname\n INNER JOIN live_network.sites t7 on t7.pk = t6.site_pk\n INNER JOIN live_network.nodes t8 on t8.pk = t7.node_pk\n INNER JOIN vendors t9 on t9.pk = t6.vendor_pk\n INNER JOIN technologies t10 ON t10.pk = t6.tech_pk\n ) TT1 ON TT2.node = TT1.node\n AND TT2.site = TT1.site \n AND TT2.cellname = TT1.cellname\n AND TT2.mo = TT1.mo\n AND TT2.parameter = TT1.parameter\n AND TT2.bvalue = TT1.bvalue\n AND TT2.nvalue = TT1.nvalue\n WHERE\n TT1.cellname IS NULL\n )\n DELETE FROM network_audit.network_baseline t1\n WHERE t1.pk IN (SELECT pk from rd)\n \"\"\".format(str_param_names, str_param_values, mo_name, cell_level_join)\n print(sql)\n cur.execute(sql)\n\n # Update old entries\n sql = \"\"\"\n WITH rd AS (\n SELECT TT2.pk, TT1.* FROM \n network_audit.network_baseline TT2\n INNER JOIN \n (\n select\n t8.name as node,\n t7.name as site,\n t4.cellname,\n t3.name as mo,\n t2.name as parameter,\n t1.value as bvalue,\n trim(t4.pvalue) as nvalue,\n t9.name as vendor,\n t10.name as technology,\n 0 as modified_by,\n 0 as added_by,\n date_time as date_added,\n date_time as date_modified\n from live_network.base_line_values t1\n INNER JOIN vendor_parameters t2 on t2.pk = t1.parameter_pk\n INNER JOIN managedobjects t3 on t3.pk = t2.parent_pk\n INNER JOIN live_network.baseline_parameter_config t5 on t5.mo_pk = t3.pk AND t5.parameter_pk = t2.pk\n INNER JOIN (\n SELECT * FROM (\n SELECT\n '{2}' as \"MO\",\n gcell.\"CELLNAME\" as cellname,\n gcell.\"varDateTime\" as date_time,\n unnest(array[{0}]) AS pname,\n unnest(array[{1}]) AS pvalue\n FROM\n hua_cm_2g.{2} t_mo\n {3}\n WHERE\n t_mo.module_type = 'Radio'\n ) TT\n ) t4 on t4.pname = t2.name AND trim(t4.pvalue) != t1.value\n INNER JOIN live_network.cells t6 on t6.name = t4.cellname\n INNER JOIN live_network.sites t7 on t7.pk = t6.site_pk\n INNER JOIN live_network.nodes t8 on t8.pk = t7.node_pk\n INNER JOIN vendors t9 on t9.pk = t6.vendor_pk\n INNER JOIN technologies t10 ON t10.pk = t6.tech_pk\n ) TT1 ON TT2.node = TT1.node\n AND TT2.site = TT1.site \n AND TT2.cellname = TT1.cellname\n AND TT2.mo = TT1.mo\n AND TT2.parameter = TT1.parameter\n AND TT2.bvalue = TT1.bvalue\n AND TT2.nvalue = TT1.nvalue\n )\n UPDATE network_audit.network_baseline AS nb\n SET \n date_modified = rd.date_added, \n age=DATE_PART('day',AGE(nb.date_added, rd.date_added))\n FROM \n rd \n where \n rd.pk = nb.pk\n \"\"\".format(str_param_names, str_param_values, mo_name, cell_level_join)\n print(sql)\n cur.execute(sql)", "def 
computeQValueFromValues(self, state, action):\n \"*** YOUR CODE HERE ***\"\n transition_state_probs = self.mdp.getTransitionStatesAndProbs(state, action)\n # Add each state and probability to q_value\n q_value = 0\n for state_, probability in transition_state_probs:\n state_reward = self.mdp.getReward(state, state_, action)\n q_value += probability * (state_reward + self.discount * self.values[state_])\n return q_value", "def computeQValueFromValues(self, state, action):\n\n ##util.raiseNotDefined()\n #\"*** YOUR CODE STARTS HERE ***\"\n # Code to remove --- from here\n transitions = self.mdp.getTransitionStatesAndProbabilities(state, action)\n qvalue = 0\n for (nextState, probability) in transitions:\n reward = self.mdp.getReward(state, action, nextState)\n qvalue += probability *(reward + self.discount*self.values[nextState])\n # Code to remove --- to here\n #\"*** YOUR CODE FINISHES HERE ***\"\n \n return qvalue", "def count_gates(qobj, basis, qubits):\n\n #TO DO\n pass", "def GetObjVal(self) :\n if not self.maxWeights : return 0\n elif 'pp' in self.__type : return pulp.lpSum(self.prodVars )\n elif self.__type == 'prBinCat' : return np.dot(self.wish.T, self.dispo ).sum() \n elif 'pr' in self.__type : return np.multiply(self.wish, self.dispo).sum()\n else : return 0", "def getETA():", "def getETA():", "def _quality_status_type():\n return {\n 'name' : 'quality_status_type',\n 'is_open' : False,\n 'doc' : None,\n 'members' : [\n ('reported', None),\n ('confirmed', None),\n ('partially_resolved', None),\n ('resolved', None),\n ],\n }", "def getQValue(self, state, action):\n features = self.featExtractor.getFeatures(state, action)\n total = 0\n for feat in features:\n total += self.getWeights()[feat] * features[feat]\n return total", "def produce_min(self, meta, raven_variables, dispatch, t):\n #balance = defaultdict(float)\n interaction = self.get_interaction()\n balance, meta = interaction.produce_min(meta, raven_variables, dispatch, t)\n #for resource, quantity in int_balance.items():\n # balance[resource] += quantity\n return balance, meta", "def doParametersOfInterest(self):\n self.modelBuilder.doVar(\"mu[1,0,100]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doSet(\"POI\",\"mu\")\n self.modelBuilder.factory_('expr::ggH_s_func(\"@0-sqrt(@0)\", mu)')\n self.modelBuilder.factory_( 'expr::ggH_b_func(\"1-sqrt(@0)\", mu)')\n self.modelBuilder.factory_( 'expr::ggH_sbi_func(\"sqrt(@0)\", mu)')", "def getFeatures(self, state, action):\n features = qutils.Qcounter()\n features['bias'] = 1.0\n\n if state is None:\n return features\n else:\n\n if self.id%2 == 0:\n plrCoords = state.board.plr_coords['r']\n oppCoords = state.board.plr_coords['b']\n else:\n plrCoords = state.board.plr_coords['b']\n oppCoords = state.board.plr_coords['r']\n\n goalState = GoalState(state.board.plr_coords['r'],state.board.plr_coords['b'],state.agents[self.id].hand,\n state.board.draft)\n if action['coords'] is not None:\n draftCoords = goalState.CardsToCoords([action['draft_card']])\n else:\n draftCoords = None\n\n features['euclideanDistanceCentroid'] = eucDist(action, plrCoords)\n features['neighbour'] = neighbour(action, plrCoords, oppCoords)\n features['heart'] = heart(action, plrCoords)\n features['blockHeart'] = blockHeart(action, oppCoords)\n features['eHorizontal'] = eHorizontal(state, action, plrCoords, oppCoords)\n features['eVertical'] = eVertical(state, action, plrCoords, oppCoords)\n features['eIandIIIDiag'] = eIandIIIDiagonal(state, action, plrCoords, 
oppCoords)\n features['eIIandIVDiag'] = eIIandIVDiagonal(state, action, plrCoords, oppCoords)\n features['draftHorizontal'] = draftHorizontal(state, plrCoords, oppCoords, draftCoords)\n features['draftVertical'] = draftVertical(state, plrCoords, oppCoords, draftCoords)\n features['draftDiagIandIII'] = draftDiagIandIII(state, plrCoords, oppCoords, draftCoords)\n features['draftDiagIIandIV'] = draftDiagIIandIV(state, plrCoords, oppCoords, draftCoords)\n features['draftJacks'] = DraftJacks(action)\n features['PlayCentre'] = PlayCentre(action)\n features['HeuristicValuePlace'] = HeuristicValue(action, goalState)\n features['HeuristicValueDraft'] = HeuristicValueDraft(action, goalState, draftCoords, self.gamma)\n return features", "def status_summary(self):\n base_query_set = super(PeeringSessionManager, self).get_queryset()\n summary = base_query_set.annotate(\n label=models.Case(\n models.When(provisioning_state=2, then=models.Case(\n models.When(admin_state=2, then=models.Case(\n models.When(operational_state=6,\n then=models.Value('Up')),\n default=models.Value('Down')\n )),\n default=models.Value('Admin Down')\n )),\n models.When(provisioning_state=1,\n then=models.Value('Provisioning')),\n default=models.Value('None'),\n output_field=models.CharField()\n )).values('label').annotate(value=models.Count('label'))\n return summary", "def specificity():\n\tatlas = 'power'\n\tproject='hcp'\n\tdf_columns=['Task','Hub Measure','Q+/Q-','Average Edge i-j Weight',\"Strength of r's, i's PC & j's Q\"]\n\ttasks = ['REST','WM','GAMBLING','RELATIONAL','MOTOR','LANGUAGE','SOCIAL',]\n\tknown_membership,network_names,num_nodes,name_int_dict = network_labels(atlas)\n\tdf = pd.DataFrame(columns = df_columns)\n\tfor task in tasks:\n\t\tprint task\n\t\t# subjects = np.array(hcp_subjects).copy()\n\t\t# subjects = list(subjects)\n\t\t# subjects = remove_missing_subjects(subjects,task,atlas)\n\t\tsubjects = np.load('/home/despoB/mb3152/dynamic_mod/results/%s_%s_%s_subs_fz.npy' %('hcp',task,atlas))\n\t\tstatic_results = graph_metrics(subjects,task,atlas,'fz')\n\t\tsubject_pcs = static_results['subject_pcs']\n\t\tsubject_wmds = static_results['subject_wmds']\n\t\tsubject_mods = static_results['subject_mods']\n\t\tsubject_wmds = static_results['subject_wmds']\n\t\tmatrices = static_results['matrices']\n\t\t#sum of weight changes for each node, by each node.\n\t\thub_nodes = ['WCD']\n\t\t# hub_nodes = ['PC']\n\t\tdriver_nodes_list = ['Q+','Q-']\n\t\t# driver_nodes_list = ['Q+']\n\t\tmean_pc = np.nanmean(subject_pcs,axis=0)\n\t\tmean_wmd = np.nanmean(subject_wmds,axis=0)\n\t\tmod_pc_corr = np.zeros(subject_pcs.shape[1])\n\t\tfor i in range(subject_pcs.shape[1]):\n\t\t\tmod_pc_corr[i] = nan_pearsonr(subject_mods,subject_pcs[:,i])[0]\n\t\tmod_wmd_corr = np.zeros(subject_wmds.shape[1])\n\t\tfor i in range(subject_wmds.shape[1]):\n\t\t\tmod_wmd_corr[i] = nan_pearsonr(subject_mods,subject_wmds[:,i])[0]\n\t\tfor hub_node in hub_nodes:\n\t\t\tif hub_node == 'PC':\n\t\t\t\tpc_edge_corr = np.arctanh(pc_edge_correlation(subject_pcs,matrices,path='/home/despoB/mb3152/dynamic_mod/results/%s_%s_%s_pc_edge_corr_z.npy' %(project,task,atlas)))\n\t\t\t\tconnector_nodes = np.where(mod_pc_corr>0.0)[0]\n\t\t\t\tlocal_nodes = np.where(mod_pc_corr<0.0)[0]\n\t\t\telse:\n\t\t\t\tpc_edge_corr = np.arctanh(pc_edge_correlation(subject_wmds,matrices,path='/home/despoB/mb3152/dynamic_mod/results/%s_%s_%s_wmd_edge_corr_z.npy' %(project,task,atlas)))\n\t\t\t\tconnector_nodes = np.where(mod_wmd_corr>0.0)[0]\n\t\t\t\tlocal_nodes = 
np.where(mod_wmd_corr<0.0)[0]\n\t\t\tedge_thresh_val = 50.0\n\t\t\tedge_thresh = np.percentile(np.nanmean(matrices,axis=0),edge_thresh_val)\n\t\t\tpc_edge_corr[:,np.nanmean(matrices,axis=0)<edge_thresh] = np.nan\n\t\t\tfor driver_nodes in driver_nodes_list:\n\t\t\t\tweight_change_matrix_between = np.zeros((num_nodes,num_nodes))\n\t\t\t\tweight_change_matrix_within = np.zeros((num_nodes,num_nodes))\n\t\t\t\tif driver_nodes == 'Q-':\n\t\t\t\t\tdriver_nodes_array = local_nodes\n\t\t\t\telse:\n\t\t\t\t\tdriver_nodes_array = connector_nodes\n\t\t\t\tfor n1,n2 in permutations(range(num_nodes),2):\n\t\t\t\t\tif n1 not in driver_nodes_array:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif known_membership[n2] == 0:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tarray = pc_edge_corr[n1][n2]\n\t\t\t\t\tweight_change_matrix_between[n1,n2] = np.nansum(pc_edge_corr[n1][n2][np.where((known_membership!=known_membership[n2])&(np.arange(264)!=n1))])\n\t\t\t\t\tweight_change_matrix_within[n1,n2] = np.nansum(pc_edge_corr[n1][n2][np.where((known_membership==known_membership[n2])&(np.arange(264)!=n1))])\n\t\t\t\t\t# for n3 in range(264):\n\t\t\t\t\t# \tif n1 == n3:\n\t\t\t\t\t# \t\tcontinue\n\t\t\t\t\t# \tif known_membership[n3]!= known_membership[n2]:\n\t\t\t\t\t# \t\tweight_change_matrix_between[n1,n2] = np.nansum([weight_change_matrix_between[n1,n2],array[n3]])\n\t\t\t\t\t# \t\tbetween_len = between_len + 1\n\t\t\t\t\t# \telse:\n\t\t\t\t\t# \t\tweight_change_matrix_within[n1,n2] = np.nansum([weight_change_matrix_within[n1,n2],array[n3]])\n\t\t\t\t\t# \t\tcommunity_len = community_len + 1\n\t\t\t\t\t# weight_change_matrix_within[n1,n2] = weight_change_matrix_within[n1,n2] / community_len\n\t\t\t\t\t# weight_change_matrix_between[n1,n2] = weight_change_matrix_between[n1,n2] / between_len\n\t\t\t\ttemp_matrix = np.nanmean(matrices,axis=0)\n\t\t\t\tweight_matrix = weight_change_matrix_within-weight_change_matrix_between\n\t\t\t\tweight_matrix[np.isnan(weight_matrix)] = 0.0\n\t\t\t\tif hub_node == 'PC':\n\t\t\t\t\tdf_columns=['Task','Hub Measure','Q+/Q-','Average Edge i-j Weight',\"Strength of r's, i's PC & j's Q\"]\n\t\t\t\telse:\n\t\t\t\t\tdf_columns=['Task','Hub Measure','Q+/Q-','Average Edge i-j Weight',\"Strength of r's, i's WCD & j's Q\"]\n\t\t\t\tdf_array = []\n\t\t\t\tfor i,j in zip(temp_matrix[weight_matrix!=0.0].reshape(-1),weight_matrix[weight_matrix!=0.0].reshape(-1)):\n\t\t\t\t\tdf_array.append([task,hub_node,driver_nodes,i,j])\n\t\t\t\tdf = pd.concat([df,pd.DataFrame(df_array,columns=df_columns)],axis=0)\n\t\t\t\tprint hub_node, driver_nodes\n\t\t\t\tprint pearsonr(weight_matrix[weight_matrix!=0.0].reshape(-1),temp_matrix[weight_matrix!=0.0].reshape(-1))\n\t\t\t\t1/0\n\n\t# plot_connectivity_results(df[(df['Q+/Q-']=='Q+') &(df['Hub Measure']=='PC')],\"Strength of r's, i's PC & j's Q\",'Average Edge i-j Weight','/home/despoB/mb3152/dynamic_mod/figures/edge_spec_pcqplus_%s.pdf'%(edge_thresh_val))\n\t# plot_connectivity_results(df[(df['Q+/Q-']=='Q-') &(df['Hub Measure']=='PC')],\"Strength of r's, i's PC & j's Q\",'Average Edge i-j Weight','/home/despoB/mb3152/dynamic_mod/figures/edge_spec_pcqminus_%s.pdf'%(edge_thresh_val))\n\t# plot_connectivity_results(df[(df['Q+/Q-']=='Q+') &(df['Hub Measure']=='WCD')],\"Strength of r's, i's WCD & j's Q\",'Average Edge i-j Weight','/home/despoB/mb3152/dynamic_mod/figures/edge_spec_wmdqplus_%s.pdf'%(edge_thresh_val))\n\t# plot_connectivity_results(df[(df['Q+/Q-']=='Q-') &(df['Hub Measure']=='WCD')],\"Strength of r's, i's WCD & j's Q\",'Average Edge i-j 
Weight','/home/despoB/mb3152/dynamic_mod/figures/edge_spec_wmdqminus_%s.pdf'%(edge_thresh_val))\n\t# \"\"\"\n\t# Are connector nodes modulating the edges that are most variable across subjects?\n\t# \"\"\"\n\t# atlas='power'\n\t# known_membership,network_names,num_nodes,name_int_dict = network_labels(atlas)\n\t# for task in tasks:\n\t# \tpc_thresh = 75\n\t# \tlocal_thresh = 25\n\t# \tsubjects = np.array(hcp_subjects).copy()\n\t# \tsubjects = list(subjects)\n\t# \tsubjects = remove_missing_subjects(subjects,task,atlas)\n\t# \tstatic_results = graph_metrics(subjects,task,atlas)\n\t# \tsubject_pcs = static_results['subject_pcs']\n\t# \tsubject_wmds = static_results['subject_wmds']\n\t# \tmatrices = static_results['matrices']\n\t# \tmatrices[:,np.nanmean(matrices,axis=0)<0.0] = np.nan\n\t# \tpc_edge_corr = np.arctanh(pc_edge_correlation(subject_wmds,matrices,path='/home/despoB/mb3152/dynamic_mod/results/%s_%s_%s_wmd_edge_corr_z.npy' %(project,task,atlas)))\n\t# \t# pc_edge_corr = pc_edge_correlation(subject_pcs,matrices,path='/home/despoB/mb3152/dynamic_mod/results/%s_%s_%s_pc_edge_corr_z.npy' %(project,task,atlas))\n\t# \tstd_mod = []\n\t# \ttstd = np.std(matrices,axis=0).reshape(-1)\n\t# \tfor i in range(num_nodes):\n\t# \t\tstd_mod.append(nan_pearsonr(pc_edge_corr[i].reshape(-1),tstd)[0])\n\t# \t# print task, pearsonr(np.nanmean(subject_pcs,axis=0),std_mod)\n\t# \tprint task, pearsonr(np.nanmean(subject_wmds,axis=0),std_mod)\n\t# \tplot_corr_matrix(np.std(matrices,axis=0),network_names.copy(),out_file=None,plot_corr=True,return_array=False)", "def radiator(env):\n envs = environments()\n check_env(env, envs)\n\n if env == '*':\n query_type = ''\n if get_db_version(puppetdb) < (4, 0, 0):\n query_type = 'type=default,'\n query = None\n metrics = get_or_abort(\n puppetdb.metric,\n 'puppetlabs.puppetdb.population:%sname=num-nodes' % query_type)\n num_nodes = metrics['Value']\n else:\n query = AndOperator()\n metric_query = ExtractOperator()\n\n query.add(EqualsOperator(\"catalog_environment\", env))\n query.add(EqualsOperator(\"facts_environment\", env))\n metric_query.add_field(FunctionOperator('count'))\n metric_query.add_query(query)\n\n metrics = get_or_abort(\n puppetdb._query,\n 'nodes',\n query=metric_query)\n num_nodes = metrics[0]['count']\n\n nodes = puppetdb.nodes(\n query=query,\n unreported=app.config['UNRESPONSIVE_HOURS'],\n with_status=True\n )\n\n stats = {\n 'changed_percent': 0,\n 'changed': 0,\n 'failed_percent': 0,\n 'failed': 0,\n 'noop_percent': 0,\n 'noop': 0,\n 'skipped_percent': 0,\n 'skipped': 0,\n 'unchanged_percent': 0,\n 'unchanged': 0,\n 'unreported_percent': 0,\n 'unreported': 0,\n }\n\n for node in nodes:\n if node.status == 'unreported':\n stats['unreported'] += 1\n elif node.status == 'changed':\n stats['changed'] += 1\n elif node.status == 'failed':\n stats['failed'] += 1\n elif node.status == 'noop':\n stats['noop'] += 1\n elif node.status == 'skipped':\n stats['skipped'] += 1\n else:\n stats['unchanged'] += 1\n\n try:\n stats['changed_percent'] = int(100 * (stats['changed'] /\n float(num_nodes)))\n stats['failed_percent'] = int(100 * stats['failed'] / float(num_nodes))\n stats['noop_percent'] = int(100 * stats['noop'] / float(num_nodes))\n stats['skipped_percent'] = int(100 * (stats['skipped'] /\n float(num_nodes)))\n stats['unchanged_percent'] = int(100 * (stats['unchanged'] /\n float(num_nodes)))\n stats['unreported_percent'] = int(100 * (stats['unreported'] /\n float(num_nodes)))\n except ZeroDivisionError:\n stats['changed_percent'] = 0\n 
stats['failed_percent'] = 0\n stats['noop_percent'] = 0\n stats['skipped_percent'] = 0\n stats['unchanged_percent'] = 0\n stats['unreported_percent'] = 0\n\n if ('Accept' in request.headers and\n request.headers[\"Accept\"] == 'application/json'):\n return jsonify(**stats)\n\n return render_template(\n 'radiator.html',\n stats=stats,\n total=num_nodes\n )", "def __init__(self):\n {}\n #generate a monoid Q\n self.monoid_Q = self.generateQ()[0]\n self.relationOfElements_Q = self.generateQ()[1]\n self.p_Position = self.generateQ()[2]\n self.qOfPosition = self.generateQ()[3]\n #print(self.qOfPosition)", "def doParametersOfInterest(self):\n \n self.modelBuilder.doVar('expr::cosW(\"0.87681811112\",)')\n self.modelBuilder.doVar('expr::sinW(\"0.48082221247\",)')\n self.modelBuilder.doVar('expr::mZ(\"91.2\",)')\n self.modelBuilder.doVar('expr::Lambda1(\"100.0\",)')\n self.modelBuilder.doVar('expr::e2(\"0.0917\",)')\n self.modelBuilder.doVar('expr::gs2(\"1.533\",)')\n\n # EFT Higgs basis couplings\n\n self.modelBuilder.doVar('cZ[0,-1,1]') \n self.modelBuilder.doVar(\"cZZ[0,-2,2]\") \n self.modelBuilder.doVar(\"cZZt[0,-2,2]\") \n self.modelBuilder.doVar(\"cZB[0,-6,6]\") \n\n poi='cZ,cZZ,cZZt,cZB'\n\n # Amplitude couplings from EFT couplings \n\n self.modelBuilder.doVar('expr::a1(\"@0+1\",cZ)') # (\"2*(@0+1)\",cZ) in AN/Paper but a1 = 1 for signal model and width calculation\n self.modelBuilder.doVar('expr::a2(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZ,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::a3(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZt,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::k1(\"@0*(@1*pow(@2,2)/(pow(@3,2)*pow(@4,2)))\",cZB,e2,Lambda1,sinW,mZ)')\n self.modelBuilder.doVar('expr::k1L1(\"@0/pow(@1,2)\",k1,Lambda1)')\n\n ###### gamma_H ########\n\n # SMEFT relationships for VV couplings (Expressed using amplitude couplings)\n\n self.modelBuilder.doVar('expr::kappa(\"1.0\",)')\n self.modelBuilder.doVar('expr::kappa_tilde(\"0.0\",)') \n\n self.modelBuilder.doVar('expr::a1_WW(\"@0\",a1)')\n self.modelBuilder.doVar('expr::a2_WW(\"@0*@0*@1\",cosW,a2)')\n self.modelBuilder.doVar('expr::a3_WW(\"@0*@0*@1\",cosW,a3)')\n self.modelBuilder.doVar('expr::k1_WW(\"(@2 / (@0*@0 - @1*@1) - 2*@1*@1*@3*@4*@4 /(@5*@5*(@0*@0 - @1*@1)))\",cosW,sinW,k1,a2,Lambda1,mZ)')\n self.modelBuilder.doVar('expr::k2_k1(\"2*@0*@1*@2/(@0*@0 - @1*@1)\",cosW,sinW,k1)')\n self.modelBuilder.doVar('expr::k2_a2(\"-2*@0*@1*@3*@4*@4/((@2*@2)*(@0*@0 - @1*@1))\",cosW,sinW,mZ,a2,Lambda1)')\n self.modelBuilder.doVar('expr::k2(\"@0 + @1\",k2_k1,k2_a2)')\n\n # Determine gamma_H from VV couplings\n\n zz_expr = '\"4*(@0*@0/4. + 0.1695*@3*@3 + 0.09076*@1*@1 + 0.03809*@2*@2 + 0.8095*@0*@3/2. + 0.5046*@0*@1/2. + 0.2092*@1*@3 + 0.1023*@4*@4 + 0.1901*@0*@4/2. + 0.07429*@3*@4 + 0.04710*@1*@4) \",a1,a2,a3,k1,k2'\n ww_expr = '\"4*(@0*@0/4. + 0.1320*@3*@3 + 0.1944*@1*@1 + 0.08075*@2*@2 + 0.7204*@0*@3/2. + 0.7437*@0*@1/2. + 0.2774*@3*@1) \",a1_WW,a2_WW,a3_WW,k1_WW'\n zgamma_expr = '\"4*(1.118600*@0*@0/4. +0.0035*@1*@1 - 0.125010*@0*@1/2. + 0.000003*@1*@1 - 0.00018*@1*@1 + 0.003100*@0*@1/2. +0.00126*@2*@2 + 0.000005*@2*@2 -0.00047*@2*@2)\",a1_WW,kappa,kappa_tilde'\n gg_expr = '\"(1.1068*@0*@0 + 0.0082*@0*@0 - 0.1150*@0*@0 + 2.5717*@1*@1 + 0.0091*@1*@1 - 0.1982*@1*@1)\",kappa,kappa_tilde'\n bb_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n cc_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n tautau_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n mumu_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n gmgm_expr = '\"4*(1.6054*@0*@0/4. 
+ 0.07312*@1*@1 - 0.6854*@0*@1/2. + 0.00002*@1*@1 - 0.0018*@1*@1 + 0.0085*@0*@1/2. + 0.1699*@2*@2 + 0.00002*@2*@2 - 0.0031*@2*@2)\",a1_WW,kappa,kappa_tilde'\n \n self.modelBuilder.doVar('expr::R_WW('+str(ww_expr)+')')\n self.modelBuilder.doVar('expr::R_ZZ('+str(zz_expr)+')')\n self.modelBuilder.doVar('expr::R_Zgamma('+str(zgamma_expr)+')')\n self.modelBuilder.doVar('expr::R_gg('+str(gg_expr)+')')\n self.modelBuilder.doVar('expr::R_bb('+str(bb_expr)+')')\n self.modelBuilder.doVar('expr::R_cc('+str(cc_expr)+')')\n self.modelBuilder.doVar('expr::R_tautau('+str(tautau_expr)+')')\n self.modelBuilder.doVar('expr::R_mumu('+str(mumu_expr)+')')\n self.modelBuilder.doVar('expr:R_gammagamma('+str(gmgm_expr)+')')\n\n self.modelBuilder.doVar('expr::gammaH(\"(0.5824*@0 + 0.2137*@1 + 0.08187*@2 + 0.06272*@3 + 0.02891*@4 + 0.02619*@5 + 0.002270*@6 + 0.001533*@7 + 0.0002176*@8 )/0.9998\",R_bb,R_WW,R_gg,R_tautau,R_cc,R_ZZ,R_gammagamma,R_Zgamma,R_mumu)') \n\n ###########################\n\n self.g1V = GetCoupTerms(1,1,1,-0.0001,\"1V\") # Compensate for scaling of k1 templates \n self.g2V = GetCoupTerms(1,1,1,-0.0001,\"2V\") \n \n self.modelBuilder.doVar(\"expr::g2V_1(\\\"\"+str(self.g2V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1(\\\"((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1_Neg(\\\"-1*((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.doVar(\"expr::g2V_2(\\\"\"+str(self.g2V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2(\\\"((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2_Neg(\\\"-1*((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.doVar(\"expr::g2V_3(\\\"\"+str(self.g2V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3(\\\"((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3_Neg(\\\"-1*((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.doVar(\"expr::g2V_4(\\\"\"+str(self.g2V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4(\\\"((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4_Neg(\\\"-1*((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.doVar(\"expr::g2V_5(\\\"\"+str(self.g2V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5(\\\"((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5_Neg(\\\"-1*((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.doVar(\"expr::g2V_6(\\\"\"+str(self.g2V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6(\\\"((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6_Neg(\\\"-1*((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.doVar(\"expr::g2V_7(\\\"\"+str(self.g2V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7(\\\"((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7_Neg(\\\"-1*((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.doVar(\"expr::g2V_8(\\\"\"+str(self.g2V[7])+\"\\\",)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T8(\\\"((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T8_Neg(\\\"-1*((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n self.modelBuilder.doVar(\"expr::g2V_9(\\\"\"+str(self.g2V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9(\\\"((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9_Neg(\\\"-1*((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.doVar(\"expr::g2V_10(\\\"\"+str(self.g2V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10(\\\"((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10_Neg(\\\"-1*((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.doVar(\"expr::g2V_11(\\\"\"+str(self.g2V[10])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11(\\\"((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11_Neg(\\\"-1*((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.doVar(\"expr::g2V_12(\\\"\"+str(self.g2V[11])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12(\\\"((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12_Neg(\\\"-1*((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.doVar(\"expr::g2V_13(\\\"\"+str(self.g2V[12])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13(\\\"((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13_Neg(\\\"-1*((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.doVar(\"expr::g2V_14(\\\"\"+str(self.g2V[13])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14(\\\"((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14_Neg(\\\"-1*((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.doVar(\"expr::g2V_15(\\\"\"+str(self.g2V[14])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15(\\\"((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15_Neg(\\\"-1*((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.doVar(\"expr::g2V_16(\\\"\"+str(self.g2V[15])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16(\\\"((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16_Neg(\\\"-1*((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.doVar(\"expr::g2V_17(\\\"\"+str(self.g2V[16])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17(\\\"((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17_Neg(\\\"-1*((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.doVar(\"expr::g2V_18(\\\"\"+str(self.g2V[17])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18(\\\"((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18_Neg(\\\"-1*((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n 
self.modelBuilder.doVar(\"expr::g2V_19(\\\"\"+str(self.g2V[18])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T19(\\\"((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T19_Neg(\\\"-1*((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.doVar(\"expr::g2V_20(\\\"\"+str(self.g2V[19])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20(\\\"((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20_Neg(\\\"-1*((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.doVar(\"expr::g2V_21(\\\"\"+str(self.g2V[20])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21(\\\"((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21_Neg(\\\"-1*((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.doVar(\"expr::g2V_22(\\\"\"+str(self.g2V[21])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22(\\\"((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22_Neg(\\\"-1*((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.doVar(\"expr::g2V_23(\\\"\"+str(self.g2V[22])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23(\\\"((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23_Neg(\\\"-1*((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.doVar(\"expr::g2V_24(\\\"\"+str(self.g2V[23])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24(\\\"((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24_Neg(\\\"-1*((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.doVar(\"expr::g2V_25(\\\"\"+str(self.g2V[24])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25(\\\"((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25_Neg(\\\"-1*((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.doVar(\"expr::g2V_26(\\\"\"+str(self.g2V[25])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26(\\\"((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26_Neg(\\\"-1*((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.doVar(\"expr::g2V_27(\\\"\"+str(self.g2V[26])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27(\\\"((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27_Neg(\\\"-1*((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.doVar(\"expr::g2V_28(\\\"\"+str(self.g2V[27])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28(\\\"((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28_Neg(\\\"-1*((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.doVar(\"expr::g2V_29(\\\"\"+str(self.g2V[28])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29(\\\"((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_29)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T29_Neg(\\\"-1*((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.doVar(\"expr::g2V_30(\\\"\"+str(self.g2V[29])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30(\\\"((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30_Neg(\\\"-1*((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.doVar(\"expr::g2V_31(\\\"\"+str(self.g2V[30])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31(\\\"((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31_Neg(\\\"-1*((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.doVar(\"expr::g2V_32(\\\"\"+str(self.g2V[31])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32(\\\"((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32_Neg(\\\"-1*((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.doVar(\"expr::g2V_33(\\\"\"+str(self.g2V[32])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33(\\\"((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33_Neg(\\\"-1*((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.doVar(\"expr::g2V_34(\\\"\"+str(self.g2V[33])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34(\\\"((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34_Neg(\\\"-1*((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.doVar(\"expr::g2V_35(\\\"\"+str(self.g2V[34])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35(\\\"((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35_Neg(\\\"-1*((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n \n self.modelBuilder.doVar(\"expr::g1V_1(\\\"\"+str(self.g1V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1(\\\"((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1_Neg(\\\"-1*((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.doVar(\"expr::g1V_2(\\\"\"+str(self.g1V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2(\\\"((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2_Neg(\\\"-1*((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.doVar(\"expr::g1V_3(\\\"\"+str(self.g1V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3(\\\"((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3_Neg(\\\"-1*((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.doVar(\"expr::g1V_4(\\\"\"+str(self.g1V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4(\\\"((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4_Neg(\\\"-1*((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.doVar(\"expr::g1V_5(\\\"\"+str(self.g1V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T5(\\\"((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, 
gammaH, g1V_5)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T5_Neg(\\\"-1*((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n self.modelBuilder.doVar(\"expr::g1V_6(\\\"\"+str(self.g1V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6(\\\"((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6_Neg(\\\"-1*((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.doVar(\"expr::g1V_7(\\\"\"+str(self.g1V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7(\\\"((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7_Neg(\\\"-1*((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.doVar(\"expr::g1V_8(\\\"\"+str(self.g1V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8(\\\"((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8_Neg(\\\"-1*((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.doVar(\"expr::g1V_9(\\\"\"+str(self.g1V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9(\\\"((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9_Neg(\\\"-1*((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.doVar(\"expr::g1V_10(\\\"\"+str(self.g1V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10(\\\"((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10_Neg(\\\"-1*((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n \n self.modelBuilder.doSet(\"POI\",poi)", "def generate_huawei_2g_site_level_discrepancies(self):\n engine = create_engine('postgresql://bodastage:password@database/bts')\n vendor_pk = 2\n tech_pk = 1\n schema_name = 'hua_cm_2g'\n\n conn = psycopg2.connect(\"dbname=bts user=bodastage password=password host=database\")\n conn.autocommit = True\n cur = conn.cursor()\n\n # Get MO\n sql = \"\"\"\n SELECT DISTINCT\n t3.name as mo,\n t3.pk as pk,\n t3.affect_level\n FROM \n live_network.base_line_values t1\n INNER JOIN vendor_parameters t2 on t2.pk = t1.parameter_pk\n INNER JOIN managedobjects t3 on t3.pk = t2.parent_pk \n AND t3.vendor_pk = {} AND t3.tech_pk = {}\n AND t3.affect_level = 4\n \"\"\".format(vendor_pk, tech_pk)\n cur.execute(sql)\n mo_list = cur.fetchall()\n\n for mo in mo_list:\n mo_name, mo_pk, mo_affect_level = mo\n\n # Get parameters\n sql = \"\"\"\n SELECT \n t2.name as pname,\n t2.pk as pk\n FROM \n live_network.base_line_values t1\n INNER JOIN vendor_parameters t2 on t2.pk = t1.parameter_pk\n INNER JOIN managedobjects t3 on t3.pk = t2.parent_pk \n INNER JOIN network_entities t4 on t4.pk = t3.affect_level\n AND t3.vendor_pk = {} AND t3.tech_pk = {}\n AND t3.affect_level = 4\n WHERE\n t3.name = '{}'\n \"\"\".format(vendor_pk, tech_pk, mo_name)\n cur.execute(sql)\n\n parameters = cur.fetchall()\n\n attr_list = [p[0] for p in parameters]\n\n str_param_values = \",\".join([\"t_mo.{0}{1}{0}\".format('\"', p) for p in attr_list])\n str_param_names = \",\".join([\"{0}{1}{0}\".format('\\'', p) for p in attr_list])\n\n # Join all cell level mos with the primary cell mo i.e. 
GCELL.\n # p_mo for primary MO\n cell_level_join = \"\"\" INNER JOIN {0}.BTS p_mo ON p_mo.\"BTSID\" = t_mo.\"BTSID\" AND p_mo.neid = t_mo.neid \n AND p_mo.module_type = t_mo.module_type \"\"\".format(schema_name)\n\n # Add new entries\n sql = \"\"\"\n INSERT INTO network_audit.baseline_site_parameters \n (node, site, mo, parameter, bvalue, nvalue, vendor, technology, age, modified_by, added_by, date_added, date_modified)\n SELECT TT1.* FROM (\n SELECT\n t8.name as node,\n t7.name as site,\n t3.name as mo,\n t2.name as parameter,\n t1.value as bvalue,\n TRIM(t4.pvalue) as nvalue,\n t9.name as vendor,\n t10.name as technology,\n 1 as age,\n 0 as modified_by,\n 0 as added_by,\n date_time as date_added,\n date_time as date_modified\n from live_network.base_line_values t1\n INNER JOIN vendor_parameters t2 on t2.pk = t1.parameter_pk\n INNER JOIN managedobjects t3 on t3.pk = t2.parent_pk\n INNER JOIN live_network.baseline_parameter_config t5 on t5.mo_pk = t3.pk AND t5.parameter_pk = t2.pk\n INNER JOIN (\n SELECT * FROM (\n SELECT\n '{2}' as \"MO\",\n p_mo.\"BTSNAME\" as sitename,\n p_mo.\"varDateTime\" as date_time,\n unnest(array[{0}]) AS pname,\n unnest(array[{1}]) AS pvalue\n FROM\n hua_cm_2g.{2} t_mo\n {3}\n ) TT\n ) t4 on t4.pname = t2.name AND trim(t4.pvalue) != t1.value\n INNER JOIN live_network.sites t7 on t7.name = t4.sitename \n INNER JOIN live_network.nodes t8 on t8.pk = t7.node_pk\n INNER JOIN vendors t9 on t9.pk = t7.vendor_pk\n INNER JOIN technologies t10 ON t10.pk = t7.tech_pk\n ) TT1\n LEFT JOIN network_audit.baseline_site_parameters TT2 on TT2.node = TT1.node\n AND TT2.site = TT1.site \n AND TT2.mo = TT1.mo\n AND TT2.parameter = TT1.parameter\n AND TT2.bvalue = TT1.bvalue\n AND TT2.nvalue = TT1.nvalue\n WHERE\n TT2.site is NULL\n \"\"\".format(str_param_names, str_param_values, mo_name, cell_level_join)\n print(sql)\n cur.execute(sql)\n\n # Delete old entries\n sql = \"\"\"\n WITH rd AS (\n SELECT TT2.* FROM \n network_audit.baseline_site_parameters TT2\n LEFT JOIN \n (\n select\n t8.name as node,\n t7.name as site,\n t3.name as mo,\n t2.name as parameter,\n t1.value as bvalue,\n TRIM(t4.pvalue) as nvalue,\n t9.name as vendor,\n t10.name as technology,\n 0 as modified_by,\n 0 as added_by,\n date_time as date_added,\n date_time as date_modified\n from live_network.base_line_values t1\n INNER JOIN vendor_parameters t2 on t2.pk = t1.parameter_pk\n INNER JOIN managedobjects t3 on t3.pk = t2.parent_pk\n INNER JOIN live_network.baseline_parameter_config t5 on t5.mo_pk = t3.pk AND t5.parameter_pk = t2.pk\n INNER JOIN (\n SELECT * FROM (\n SELECT\n '{2}' as \"MO\",\n p_mo.\"BTSNAME\" as sitename,\n p_mo.\"varDateTime\" as date_time,\n unnest(array[{0}]) AS pname,\n unnest(array[{1}]) AS pvalue\n FROM\n hua_cm_2g.{2} t_mo\n {3}\n ) TT\n ) t4 on t4.pname = t2.name AND trim(t4.pvalue) != t1.value\n INNER JOIN live_network.sites t7 on t7.name = t4.sitename \n INNER JOIN live_network.nodes t8 on t8.pk = t7.node_pk\n INNER JOIN vendors t9 on t9.pk = t7.vendor_pk\n INNER JOIN technologies t10 ON t10.pk = t7.tech_pk\n ) TT1 ON TT2.node = TT1.node\n AND TT2.site = TT1.site \n AND TT2.mo = TT1.mo\n AND TT2.parameter = TT1.parameter\n AND TT2.bvalue = TT1.bvalue\n AND TT2.nvalue = TT1.nvalue\n WHERE\n TT1.site IS NULL\n )\n DELETE FROM network_audit.baseline_site_parameters t1\n WHERE t1.pk IN (SELECT pk from rd)\n \"\"\".format(str_param_names, str_param_values, mo_name, cell_level_join)\n print(sql)\n cur.execute(sql)\n\n # Update old entries\n sql = \"\"\"\n WITH rd AS (\n SELECT TT2.pk, TT1.* 
FROM \n network_audit.baseline_site_parameters TT2\n INNER JOIN \n (\n select\n t8.name as node,\n t7.name as site,\n t3.name as mo,\n t2.name as parameter,\n t1.value as bvalue,\n trim(t4.pvalue) as nvalue,\n t9.name as vendor,\n t10.name as technology,\n 0 as modified_by,\n 0 as added_by,\n date_time as date_added,\n date_time as date_modified\n from live_network.base_line_values t1\n INNER JOIN vendor_parameters t2 on t2.pk = t1.parameter_pk\n INNER JOIN managedobjects t3 on t3.pk = t2.parent_pk\n INNER JOIN live_network.baseline_parameter_config t5 on t5.mo_pk = t3.pk AND t5.parameter_pk = t2.pk\n INNER JOIN (\n SELECT * FROM (\n SELECT\n '{2}' as \"MO\",\n p_mo.\"BTSNAME\" as sitename,\n p_mo.\"varDateTime\" as date_time,\n unnest(array[{0}]) AS pname,\n unnest(array[{1}]) AS pvalue\n FROM\n hua_cm_2g.{2} t_mo\n {3}\n ) TT\n ) t4 on t4.pname = t2.name AND trim(t4.pvalue) != t1.value\n INNER JOIN live_network.sites t7 on t7.name = t4.sitename \n INNER JOIN live_network.nodes t8 on t8.pk = t7.node_pk\n INNER JOIN vendors t9 on t9.pk = t7.vendor_pk\n INNER JOIN technologies t10 ON t10.pk = t7.tech_pk\n ) TT1 ON TT2.node = TT1.node\n AND TT2.site = TT1.site \n AND TT2.mo = TT1.mo\n AND TT2.parameter = TT1.parameter\n AND TT2.bvalue = TT1.bvalue\n AND TT2.nvalue = TT1.nvalue\n )\n UPDATE network_audit.baseline_site_parameters AS nb\n SET \n date_modified = rd.date_added, \n age=DATE_PART('day',AGE(nb.date_added, rd.date_added))\n FROM \n rd \n where \n rd.pk = nb.pk\n \"\"\".format(str_param_names, str_param_values, mo_name, cell_level_join)\n print(sql)\n cur.execute(sql)", "def doParametersOfInterest(self):\n self.modelBuilder.doVar(\"mu[0,0,1000]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doSet(\"POI\",\"mu\")\n self.modelBuilder.factory_('expr::vbfH_s_func(\"@0-sqrt(@0)\", mu)')\n self.modelBuilder.factory_( 'expr::vbfH_b_func(\"1-sqrt(@0)\", mu)')\n self.modelBuilder.factory_( 'expr::vbfH_sbi_func(\"sqrt(@0)\", mu)')", "def q_greedify_policy(env, V, pi, s, gamma):\n ### START CODE HERE ###\n ##q(s,a)=sigma(P(ss')*(gamma*V(s')+R(s,a,s'))\n q = np.zeros((env.action_space.n))\n for idx, action in enumerate(range(env.action_space.n)):\n for prob_next_state, next_state, reward_next_state, done in env.P[s][action]:\n q[idx] += prob_next_state * ((gamma * V[next_state]) + reward_next_state)\n\n greedy_action = np.argmax(q)\n # print(greedy_action)\n for action, action_prob in enumerate(pi[s]):\n if action == greedy_action:\n print(action, greedy_action)\n pi[s][action] = 1\n else:\n pi[s][action] = 0", "def getQValue(self, state, action):\n \"*** YOUR CODE HERE ***\"\n # OUR CODE HERE\n #get the value of the state\n qVal = self.values[state]\n #iterate through the MDP transition states from the current state\n for transitionState, probability in self.mdp.getTransitionStatesAndProbs(state, action):\n #q value = discount * expected value of reward of state\n qVal += self.discount * probability * self.values[transitionState]\n return qVal\n # END OUR CODE", "def generate_csv(self, context): # pylint: disable=R0912,R0914\n temp = self.percentage()\n total_state = self.unique_freq()\n offline_value = -1\n ghz_conversion = 1000000\n mhz_conversion = 1000\n with open(self.outfile, 'a+') as f:\n writer = csv.writer(f, delimiter=',')\n reader = csv.reader(f)\n # Create the header in the format below\n # workload name, iteration, state, A7 CPU0,A7 CPU1,A7 CPU2,A7 CPU3,A15 CPU4,A15 CPU5\n if sum(1 for row in reader) == 0:\n header_row = ['workload', 
'iteration', 'state']\n count = 0\n for cluster, states_list in enumerate(self.currentstates_of_clusters):\n for dummy_index in range(len(states_list)):\n header_row.append(\"{} CPU{}\".format(self.corename_of_clusters[cluster], count))\n count += 1\n writer.writerow(header_row)\n if offline_value in total_state:\n total_state.remove(offline_value) # remove the offline state\n for i in sorted(total_state):\n temprow = []\n temprow.extend([context.result.spec.label, context.result.iteration])\n if \"state{}\".format(i) in self.idlestate_description:\n temprow.append(self.idlestate_description[\"state{}\".format(i)])\n else:\n state_value = float(i)\n if state_value / ghz_conversion >= 1:\n temprow.append(\"{} Ghz\".format(state_value / ghz_conversion))\n else:\n temprow.append(\"{} Mhz\".format(state_value / mhz_conversion))\n for j in range(self.device.number_of_cores * self.multiply_factor):\n temprow.append(\"{0:.3f}\".format(temp[\"cpu{}\".format(j)][i]))\n writer.writerow(temprow)\n check_off = True # Checking whether core is OFFLINE\n for i in range(self.device.number_of_cores * self.multiply_factor):\n temp_val = \"{0:.3f}\".format(temp[\"cpu{}\".format(i)][offline_value])\n if float(temp_val) > 1:\n check_off = False\n break\n if check_off is False:\n temprow = []\n temprow.extend([context.result.spec.label, context.result.iteration])\n temprow.append(\"OFFLINE\")\n for i in range(self.device.number_of_cores * self.multiply_factor):\n temprow.append(\"{0:.3f}\".format(temp[\"cpu{}\".format(i)][offline_value]))\n writer.writerow(temprow)", "def uom(self):\n return self.__uom", "def report_update():\r\n resources[\"water\"] = resources[\"water\"] - MENU[order][\"ingredients\"][\"water\"]\r\n resources[\"milk\"] = resources[\"milk\"] - MENU[order][\"ingredients\"][\"milk\"]\r\n resources[\"coffee\"] = resources[\"coffee\"] - MENU[order][\"ingredients\"][\"coffee\"]\r\n resources[\"money\"] = resources[\"money\"] + total", "def get_summary_statistics(self):\n # Get log 10 total mutation count\n self.log_mut_count = np.log10(self.variant_df.shape[0])\n\n # Get the number of variants stratified by functional location of variant\n # E.g. Exon, Intron, 5'UTR, etc.\n self.functional_counts = pd.DataFrame(self.variant_df['Func.refGene'].value_counts())\n self.functional_counts.columns = [self.sample_name]\n \n # Get the number of variants stratified by exonic functional outcome of variant\n # E.g. 
Silent, Nonsense, Missense, etc.\n self.mutational_class_counts = (\n pd.DataFrame(self.variant_df['ExonicFunc.refGene'].value_counts())\n )\n self.mutational_class_counts.columns = [self.sample_name]\n \n # Get number of COSMIC curated events\n self.cosmic_variants = self.variant_df[self.variant_df['cosmic70'] != '.']\n self.cosmic_variants = self.cosmic_variants.assign(sample_name = self.sample_name,\n final_id = self.final_id)\n self.cosmic_variant_counts = self.cosmic_variants.shape[0]\n \n # Get depth summary\n self.depth_summary = pd.DataFrame(self.variant_df['depth'].astype(int).describe())\n self.depth_summary.columns = [self.sample_name]\n \n return self.functional_counts, self.mutational_class_counts, self.depth_summary", "def example_run_function(run, df):\n\n df['fcreward'] = calc.glm_groups.fraction(run.parent, 'ensure-vdrive-plus')\n df['reward'] = calc.glm_groups.fraction(run.parent, 'ensure')\n\n return df", "def getAllAttribute(self):\n\n self.shape_type = OpenMaya.MPlug(self.thisObj, self.iShapeType).asShort()\n self.draw_type = OpenMaya.MPlug(self.thisObj, self.iDrawingType).asShort()\n self.up_axis = OpenMaya.MPlug(self.thisObj, self.iUpAxis).asShort()\n self.xRay = OpenMaya.MPlug(self.thisObj, self.iXRay).asBool()\n self.billBoard = OpenMaya.MPlug(self.thisObj, self.iBillBoard).asBool()\n self.forceRefresh = OpenMaya.MPlug(self.thisObj, self.iForceRefresh).asBool()\n\n plug_edge_color = OpenMaya.MPlug(self.thisObj, self.iEdgeColor)\n self.edge_color = self.getMPoint(plug_edge_color)\n self.edge_opacity = OpenMaya.MPlug(self.thisObj, self.iEdgeOpacity).asFloat()\n\n plug_polygon_color = OpenMaya.MPlug(self.thisObj, self.iPolygonColor)\n self.polygon_color = self.getMPoint(plug_polygon_color)\n self.polygon_opacity = OpenMaya.MPlug(self.thisObj, self.iPolygonOpacity).asFloat()\n\n self.shape_size = OpenMaya.MPlug(self.thisObj, self.iShapeSize).asFloat()\n self.edge_size = OpenMaya.MPlug(self.thisObj, self.iEdgeSize).asFloat()\n\n plug_offset_position = OpenMaya.MPlug(self.thisObj, self.iPositionOffset)\n self.offset_position = self.getMPoint(plug_offset_position)\n plug_offset_rotation = OpenMaya.MPlug(self.thisObj, self.iRotationOffset)\n self.offset_rotation = self.getMPoint(plug_offset_rotation)", "def create_vuln_report():", "def usefulquantities(dffin):\n dffin['log_length_box'] = np.log(dffin['length_box_um'])\n dffin['time_min']=dffin['time_sec']/60\n dffin['pred_length_box_um'] = np.exp(dffin['pred_log_length'])\n dffin['unique_id'] = dffin['cell']+dffin['time_sec'].apply(lambda x:str(x))\n dffin['cv_gr']= dffin.groupby('cell')['pred_growth_rate'].transform(lambda x:\\\n np.std(x)/np.mean(x))\n dffin['std_gr']= dffin.groupby('cell')['pred_growth_rate'].transform(lambda x: np.std(x))\n dffin['mean_gr'] = dffin.groupby('cell')['pred_growth_rate'].transform(lambda x: np.mean(x))\n dffin['mean_len'] = dffin.groupby('cell')['pred_length_box_um'].transform(lambda x: np.mean(x))\n dffin['norm_pred_growth_rate'] = (dffin['pred_growth_rate']-dffin.groupby('cell')['pred_growth_rate'].transform(lambda\\\n x: np.mean(x)))/dffin.groupby('cell')['pred_growth_rate'].transform(lambda x: np.mean(x))\n dffin = rl.genalogy(dffin,'parent_cell') #Create genealogy\n dffin = rl.genalogy(dffin,'g_parent_cell')\n dffin = rl.genalogy(dffin,'g_g_parent_cell')\n dffin = dffin.set_index('unique_id')\n qq= dffin.groupby('cell').apply(lambda x: (x['pred_length_box_um']-x['pred_length_box_um'].iloc[0])/(x['pred_length_box_um'].iloc[-1]-x['pred_length_box_um'].iloc[0])).rename('add_len')\n jj= 
dffin.groupby('cell').apply(lambda x: (x['time_sec']-x['time_sec'].iloc[0])/(x['time_sec'].iloc[-1]-x['time_sec'].iloc[0])).rename('cell_cycle')\n return pd.concat([dffin, qq.reset_index().set_index('unique_id')['add_len'], jj.reset_index().set_index('unique_id')['cell_cycle']], axis=1, join='inner')", "def __init__(self, alpha = 0.4, gamma=0.2, n=1):\n self.epsilon = 0.3 # chance of taking a random action instead of the best\n\n # stats part\n\n self.reward=[]\n self.totalCount=0\n self.totalOnTarget=0\n self.shootCount=0\n\n\n self.phasesTemp=0\n self.phasesOnTarget=[]\n\n self.arrowAngleCount=dict()\n self.arrowAngleOn=dict()\n self.loadStats()\n\n ##QLearning varaibles\n self.q_table = {}\n self.loadTrainedData()\n self.n, self.alpha, self.gamma = n, alpha, gamma", "def main_paper(mode = 'q-diff'):\n mode_name = 'Shared Autonomy'\n if mode== 'override':\n mode_name = 'Overrider'\n\n #trains rl again and fills a q-table\n _, rl_agent = rl_alone(1000, fill_table = True)\n\n #prints the q-table\n for y in range(7):\n for x in range(7):\n state_ind = y*(7)+x\n q = rl_agent.get_q_values(state_ind)\n print('Q-table')\n print(state_ind, [x,y], np.around(q, decimals=5))\n\n in_li = []\n r_li = []\n rp_li=[]\n\n print(\"Begin looping through constraint values\")\n for i in np.arange(0.0, 1.025, 0.025):\n #cooperates and gets results\n rewards, actionsct, rewardsP = grid_human_co(coagent=rl_agent, threshold=i, verbose=False, mode=mode)\n print(\"Threshold: \", i)\n avgInter = np.mean(actionsct, axis=0)\n avgR = np.mean(rewards, axis=0)\n avgRP = np.mean(rewardsP, axis=0)\n in_li.append(avgInter)\n r_li.append(avgR)\n rp_li.append(avgRP)\n print('Avg num of interventions: ', avgInter)\n print('Avg reward: ', avgR)\n print('Avg reward penalized: ', avgRP)\n print()\n\n plt.figure()\n plt.plot(np.arange(0.0, 1.025, 0.025), in_li)\n plt.title(mode_name + ' Interventions')\n plt.xlabel(r\"$\\alpha$\")\n plt.ylabel('Average intervention count')\n plt.show()\n\n plt.figure()\n plt.title(mode_name + ' Returns', fontsize=14)\n plt.xlabel(r\"$\\alpha$\", fontsize=13)\n plt.ylabel('Average return', fontsize=13)\n plt.plot(np.arange(0.0, 1.025, 0.025), r_li,label='Average environment return')\n plt.plot(np.arange(0.0, 1.025, 0.025), rp_li,label=\"Average intervention return\")\n plt.legend(fontsize=9)\n plt.show()", "def total_occupancy_modifier(frame, data):\n\n occupancies = data.particles['Occupancy'][...]\n site_type = data.particles['Particle Type'][...]\n num_site_types = occupancies.shape[1] #Accessing the number of columns in the Occupancy matrix\n total_occupancy = np.sum(occupancies, axis=1) #Summing over all columns\n # NOTE By convention, the first half of types is Si, the second half is C\n is_si_site = site_type <= num_site_types//2\n is_c_site = site_type > num_site_types//2\n si_occupancy = np.sum(occupancies[:, :(num_site_types//2)], axis=1)\n c_occupancy = np.sum(occupancies[:, (num_site_types//2):], axis=1)\n data.particles_.create_property('Total Occupancy', data=total_occupancy.astype(int))\n data.particles_.create_property('Is Si Site', data=is_si_site.astype(int))\n data.particles_.create_property('Is C Site', data=is_c_site.astype(int))\n data.particles_.create_property('Si Occupancy', data=si_occupancy.astype(int))\n data.particles_.create_property('C Occupancy', data=c_occupancy.astype(int))", "def plans():", "def generate_huawei_2g_node_level_discrepancies(self):\n engine = create_engine('postgresql://bodastage:password@database/bts')\n vendor_pk = 2\n tech_pk = 1\n schema_name = 
'hua_cm_2g'\n\n conn = psycopg2.connect(\"dbname=bts user=bodastage password=password host=database\")\n conn.autocommit = True\n cur = conn.cursor()\n\n # Get MO\n sql = \"\"\"\n SELECT DISTINCT\n t3.name as mo,\n t3.pk as pk,\n t3.affect_level\n FROM \n live_network.base_line_values t1\n INNER JOIN vendor_parameters t2 on t2.pk = t1.parameter_pk\n INNER JOIN managedobjects t3 on t3.pk = t2.parent_pk \n AND t3.vendor_pk = {} AND t3.tech_pk = {}\n AND t3.affect_level = 7 -- BSC\n \"\"\".format(vendor_pk, tech_pk)\n cur.execute(sql)\n mo_list = cur.fetchall()\n\n for mo in mo_list:\n mo_name, mo_pk, mo_affect_level = mo\n\n # Get parameters\n sql = \"\"\"\n SELECT \n t2.name as pname,\n t2.pk as pk\n FROM \n live_network.base_line_values t1\n INNER JOIN vendor_parameters t2 on t2.pk = t1.parameter_pk\n INNER JOIN managedobjects t3 on t3.pk = t2.parent_pk \n INNER JOIN network_entities t4 on t4.pk = t3.affect_level\n AND t3.vendor_pk = {} AND t3.tech_pk = {}\n AND t3.affect_level = 7 -- BSC\n WHERE\n t3.name = '{}'\n \"\"\".format(vendor_pk, tech_pk, mo_name)\n cur.execute(sql)\n\n parameters = cur.fetchall()\n\n attr_list = [p[0] for p in parameters]\n\n str_param_values = \",\".join([\"t_mo.{0}{1}{0}\".format('\"', p) for p in attr_list])\n str_param_names = \",\".join([\"{0}{1}{0}\".format('\\'', p) for p in attr_list])\n\n # Join all cell level mos with the primary cell mo i.e. GCELL.\n # p_mo for primary MO\n cell_level_join = \"\"\" INNER JOIN {0}.BSCBASIC p_mo ON p_mo.neid = t_mo.neid \n AND p_mo.module_type = t_mo.module_type \"\"\".format(schema_name)\n\n # Add new entries\n sql = \"\"\"\n INSERT INTO network_audit.baseline_node_parameters \n (node, mo, parameter, bvalue, nvalue, vendor, technology, age, modified_by, added_by, date_added, date_modified)\n SELECT TT1.* FROM (\n SELECT\n t8.name as node,\n t3.name as mo,\n t2.name as parameter,\n t1.value as bvalue,\n TRIM(t4.pvalue) as nvalue,\n t9.name as vendor,\n t10.name as technology,\n 1 as age,\n 0 as modified_by,\n 0 as added_by,\n date_time as date_added,\n date_time as date_modified\n from live_network.base_line_values t1\n INNER JOIN vendor_parameters t2 on t2.pk = t1.parameter_pk\n INNER JOIN managedobjects t3 on t3.pk = t2.parent_pk\n INNER JOIN live_network.baseline_parameter_config t5 on t5.mo_pk = t3.pk AND t5.parameter_pk = t2.pk\n INNER JOIN (\n SELECT * FROM (\n SELECT\n '{2}' as \"MO\",\n p_mo.neid as node,\n p_mo.\"varDateTime\" as date_time,\n unnest(array[{0}]) AS pname,\n unnest(array[{1}]) AS pvalue\n FROM\n hua_cm_2g.{2} t_mo\n {3}\n ) TT\n ) t4 on t4.pname = t2.name AND trim(t4.pvalue) != t1.value \n INNER JOIN live_network.nodes t8 on t8.name = t4.node\n INNER JOIN vendors t9 on t9.pk = t8.vendor_pk\n INNER JOIN technologies t10 ON t10.pk = t8.tech_pk\n ) TT1\n LEFT JOIN network_audit.baseline_node_parameters TT2 on TT2.node = TT1.node\n AND TT2.mo = TT1.mo\n AND TT2.parameter = TT1.parameter\n AND TT2.bvalue = TT1.bvalue\n AND TT2.nvalue = TT1.nvalue\n WHERE\n TT2.node is NULL\n \"\"\".format(str_param_names, str_param_values, mo_name, cell_level_join)\n print(sql)\n cur.execute(sql)\n\n # Delete old entries\n sql = \"\"\"\n WITH rd AS (\n SELECT TT2.* FROM \n network_audit.baseline_node_parameters TT2\n LEFT JOIN \n (\n select\n t8.name as node,\n t3.name as mo,\n t2.name as parameter,\n t1.value as bvalue,\n TRIM(t4.pvalue) as nvalue,\n t9.name as vendor,\n t10.name as technology,\n 0 as modified_by,\n 0 as added_by,\n date_time as date_added,\n date_time as date_modified\n from 
live_network.base_line_values t1\n INNER JOIN vendor_parameters t2 on t2.pk = t1.parameter_pk\n INNER JOIN managedobjects t3 on t3.pk = t2.parent_pk\n INNER JOIN live_network.baseline_parameter_config t5 on t5.mo_pk = t3.pk AND t5.parameter_pk = t2.pk\n INNER JOIN (\n SELECT * FROM (\n SELECT\n '{2}' as \"MO\",\n p_mo.neid as node,\n p_mo.\"varDateTime\" as date_time,\n unnest(array[{0}]) AS pname,\n unnest(array[{1}]) AS pvalue\n FROM\n hua_cm_2g.{2} t_mo\n {3}\n ) TT\n ) t4 on t4.pname = t2.name AND trim(t4.pvalue) != t1.value\n INNER JOIN live_network.nodes t8 on t8.name = t4.node\n INNER JOIN vendors t9 on t9.pk = t8.vendor_pk\n INNER JOIN technologies t10 ON t10.pk = t8.tech_pk\n ) TT1 ON TT2.node = TT1.node\n AND TT2.mo = TT1.mo\n AND TT2.parameter = TT1.parameter\n AND TT2.bvalue = TT1.bvalue\n AND TT2.nvalue = TT1.nvalue\n WHERE\n TT1.node IS NULL\n )\n DELETE FROM network_audit.baseline_node_parameters t1\n WHERE t1.pk IN (SELECT pk from rd)\n \"\"\".format(str_param_names, str_param_values, mo_name, cell_level_join)\n print(sql)\n cur.execute(sql)\n\n # Update old entries\n sql = \"\"\"\n WITH rd AS (\n SELECT TT2.pk, TT1.* FROM \n network_audit.baseline_node_parameters TT2\n INNER JOIN \n (\n select\n t8.name as node,\n t3.name as mo,\n t2.name as parameter,\n t1.value as bvalue,\n trim(t4.pvalue) as nvalue,\n t9.name as vendor,\n t10.name as technology,\n 0 as modified_by,\n 0 as added_by,\n date_time as date_added,\n date_time as date_modified\n from live_network.base_line_values t1\n INNER JOIN vendor_parameters t2 on t2.pk = t1.parameter_pk\n INNER JOIN managedobjects t3 on t3.pk = t2.parent_pk\n INNER JOIN live_network.baseline_parameter_config t5 on t5.mo_pk = t3.pk AND t5.parameter_pk = t2.pk\n INNER JOIN (\n SELECT * FROM (\n SELECT\n '{2}' as \"MO\",\n p_mo.neid as node,\n p_mo.\"varDateTime\" as date_time,\n unnest(array[{0}]) AS pname,\n unnest(array[{1}]) AS pvalue\n FROM\n hua_cm_2g.{2} t_mo\n {3}\n ) TT\n ) t4 on t4.pname = t2.name AND trim(t4.pvalue) != t1.value\n INNER JOIN live_network.nodes t8 on t8.name = t4.node\n INNER JOIN vendors t9 on t9.pk = t8.vendor_pk\n INNER JOIN technologies t10 ON t10.pk = t8.tech_pk\n ) TT1 ON TT2.node = TT1.node\n AND TT2.mo = TT1.mo\n AND TT2.parameter = TT1.parameter\n AND TT2.bvalue = TT1.bvalue\n AND TT2.nvalue = TT1.nvalue\n )\n UPDATE network_audit.baseline_node_parameters AS nb\n SET \n date_modified = rd.date_added, \n age=DATE_PART('day',AGE(nb.date_added, rd.date_added))\n FROM \n rd \n where \n rd.pk = nb.pk\n \"\"\".format(str_param_names, str_param_values, mo_name, cell_level_join)\n print(sql)\n cur.execute(sql)", "def update_compartments(self, food_glucose):\n self.g_t, self.m_t = self.new_values(food_glucose, self.get_variables())", "def get_reward(self):\n # Ver list\n self.Verlist = {\n '1': False,\n '2': False,\n '3': False,\n '4': False,\n '5': True,\n }\n # --------------------------------- NEW ----\n r = 0\n if self.ENVGetSIReset:\n V = {\n 'CoolRateTemp': self.DRateFun(self.mem['KCNTOMS']['Val']),\n 'CurrentTemp': self.mem['UAVLEG2']['Val'],\n 'CurrentPres': self.mem['ZINST65']['Val'],\n 'Dis': abs(self.DRateFun(self.mem['KCNTOMS']['Val']) - self.mem['UAVLEG2']['Val']),\n 'PZRLevel': self.mem['ZINST63']['Val'],\n 'SG1Nar': self.mem['ZINST78']['Val'], 'SG2Nar': self.mem['ZINST77']['Val'],\n 'SG3Nar': self.mem['ZINST76']['Val'],\n 'SG1Wid': self.mem['ZINST72']['Val'], 'SG2Wid': self.mem['ZINST71']['Val'],\n 'SG3Wid': self.mem['ZINST70']['Val'],\n 'SG1Pres': self.mem['ZINST75']['Val'], 'SG2Pres': 
self.mem['ZINST74']['Val'],\n 'SG3Pres': self.mem['ZINST73']['Val'],\n }\n if self.Verlist['1']:\n # Cooling rate에 따라서 온도 감소\n r -= V['Dis'] / 100\n # 가압기 수위 10 아래 종료\n # if V['PZRLevel'] <= 10: r -= 100\n if self.Verlist['2']:\n # 목표치까지 도달\n r += (29.5 - V['CurrentPres']) / 100\n r += (170 - V['CurrentTemp']) / 100\n if self.Verlist['3']:\n # Cooling rate에 따라서 온도 감소\n dis_reward = - V['Dis'] / 100 # [0.0 ~ -0.2] 동향을 보임\n # Pressure and Temp Dis\n curp = 29.5 if V['CurrentPres'] <= 29.5 else V['CurrentPres']\n curt = 170 if V['CurrentTemp'] <= 170 else V['CurrentTemp']\n dis_pres = (29.5 - V['CurrentPres']) / 100\n dis_temp = (170 - V['CurrentTemp']) / 100\n\n # r += (dis_pres * 0.1) + (dis_temp * 0.1) + (dis_reward * 10) # 감압 X\n r += (dis_pres * 0.1) + (dis_reward * 5)\n if self.Verlist['4']:\n # Cooling rate에 따라서 온도 감소\n dis_reward = - V['Dis'] / 100 # [0.0 ~ -0.2] 동향을 보임\n # Pressure and Temp Dis\n curp = 29.5 if V['CurrentPres'] <= 29.5 else V['CurrentPres']\n dis_pres = (29.5 - V['CurrentPres']) / 100\n PT_reward = - PTCureve().Check(Temp=V['CurrentTemp'], Pres=V['CurrentPres'])\n r += (dis_pres * 0.1) + (dis_reward * 5) + (PT_reward * 0.1)\n if self.Verlist['5']:\n r = 0\n # 1] Cooling rate에 따라서 온도 감소\n coolrate_r = - V['Dis']\n # 2] 가압기 수위 20~76% 구간 초과시 패널티\n pzrlevel_r = 0\n if 20 <= V['PZRLevel'] <= 76:\n pass\n else:\n if 20 > V['PZRLevel']:\n pzrlevel_r -= (20 - V['PZRLevel'])\n else:\n pzrlevel_r -= (V['PZRLevel'] - 76)\n # 3] 증기 발생기 6% ~ 50% 이상 초과 시 패널티\n sg_r = 0\n for _ in range(1, 4):\n if 6 <= V[f'SG{_}Nar'] <= 50:\n pass\n else:\n if 6 > V[f'SG{_}Nar']:\n sg_r -= (6 - V[f'SG{_}Nar'])\n else:\n sg_r -= (V[f'SG{_}Nar'] - 50)\n # 4] PT 커브에서 벗어나면 거리만큼 패널티\n PT_reward = - PTCureve().Check_Dis(Temp=V['CurrentTemp'], Pres=V['CurrentPres'])\n # 5] 목표치와 가까워 질 수록 +\n pres_r, temp_r = 0, 0\n pres_r = (29.5 - V['CurrentPres'])\n temp_r = (170 - V['CurrentTemp'])\n # 6] S/G 압력\n Avg_pres = (V['SG1Pres'] + V['SG2Pres'] + V['SG3Pres'])/3\n SGpres_r = 9 - Avg_pres if Avg_pres > 9 else 0\n # --------------------------------------------------------------\n w = {\n 'coolrate_r': [coolrate_r, 2],\n 'pzrlevel_r': [pzrlevel_r, 1],\n 'sg_r': [sg_r, 1.5],\n 'PT_reward': [PT_reward, 3],\n 'pres_r': [pres_r, 1],\n 'temp_r': [temp_r, 0.5],\n 'SGpres_r': [SGpres_r, 0.5]\n }\n\n log_txt_temp = ''\n for key in w.keys():\n r += w[key][0] * w[key][1]\n log_txt_temp += f'[{round(w[key][0]*w[key][1], 1)}:{w[key][0]}*{w[key][1]}]_'\n log_txt_temp = f'R:{r} = ' + log_txt_temp\n\n self.Loger_txt += log_txt_temp\n\n # self.Loger_txt += f\"R:{r} = {dis_pres * 0.1}+{dis_temp * 0.1}+({dis_reward * 10})\\t\"\n # self.Loger_txt += f\"R:{r} = {dis_pres * 0.1}+({dis_reward * 5})\\t\" #Verlist['3']\n # self.Loger_txt += f\"R:{r} = {dis_pres * 0.1}+({dis_reward * 5})+({PT_reward * 0.1})\\t\"\n\n # --------------------------------- Send R ----\n self.AcumulatedReward += r\n # self.Loger_txt += f'{r}\\t'\n self.DIS_CSF_Info += f'[R: {r}]\\t'\n return r", "def query_tas_status(self, attributes):\n def _tas_status_callback(mqtt, userdata, msg):\n for k, v in attributes['values'].items():\n setattr(self, k, nested_get(literal_eval(msg.payload.decode('UTF-8')), v))\n s_topic = '{s_topic}/{stat_topic}'.format(stat_topic=attributes['stat_topic'], **self)\n c_topic = '{c_topic}/status'.format(**self)\n self.mqtt.message_callback_add(s_topic, _tas_status_callback)\n self.mqtt.connect(self.mqtt_host)\n self.mqtt.subscribe(s_topic)\n starttime = datetime.datetime.now()\n self.mqtt.publish(c_topic, attributes['status_payload'])\n 
# check and see if the last attribute has been found yet\n while getattr(self, list(attributes['values'].keys())[-1]) == '' and (datetime.datetime.now() - starttime).total_seconds() < loop_time:\n self.mqtt.loop(timeout=loop_time)\n self.mqtt.unsubscribe(s_topic)\n self.mqtt.message_callback_remove(s_topic)\n self.mqtt.disconnect()", "def computeQValueFromValues(self, state, action):\n \"*** YOUR CODE HERE ***\"\n qValue = 0\n transitions = self.mdp.getTransitionStatesAndProbs(state, action)\n #print('Transitions: ' + str(transitions))\n for t in transitions:\n nextState, prob = t\n reward = self.mdp.getReward(state, action, nextState)\n #print('Reward: ' + str(reward))\n oneTransition = prob * (reward + self.discount * self.values[nextState])\n qValue = qValue + oneTransition\n return qValue", "def printClassifier(self):\n classifier_info = \"\"\n for att in range(cons.env.format_data.numb_attributes):\n att_info = cons.env.format_data.attribute_info[att]\n if att in self.specified_attributes: #If the attribute was specified in the rule\n i = self.specified_attributes.index(att)\n #-------------------------------------------------------\n # CONTINUOUS ATTRIBUTE\n #-------------------------------------------------------\n if att_info[0]:\n classifier_info += str(self.condition[i][0])+';'+str(self.condition[i][1]) + \"\\t\"\n #-------------------------------------------------------\n # DISCRETE ATTRIBUTE\n #-------------------------------------------------------\n else:\n classifier_info += str(self.condition[i]) + \"\\t\"\n else: # Attribute is wild.\n classifier_info += '#' + \"\\t\"\n #-------------------------------------------------------------------------------\n specificity = len(self.condition) / float(cons.env.format_data.numb_attributes)\n\n if cons.env.format_data.discrete_action:\n classifier_info += str(self.action)+\"\\t\"\n else:\n classifier_info += str(self.action[0])+';'+str(self.action[1])+\"\\t\"\n #------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n classifier_info += '{:.1f}'.format(self.prediction)+\"\\t\"+'{:.2f}'.format(self.error)+\"\\t\"+'{:.2f}'.format(self.fitness)+\"\\t\"+str(self.numerosity)+\"\\t\"+str(self.ga_count)+\"\\t\"\n classifier_info += '{:.1f}'.format(self.mean_actionset_sz)+\"\\t\\t\"+str(self.ga_timestamp)+\"\\t\\t\"+str(self.init_timestamp)+\"\\t\\t\"+'{:.2f}'.format(specificity)+\"\\t\\t\"\n classifier_info += '{:.1f}'.format(self.delete_vote)+\"\\t\\t\"+str(self.action_cnt)+\"\\n\"\n\n #------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n return classifier_info", "def gen_values(self):", "def testHealthAssessPyoGangrenosum(self):\n attr = self.session.create_visit_attr()\n\n self.util.boolTypeTest(self, attr, \"pyo_gangrenosum\")\n\n self.util.boolPropertyTest(self, attr, \"pyo_gangrenosum\")" ]
[ "0.5788508", "0.5613959", "0.55464876", "0.55033845", "0.546878", "0.54424125", "0.53677684", "0.5304037", "0.5268452", "0.52517754", "0.5244365", "0.52423894", "0.52423894", "0.52004385", "0.5194574", "0.51699513", "0.5161064", "0.51408136", "0.5137545", "0.51323235", "0.5128762", "0.51139414", "0.51139414", "0.51110953", "0.51098883", "0.5106512", "0.5093523", "0.50804543", "0.5044875", "0.5017072", "0.50113595", "0.4984885", "0.49575996", "0.4947293", "0.49444604", "0.49420834", "0.49404266", "0.4933163", "0.4932108", "0.49280387", "0.4925855", "0.4911256", "0.49091196", "0.49059224", "0.4902755", "0.4900076", "0.4889757", "0.4877074", "0.4872721", "0.48711023", "0.48659965", "0.4861705", "0.48609224", "0.4860133", "0.48570493", "0.4854893", "0.48544404", "0.48538142", "0.48524046", "0.48457626", "0.48390025", "0.48342735", "0.48295903", "0.48278478", "0.48269388", "0.48254612", "0.48254612", "0.48201248", "0.48180288", "0.48163432", "0.48152396", "0.481131", "0.48092446", "0.48089147", "0.48013926", "0.4801349", "0.48012245", "0.47998506", "0.47997886", "0.47986758", "0.47898224", "0.47823304", "0.47816226", "0.47774962", "0.47757077", "0.47726786", "0.47721675", "0.47706884", "0.4761671", "0.47596312", "0.47595066", "0.4759272", "0.47591814", "0.4755884", "0.4755341", "0.4753581", "0.4751739", "0.47505227", "0.47415063", "0.4739106", "0.47379848" ]
0.0
-1
Calculate the heat recovery
def calc_lost_heat_recovery (self):
    if not self.cd['heat recovery operational']:
        self.lost_heat_recovery = [0]
    else:
        gen_eff = self.cd["diesel generation efficiency"]
        self.lost_heat_recovery = \
            (self.generation / gen_eff )* .10
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calc_loss_heat_recovery (self):\n hr_used = self.cd['heat recovery operational']\n self.loss_heat_recovery = 0\n if hr_used:# == 'Yes':\n self.loss_heat_recovery = self.electric_diesel_reduction * \\\n (self.comp_specs['percent heat recovered'] / 100.0)\n #~ print 'self.loss_heat_recovery',self.loss_heat_recovery", "def generateHeatMask( im ):\n featref = computeFeatures(im)\n h,w,nbp = im.shape\n print(\"w:%d,h:%d\"%(w,h))\n heatMap = np.zeros((h,w,1), dtype=np.int8)\n black = [0,0,0]\n white = [255,255,255]\n rMaxDiff = 0\n for j in range(h):\n print(\"j:%d\" % j )\n for i in range(w):\n #~ print(\"i:%d\" % i )\n arDiff = []\n for color in [black,white]:\n #~ print(\"color:%s\" % color)\n imt = np.copy(im)\n \n # quite same time on my tablet !!! \n # (would have think the [] would be far fastest!)\n if 0:\n cv2.circle(imt, (i,j), 1, color )\n else:\n imt[j,i]=color\n if 0:\n cv2.imshow(\"imt\",imt)\n cv2.waitKey(1)\n #~ feat = computeFeatures(imt)\n #~ rDiff = diffFeatures(featref,feat)\n rDiff = mseFloat(im,imt)\n arDiff.append(rDiff)\n #~ print(rDiff)\n rDiff = max(arDiff)\n if rDiff > rMaxDiff:\n rMaxDiff = rDiff\n heatMap[j,i] = rDiff*10\n print(\"rMaxDiff: %5.3f\" % rMaxDiff )\n #~ print(dir(cv2))\n #~ heatMap = cv2.resize(heatMap,(w*2,h*2))\n cv2.namedWindow(\"heat\",cv2.CV_WINDOW_AUTOSIZE|cv2.WINDOW_NORMAL)\n cv2.imshow(\"heat\",heatMap)\n cv2.resizeWindow(\"heat\",600,480)\n cv2.waitKey(0)", "def manipulate_heat_data(self): \n self.exh.T_array = ( 0.5 * (self.exh.T_inlet_array +\n self.exh.T_outlet_array) + 273.15)\n self.exh.delta_T_array = ( self.exh.T_inlet_array -\n self.exh.T_outlet_array )\n \n self.cool.delta_T_array = ( self.cool.T_inlet_array -\n self.cool.T_outlet_array )\n self.cool.C = self.cool.mdot * self.cool.c_p", "def get_specific_heat() -> float:\n return 1006.0", "def post_heatdiag(self,ds):\n #\n self.drmid=self.rmid*0 # mem allocation\n self.drmid[1:-1]=(self.rmid[2:]-self.rmid[0:-2])*0.5\n self.drmid[0]=self.drmid[1]\n self.drmid[-1]=self.drmid[-2]\n\n dt = np.zeros_like(self.time)\n dt[1:] = self.time[1:] - self.time[0:-1]\n dt[0] = dt[1]\n rst=np.nonzero(dt<0) #index when restat happen\n dt[rst]=dt[rst[0]+1]\n self.dt = dt\n\n #get separatrix r\n self.rs=np.interp([1],self.psin,self.rmid)\n \n self.rmidsepmm=(self.rmid-self.rs)*1E3 # dist from sep in mm\n\n #get heat\n self.qe=np.transpose(self.e_perp_energy_psi + self.e_para_energy_psi)/dt/ds\n self.qi=np.transpose(self.i_perp_energy_psi + self.i_para_energy_psi)/dt/ds\n self.ge=np.transpose(self.e_number_psi)/dt/ds\n self.gi=np.transpose(self.i_number_psi)/dt/ds\n\n self.qe = np.transpose(self.qe)\n self.qi = np.transpose(self.qi)\n self.ge = np.transpose(self.ge)\n self.gi = np.transpose(self.gi)\n\n self.qt=self.qe+self.qi\n #imx=self.qt.argmax(axis=1)\n mx=np.amax(self.qt,axis=1)\n self.lq_int=mx*0 #mem allocation\n\n for i in range(mx.shape[0]):\n self.lq_int[i]=np.sum(self.qt[i,:]*self.drmid)/mx[i]", "def heat(self, delta_temp):\n return self.heat_capacity * self.mass * delta_temp", "def calculateTemperature(self):\n \n # CIE XYZ space\n self.X = (1/0.17697)*((0.49)*self.R + (0.31)*self.G + (0.2)*self.B)\n self.Y = (1/0.17697)*((0.17697)*self.R + (0.81240)*self.G + (0.01063)*self.B)\n self.Z = (1/0.17697)*((0)*self.R + (0.010)*self.G + (0.99)*self.B)\n\n # CIE Chromaticities xy\n self.x = self.X/(self.X + self.Y + self.Z)\n self.y = self.Y/(self.X + self.Y + self.Z)\n \n # CIE Chromaticities uv\n #self.u = (0.4661*self.x + 0.1593*self.y)/(self.y - 0.15735*self.x + 0.2424)\n #self.v = 
(0.6581*self.y)/(self.y - 0.15735*self.x + 0.2424)\n \n # constant for McCamy's/Hernandez-Andrés formula\n n = (self.x - self.x_e)/(self.y - self.y_e)\n \n # Correlated color temperature according to Hernández-Andrés (1999)\n self.color_temp = ( self.A_0 + \n self.A_1*np.exp(-n/self.t_1) + \n self.A_2*np.exp(-n/self.t_2) + \n self.A_3*np.exp(-n/self.t_3) )\n \n # Delete too high values\n self.color_temp[self.color_temp > 30000] = 0\n \n # Affichage de la CCT\n self.mean_temp = int(round(self.color_temp.mean()))\n self.mean_temp_label.setText(\"Temperature moyenne = \"+str(self.mean_temp))\n self.mean_temp_label.adjustSize()\n \t\n # Affichage de l'illuminance (Y)\n self.mean_illu = int(round((self.Y.mean())))\n self.illuminance_label.setText(\"Illuminance moyenne = \"+str(self.mean_illu))\n self.illuminance_label.adjustSize()", "def tldiffusion(self, dt):\n\n # Reset erosion, depo, trans and flux_in to 0\n self.erosion[:] = 0.0\n self.depo[:] = 0.0\n self.trans[:] = 0.0\n self.flux_in[:] = 0.0\n\n # Downstream steepest slope at node:\n self.steepest = self.grid.at_node[\"topographic__steepest_slope\"]\n # On each node, node ID of downstream receiver node\n # (on node (i), ID of node that receives flow from node (i)):\n self.receiver = self.grid.at_node[\"flow__receiver_node\"]\n\n dx = self.grid.dx\n cores = self.grid.core_nodes\n\n # Calculate influx rate on node i = outflux of nodes\n # whose receiver is i\n for i in self.grid.core_nodes:\n self.flux_in[self.receiver[i]] += self.flux_out[i]\n\n # Calculate transport coefficient\n # When S ~ Scrit, d_coeff is set to \"infinity\", for stability and\n # so that there is no deposition\n if self.steepest[i] >= self.slope_crit:\n self.d_coeff[i] = 1000000000.0\n else:\n self.d_coeff[i] = 1 / (\n 1 - (np.power(((self.steepest[i]) / self.slope_crit), 2))\n )\n\n # Calculate deposition rate on node\n self.depo[cores] = self.flux_in[cores] / self.d_coeff[cores]\n\n # Calculate erosion rate on node (positive value)\n # If S > Scrit, erosion is simply set for the slope to return to Scrit\n # Otherwise, erosion is slope times erodibility coefficent\n for i in self.grid.core_nodes:\n if self.steepest[i] > self.slope_crit:\n self.erosion[i] = dx * (self.steepest[i] - self.slope_crit) / (100 * dt)\n else:\n self.erosion[i] = self.k * self.steepest[i]\n\n # Update elevation\n self.elev[i] += (-self.erosion[i] + self.depo[i]) * dt\n\n # Calculate transfer rate over node\n self.trans[cores] = self.flux_in[cores] - self.depo[cores]\n\n # Calculate outflux rate\n self.flux_out[:] = self.erosion + self.trans", "def conductive_heat_flux(discr, eos, cv, grad_t):\n transport = eos.transport_model()\n return -transport.thermal_conductivity(eos, cv)*grad_t", "def specific_heat(Ekinv,n0,N,t0):\n Cv = np.zeros(10)\n for i in range(10):\n avg_K_squared = (1 / len(Ekinv[int(n0-50+5*i):int(n0-46+5*i)]) * np.sum(Ekinv[int(n0-50+5*i):int(n0-46+5*i)]))**2\n FluctK = 1 / len(Ekinv[int(n0-50+5*i):int(n0-46+5*i)]) * np.sum(Ekinv[int(n0-50+5*i):int(n0-46+5*i)]**2) - avg_K_squared\n Cv[i] = -1 / (FluctK/ avg_K_squared * 3 * N / 2 - 1) * 3 * N / 2\n return Cv", "def apply_heat_recovery(\n enduse,\n heat_recovered,\n service,\n service_techs,\n curr_yr\n ):\n try:\n # Fraction of heat recovered in current year\n heat_recovered_p_cy = heat_recovered[enduse][curr_yr]\n\n if heat_recovered_p_cy == 0:\n return service, service_techs\n else:\n # Apply to technologies each stored in dictionary\n service_reduced_techs = {}\n for tech, service_tech in service_techs.items():\n 
service_reduced_techs[tech] = service_tech * (1.0 - heat_recovered_p_cy)\n\n # Apply to array\n service_reduced = service * (1.0 - heat_recovered_p_cy)\n\n return service_reduced, service_reduced_techs\n\n except KeyError:\n\n # no recycling defined\n return service, service_techs", "def conduct_heat(self, delta_time, external_power):\n\t\tself.temperature_container = self.temperature_container+self.area*external_power*delta_time/(self.heat_capacity_container*self.mass_container)#https://en.wikipedia.org/wiki/Heat_capacity\n\t\t\n\t\tinternal_power = 0.591*(self.temperature_container-self.temperature)/0.01#No idea of this is right. Mainly the devides by its length bit. https://en.wikipedia.org/wiki/Thermal_conduction#Fourier's_law\n\t\t\n\t\tif (self.heat_capacity*self.mass())!=0:\n\t\t\tself.temperature = self.temperature+internal_power*delta_time/(self.heat_capacity*self.mass())\n\t\t\t#self.temperature_container=self.temperature_container-internal_power*delta_time/(self.heat_capacity_container*self.mass_container)#Als je dit toevoegd lijkt de simulatie niet goed meer te werken dus nog even uitzoeken heo dat zit.", "def load_heatdiag(self, **kwargs):\n read_rz = kwargs.get('read_rz',True) #read heat load in RZ\n\n self.hl=[]\n self.hl.append( self.datahlp(\"xgc.heatdiag.bp\",0,read_rz) ) #actual reading routine\n self.hl.append( self.datahlp(\"xgc.heatdiag.bp\",1,read_rz) )#actual reading routine\n\n for i in [0,1] :\n try:\n self.hl[i].psin=self.hl[i].psi[0,:]/self.psix #Normalize 0 - 1(Separatrix)\n except:\n print(\"psix is not defined - call load_unitsm() to get psix to get psin\")\n\n #read bfieldm data if available\n self.load_bfieldm()\n\n #dt=self.unit_dic['sml_dt']*self.unit_dic['diag_1d_period']\n wedge_n=self.unit_dic['sml_wedge_n']\n for i in [0,1]:\n dpsin=self.hl[i].psin[1]-self.hl[i].psin[0] #equal dist\n #ds = dR* 2 * pi * R / wedge_n\n ds=dpsin/self.bfm.dpndrs* 2 * 3.141592 * self.bfm.r0 /wedge_n #R0 at axis is used. should I use Rs?\n self.hl[i].rmid=np.interp(self.hl[i].psin,self.bfm.psino,self.bfm.rmido)\n self.hl[i].post_heatdiag(ds)\n self.hl[i].total_heat(wedge_n)", "def test_calculate_specific_heat(self):\n expected = np.array([1089.5, 1174.0, 1258.5], dtype=np.float32)\n result = WetBulbTemperature()._calculate_specific_heat(self.mixing_ratio)\n self.assertArrayAlmostEqual(result, expected, decimal=2)", "def bruteforce(self):\n import time\n t1 = time.time()\n for i in range(self.td.shape[0]):\n #Get the latitude at the start of the row, this is used for the entire row\n\n if i % config.LATITUDE_STEP == 0:\n startlat = i + config.LATITUDE_STEP #move to the center of the step\n startlat += self.start #Offset for parallel segmentation\n\n # This is the latitude at the center of the tile defined by\n # the image width, and the latitude_step\n x = int(self.td.shape[1] / 2)\n y = int((startlat + config.LATITUDE_STEP) / 2)\n latitude, _ = self.temperature.pixel_to_latlon(x,y)\n\n lat_f = PchipInterpolator(self.latitudenodes, self.lookup, extrapolate=False, axis=0)\n #The reshape corresponds to the dimensions of the OLAP cube\n # 5 elevations, 5 slope azimuths, 3 slopes, 3 opacities, 3 albedos, and finally 20 TI\n data = lat_f(latitude)\n compressedlookup = data.reshape(6,5,3,3,3,20)\n # Compute the PChip interpolation function for elevation\n elevation_interp_f = PchipInterpolator(np.array([-5.0, -2.0, -1.0, 1.0, 6.0, 8.0]), compressedlookup, extrapolate=False, axis=0)\n \n for j in range(self.td.shape[1]):\n # Each interpolation is composed in 2 parts.\n # 1. 
The interpolation function is computed.\n # 2. The interpolation function is applied.\n #print(self.reference[i,j], self.r_ndv)\n # If either the reference or the input THEMIS have no data\n if (self.td[i,j] == self.ndv) or (self.reference[i,j] == self.r_ndv):\n #The pixel is no data in the input, propagate to the output\n self.resultdata[i,j] = self.ndv\n continue\n\n #Interpolate elevation\n try:\n new_elevation = elevation_interp_f(self.ed[i,j])\n except:\n # The elevation is bad.\n self.resultdata[i,j] = self.ndv\n self.log[i,j] = self.error_codes['elevation_out_of_bounds']\n continue\n #Interpolate Slope Azimuth\n slopeaz_f = self.compute_interpolation_function(sorted(self.slopeaz_lookup.keys()),\n new_elevation,\n config.SLOPEAZ_INTERPOLATION)\n new_slopeaz = slopeaz_f(self.sz[i,j])\n #Interpolate Slope\n slope_f = self.compute_interpolation_function(sorted(self.slope_lookup.keys()),\n new_slopeaz,\n config.SLOPE_INTERPOLATION)\n capped_slope = self.sd[i,j]\n if capped_slope > 60.0:\n capped_slope = 60.0\n new_slope = slope_f(capped_slope)\n # I am having problems here with pulling TAU properly - check montabone!\n #Interpolate Tau\n tau_f = PchipInterpolator(sorted(self.tau_lookup.keys()),\n new_slope,\n extrapolate=False,\n axis=0)\n new_tau = tau_f(self.od[i,j])\n #Interpolate Albedo\n albedo_f = self.compute_interpolation_function(sorted(self.albedo_lookup.keys()),\n new_tau,\n config.ALBEDO_INTERPOLATION)\n new_albedo = albedo_f(self.ad[i,j])\n #Interpolate Inertia\n self.resultdata[i,j] = self.extract_monotonic(self.td[i,j],\n new_albedo)", "def heat_balance(index):\n t = index[0]\n return (\n heat_hru_out[t]\n + pulp.lpSum([component_output[i, t] for i in index_heat_out])\n - pulp.lpSum([component_input[i, t] for i in index_heat_in])\n + heat_unserve[t]\n - heat_dump[t]\n == forecast[\"heat_load\"][t]\n )", "def compute_energy(img):\r\n # urmati urmatorii pasi:\r\n # 1. transformati imagine in grayscale\r\n # 2. folositi filtru sobel pentru a calcula gradientul in directia X si Y\r\n # 3. 
calculati magnitudinea imaginii\r\n\r\n img_gray_scale = cv.cvtColor(img, cv.COLOR_BGR2GRAY);\r\n\r\n #de cautat totusi si codul pt SOBEL pe net\r\n grad_x = cv.Sobel(img_gray_scale, ddepth = cv.CV_16S, dx = 1, dy = 0, borderType = cv.BORDER_CONSTANT)\r\n grad_y = cv.Sobel(img_gray_scale, ddepth = cv.CV_16S, dx = 0, dy = 1, borderType = cv.BORDER_CONSTANT)\r\n\r\n#E repr gradientii aka cat se sch un pixel de la unul la altul\r\n E = abs(grad_x) + abs(grad_y)\r\n # print(grad_y)\r\n # print(grad_x)\r\n\r\n cv.imwrite(\"poza.jpg\", E)\r\n return E", "def calc_loss_flux(self, shotANDplunge=\"167192.1\"):\n\n wb = xl.load_workbook(\"recLPdata.xlsx\", data_only=True)\n dataSheet = wb.get_sheet_by_name(\"Sheet1\")\n\n # Get the correct cells.\n if shotANDplunge == \"167192.1\":\n timeLow = \"A3\"\n timeHigh = \"A64\"\n densLow = \"C3\"\n densHigh = \"C64\"\n tempLow = \"D3\"\n tempHigh = \"D64\"\n rMinRsepLow = \"G3\"\n rMinRsepHigh = \"G64\"\n elif shotANDplunge == \"167192.2\":\n timeLow = \"I3\"\n timeHigh = \"I55\"\n densLow = \"K3\"\n densHigh = \"K55\"\n tempLow = \"L3\"\n tempHigh = \"L55\"\n rMinRsepLow = \"O3\"\n rMinRsepHigh = \"O55\"\n elif shotANDplunge == \"167193.1\":\n timeLow = \"Q3\"\n timeHigh = \"Q61\"\n densLow = \"S3\"\n densHigh = \"S61\"\n tempLow = \"T3\"\n tempHigh = \"T61\"\n rMinRsepLow = \"W3\"\n rMinRsepHigh = \"W61\"\n elif shotANDplunge == \"167193.2\":\n timeLow = \"Y3\"\n timeHigh = \"Y48\"\n densLow = \"AA3\"\n densHigh = \"AA48\"\n tempLow = \"AB3\"\n tempHigh = \"AB48\"\n rMinRsepLow = \"AE3\"\n rMinRsepHigh = \"AE48\"\n elif shotANDplunge == \"167194.1\":\n timeLow = \"AG3\"\n timeHigh = \"AG71\"\n densLow = \"AI3\"\n densHigh = \"AI71\"\n tempLow = \"AJ3\"\n tempHigh = \"AJ71\"\n rMinRsepLow = \"AM3\"\n rMinRsepHigh = \"AM71\"\n elif shotANDplunge == \"167194.2\":\n timeLow = \"AO3\"\n timeHigh = \"AO67\"\n densLow = \"AQ3\"\n densHigh = \"AQ67\"\n tempLow = \"AR3\"\n tempHigh = \"AR67\"\n rMinRsepLow = \"AU3\"\n rMinRsepHigh = \"AU67\"\n elif shotANDplunge == \"167195.1\":\n timeLow = \"AW3\"\n timeHigh = \"AW60\"\n densLow = \"AY3\"\n densHigh = \"AY60\"\n tempLow = \"AZ3\"\n tempHigh = \"AZ60\"\n rMinRsepLow = \"BC3\"\n rMinRsepHigh = \"BC60\"\n elif shotANDplunge == \"167195.2\":\n timeLow = \"BE3\"\n timeHigh = \"BE59\"\n densLow = \"BG3\"\n densHigh = \"BG59\"\n tempLow = \"BH3\"\n tempHigh = \"BH59\"\n rMinRsepLow = \"BK3\"\n rMinRsepHigh = \"BK59\"\n else:\n return print(\"Incorrect shot/plunge.\")\n\n times = self.returnArray(dataSheet, timeLow, timeHigh)\n dens = self.returnArray(dataSheet, densLow, densHigh)\n temps = self.returnArray(dataSheet, tempLow, tempHigh)\n rmins = self.returnArray(dataSheet, rMinRsepLow, rMinRsepHigh)\n\n # Go from 10^18 m^-3 to just m^-3.\n for index in range(0, len(dens)):\n if dens[index] is None:\n continue\n else:\n dens[index] = dens[index] * 10**18\n\n # Plasma sound speed assuming Te = Ti.\n sound_speeds = [(temp*2 / massD)**0.5 for temp in temps]\n\n self.shot_and_plunge = shotANDplunge\n self.times = times\n self.dens = dens\n self.temps = temps\n self.rmins = rmins\n self.sound_speeds = sound_speeds\n\n # The flux of W off the probe due to sputtering. sputt_flux = yield * flux of dueterium.\n def sputt_flux(ne, Ti, Te):\n # Sputtering energy threshold of tungsten oxide in eV. 
Note pure W is 160 eV.\n eThresh = 65\n soundSpeed = ((float(Te) + float(Ti)) / massD)**0.5\n\n # Use lambda function for use in integrate,\n func = lambda E: 0.528 * alpha * Z_D * (massD / (u0*(massD + massW))) * 0.059 * (E+3*Ti) ** (1.0/3.0) * soundSpeed * ne * 2 * (E/3.1415)**0.5 * (1/float(Ti))**(1.5) * math.exp(-E/Ti)\n ans, err = integrate.quad(func, eThresh, np.inf)\n\n #print(\"Sputtered Flux: \" + str(ans))\n #print(\"Sputtered Flux Error: \" + str(err/ans * 100) + \"%\")\n\n return ans\n\n\n for probe in [\"A\", \"B\", \"C\"]:\n # Use corresponding size for desired probe.\n if probe==\"A\":\n size = aSize\n elif probe==\"B\":\n size = bSize\n elif probe==\"C\":\n size = cSize\n else:\n print(\"Incorrect probe entry. Should be either A, B, or C.\")\n\n print(\"Calculating loss flux for \" + probe + \" probes...\")\n\n flux_loss = []\n for index in range(0, len(self.temps)):\n Te = self.temps[index]\n ne = self.dens[index]\n cs = self.sound_speeds[index]\n\n # Approx. speed of W entering flux tube.\n v0 = 0.5 * cs\n\n # Get the ionization rate coefficient for a specific temperature.\n ad = atomic.element('tungsten')\n temperatureRange = np.logspace(0,4,100)\n S = ad.coeffs['ionisation']\n f = interpolate.interp1d(temperatureRange, S(0, temperatureRange, ne))\n coeff = f(Te)\n\n # Calculate lamda_ionization.\n lambda_iz = v0 * (ne * coeff)**(-1)\n\n # Fraction ionized in the flux tube (i.e. it will return to the probe)\n frac = 1 - math.exp(-size / lambda_iz)\n #print(\"Fraction Ionized: \" + str(frac))\n\n # Thus the fraction lost is 1-frac of the sputtered flux.\n Ti = Te\n fracFluxLost = (1 - frac) * sputt_flux(ne=ne, Ti=Ti, Te=Te)\n #print(\"Flux Lost: \" + str(fracFluxLost))\n\n flux_loss.append(fracFluxLost)\n\n self.loss_dict[probe] = {\"rminrsep\":self.rmins, \"flux\":flux_loss}", "def diffusive_heat_flux(discr, eos, cv, j):\n if isinstance(eos, MixtureEOS):\n h_alpha = eos.species_enthalpies(cv)\n return sum(h_alpha.reshape(-1, 1) * j)\n return 0", "def calculate_surface_heatflux(self, weather, spaces_dict, surface, temp_record, Coeff, space, h_surface, ShadowsFlag, ns, shadow_record, shade_surf_list, surfaces_dict, Aflag, terrain, areaDict, areaWinDict, shadowRatios, shadowRatioIndex):\r\n #print \"Reaching Surface function...\"\r\n\r\n # First get the As\r\n A_total = self.get_A(surface, areaDict, areaWinDict)\r\n if Aflag == 0:\r\n # If it is the first surface of the space, label the space ID in the log file:\r\n la = str(surface.obj_id)\r\n lb = str(surface.obj_type)\r\n #TM_user.info(\"%s,surface area,%s,%s\" % (la, A_total, lb))\r\n A_noWin = self.get_A_noWin(surface, areaDict, areaWinDict)\r\n A_noOp = self.get_A_noOp(surface, areaDict, areaWinDict)\r\n T_space = spaces_dict[space.obj_id][1]\r\n T1 = weather[\"t_outside\"]\r\n hc_external = float(self.get_hc_external(weather, surface, h_surface, terrain))\r\n transmitted_win = 0\r\n Q_flux = 0\r\n\r\n # need the surface related information, T_space, U, R3\r\n U = self.get_U_surface_e(A_total, A_noOp, surface, areaWinDict) # U = Infor_surface{11,i_surface}; Defined Below\r\n #print U\r\n R3 = 1/U\r\n # Using calculations from: self.surface.constr.layer.C # Infor_surface{10, i_surface} ; from gbXML\r\n C = self.get_C_surface(A_total, A_noOp, surface, Coeff, areaWinDict) # need to pass surface and opening ids\r\n #print C\r\n\r\n temperature = Temperature()\r\n\r\n #Sub-routines for each wall type based on the returned hc_external\r\n # This hc is different for each surface type so moved under this sub-routine area\r\n 
#hc = 3.076 sent this to the Temperature Object\r\n if surface.obj_type == \"ExteriorWall\":\r\n transmitted_win, Q_flux = temperature.exterior_wall(surface, hc_external, T1, A_total, A_noWin, weather, R3, C, T_space, temp_record, ShadowsFlag, ns, shadow_record, shade_surf_list, surfaces_dict, areaWinDict, shadowRatios, areaDict, shadowRatioIndex)\r\n #print Q_flux\r\n if surface.obj_type == \"Roof\":\r\n transmitted_win, Q_flux = temperature.roof(surface, hc_external, T1, A_total, A_noWin, weather, R3, C, A_noOp, T_space, temp_record, ShadowsFlag, ns, shadow_record, shade_surf_list, surfaces_dict, areaWinDict, shadowRatios, areaDict, shadowRatioIndex)\r\n #print Q_flux # Matches for Four Room\r\n if surface.obj_type == \"InteriorWall\":\r\n transmitted_win, Q_flux = temperature.interior_wall(surface, A_total, R3, C, spaces_dict, T_space, temp_record)\r\n #print Q_flux # Matches for Four Room\r\n if surface.obj_type == \"UndergroundWall\":\r\n transmitted_win, Q_flux = temperature.underground_wall(surface, A_total, R3, C, T_space, temp_record) # No instance of yet to test\r\n if surface.obj_type == \"RaisedFloor\":\r\n # This will eventually need some values when we start using raised floors\r\n transmitted_win, Q_flux = temperature.raised_floor(surface, hc_external, T1, A_total, A_noWin, weather, R3, C, A_noOp, T_space, temp_record) # Not instance of yet to test\r\n\r\n return transmitted_win, Q_flux", "def FigA7(case):\n \n #set the parameter, arrays\n \n n_array=np.array([1,2,3])\n\n #set the result arrays\n if case==0:\n class_number=5\n elif case==1:\n class_number=6\n fate=np.zeros([class_number])#number of evolutionary fate\n fate_matrix=np.zeros([np.size(n_array),np.size(fate)])\n \n time=np.linspace(0,100000, 1000000)\n loop=10**6\n \"\"\"\n 0 Co and/or Ch cannot survive in mono-culture\n 1 Co cannot invade\n 2 Only equilibrium of exclusion is stable\n 3 Only equilibrium of coexistence is stable\n 4 Two equilibria are UNstable\n 5 two Equilibrium are stable (which may occur only when sCO vs rCh)\n \"\"\"\n for tri in range(np.size(n_array)):\n counter=0\n n=n_array[tri]\n print(str(\"Hill coefficient is %d\" %(n)))\n fate=np.zeros([class_number])#number of evolutionary fate should be reset\n if case==0 or case==1:\n fname=str('parameter-sweep-MC-n%d-case%d' %(n, case))\n else:\n print(\"Error in case\")\n return 1\n \n for i in range(loop):\n if(i+1)%10000==0:\n print(i+1)\n Ks,cd,T0, alpha,=np.random.uniform(0,1,4)\n Kr,cr=np.random.uniform([Ks,0],[1,1],2)#Kr>Ks and cr.cd\n #check whether r is positive or not\n if case==0:\n r1=rmax*(1-cr-cd)#rCO\n r2=rmax#sCH\n W0Co=r1-dmax*T0**n/(T0**n+Kr**n)-alpha#initial growth of Cooperator\n W0Ch=r2-dmax*T0**n/(T0**n+Ks**n)-alpha#initial growth of Cheater\n elif case==1:\n r1=rmax*(1-cd)#sCo\n r2=rmax*(1-cr)#rCh\n W0Co=r1-dmax*T0**n/(T0**n+Ks**n)-alpha\n W0Ch=r2-dmax*T0**n/(T0**n+Kr**n)-alpha\n stab_e=0#initialize the falgs of stability\n stab_c=0\n if W0Co<0 or W0Ch<0:\n fate[0]+=1\n res=0\n else:\n #succeed in mono-culture \n init=np.array([T0,10**(-6)])\n if case==0: \n solCo=odeint(DyCoop, init, time, args=(T0, r1, Kr, alpha, n))\n Ts=solCo[-1,0]\n #x1s=solCo[-1,1]\n solCh=odeint(DyCheat, init, time, args=(T0, r2, Ks, alpha, n))\n x2s=solCh[-1,1]\n else:\n solCo=odeint(DyCoop, init, time, args=(T0, r1, Ks, alpha, n))\n Ts=solCo[-1,0]\n #x1s=solCo[-1,1]\n solCh=odeint(DyCheat, init, time, args=(T0, r2, Kr, alpha, n))\n x2s=solCh[-1,1]\n \n #Evolutionary dynamics \n if case==0:\n K=Kr\n else:\n K=Ks\n if 
r1*(1-x2s)-dmax*T0**n/(T0**n+K**n)<alpha:\n #Co cannot invade\n fate[1]+=1\n res=1\n else:\n #Co can invade\n #calculate Tdagger Td and check whether coexist or exclude\n if case==0:\n #rCo vs sCh\n #in this case, at most one equilbrium is stable\n tau=Quad(case,alpha,cr+cd,0,Kr, Ks, n)\n Td=tau**(1/n)\n if Td<Ts:\n #Co exclude Ch\n fate[2]+=1\n res=2\n else:\n x1d=alpha*Kd*(T0-Td)/(fmax*Td-alpha*(T0-Td))\n x2d=1-x1d-(dmax*Td**n/(Td**n+K**n)+alpha)/r1\n #check the stability condition\n stab=Stab_cond(alpha, T0, Td,x1d,x2d, r1,r2,n, K)\n if stab==0:\n #stable coexistence\n fate[3]+=1\n res=3\n else:\n #unstable coexistence nor exclusion\n fate[4]+=1\n res=4\n print(Td, x1d, x2d)\n else:\n #sCo vs rCh\n # in this case two equilibria can be stable at the same time\n [tau_p,tau_m]=Quad(case,alpha,cd,cr,Ks, Kr, n)\n if tau_m>Ts**n or tau_p<Ts**n:\n # cexclusion is stable\n stab_e=1\n # stability in coexistence \n if tau_p<0:\n stab_c=0\n else:\n Td=tau_p**(1/n)\n x1d=alpha*Kd*(T0-Td)/(fmax*Td-alpha*(T0-Td))\n x2d=1-x1d-(dmax*Td**n/(Td**n+K**n)+alpha)/r1\n #check the stability condition\n stab=Stab_cond(alpha, T0, Td,x1d,x2d, r1,r2,n, K)\n if stab==0:\n #stable coexistence\n stab_c=1\n #classify\n if stab_e==1 and stab_c==1:\n # two stable equilbria\n fate[5]+=1\n res=5\n elif stab_e==1 and stab_c==0:\n #only stable cexclusion\n fate[2]+=1\n res=2\n elif stab_e==0 and stab_c==1:\n #stable coexistence\n fate[3]+=1\n res=3\n else:\n #both unstable\n fate[4]+=1\n res=4\n \n #save the results\n if counter==0:\n result=np.array([[Ks, Kr, cr, cd, alpha, T0,res]])\n #save the result with parameter values\n \n else:\n #add array of results\n R=np.array([[Ks, Kr, cr, cd, alpha, T0,res]])\n result=np.concatenate((result, R), axis=0)\n counter+=1\n \n #save csv file and graph\n np.savetxt(fname+'.csv',result, delimiter=',', header='Ks, Kr, cr, cd, alpha, T0, class', fmt='%.6f') \n print(fate)\n fate_matrix[tri,:]=fate \n if case==0: \n np.savetxt('parameter_sweep_MC_total_case0.csv',fate_matrix, delimiter=',', header='cl0,l1,cl2,cl3,cl4', fmt='%d')\n else:\n np.savetxt('parameter_sweep_MC_total_case1.csv',fate_matrix, delimiter=',', header='cl0,l1,cl2,cl3,cl4,cl5', fmt='%d')\n Plot(case)", "def _calc(self):\r\n u = self._fadefunc(self.xf)\r\n v = self._fadefunc(self.yf)\r\n w = self._fadefunc(self.zf)\r\n\r\n # populate the hashes dict\r\n self._hash()\r\n \r\n # once the hash dict is populated, start calculating the dot product between \r\n # the gradient vector and the distance vectors, which is done in the _grad method.\r\n # finally linearly interpolate the values to get the avg value\r\n # first interpolate in the x-dir, then in y-dir\r\n x1: float = self._lerp(self._grad(self.hashes[\"aaa\"], self.xf, self.yf, self.zf),\r\n self._grad(self.hashes[\"baa\"], self.xf - 1, self.yf, self.zf), u)\r\n\r\n x2: float = self._lerp(self._grad(self.hashes[\"aba\"], self.xf, self.yf - 1, self.zf),\r\n self._grad(self.hashes[\"bba\"], self.xf - 1, self.yf - 1, self.zf), u)\r\n\r\n # the first y-dir lerp\r\n y1: float = self._lerp(x1, x2, v)\r\n\r\n x1: float = self._lerp(self._grad(self.hashes[\"aab\"], self.xf, self.yf, self.zf - 1),\r\n self._grad(self.hashes[\"bab\"], self.xf - 1, self.yf, self.zf - 1), u)\r\n\r\n x2: float = self._lerp(self._grad(self.hashes[\"abb\"], self.xf, self.yf - 1, self.zf - 1),\r\n self._grad(self.hashes[\"bbb\"], self.xf-1, self.yf-1, self.zf-1), u)\r\n\r\n # the second y-dir lerp\r\n y2: float = self._lerp(x1, x2, v)\r\n\r\n # the final noise value, which will be in the range [0, 
1]\r\n self.value = (self._lerp(y1, y2, w) + 1)/2\r\n return self.value", "def energy_map(img):\n img_new = img.astype(float) #converting image to float\n total_energy = 0.0 # To store the sum of energy for all channels\n r,c,d = img.shape \n for i in range(d):\n dy = np.zeros([r, c], dtype=float) \n dx = np.zeros([r, c], dtype=float)\n if r > 1:\n dy = np.gradient(img_new[:,:,i], axis=0) #gradient along rows\n if c > 1:\n dx = np.gradient(img_new[:,:,i], axis=1) #gradient along columns\n total_energy += np.absolute(dy) + np.absolute(dx) \n return total_energy #Total energy map for entire image", "def compute_surface_temperature(heat_flux):\n\n return 1.1e-4*heat_flux + 323", "def recovery(self):\n\n def exponential(time, tau):\n\n time = list(map(lambda x: float(x), time))\n exponent = np.exp(-np.divide(time, tau))\n return (1 - exponent)\n \n \n initial_guess = [55]\n \n tau = []\n for i in range(self.n_cols):\n current = self.screened_data.iloc[:,i]\n popt = curve_fit(exponential, self.xaxis, current, p0 = initial_guess)\n tau.append(popt[0][0])\n\n print('Median: ', np.median(tau))\n print('Min: ', np.min(tau))\n print('Max: ', np.max(tau))\n return 0\n\n plt.plot(self.xaxis, self.averaged_data, label = 'Average of all models')\n plt.plot(exponential(self.xaxis, *initial_guess), label = 'Initial Guess')\n for i in range(len(popt)):\n plt.plot(self.xaxis, exponential(self.xaxis, *popt[i]), label = 'Best Fit: time = ' + str(*popt[i]) + ' (ms)')\n plt.xlabel('Time (ms)')\n plt.ylabel('Normalized Current')\n plt.title('Recovery from Inactivation')\n plt.legend()\n plt.savefig('recovery_exponential_fit.png')\n return popt", "def flatNoiseCGH():\n #Get data\n wdir = '/home/rallured/Dropbox/AXRO/Metrology/NoiseStudy/FlatMeasurements/'\n d1,dx1 = met.read4DFits(wdir+'161205_RefFlat_Avg8_Meas1.fits')\n d2,dx2 = met.read4DFits(wdir+'161205_RefFlat_Avg8_Meas2.fits')\n p1,px1 = met.read4DFits(wdir+'161205_RefFlat_ParrotingTestPitch_Meas1.fits')\n p2,px2 = met.read4DFits(wdir+'161205_RefFlat_ParrotingTestPitch_Meas2.fits')\n p3,px3 = met.read4DFits(wdir+'161205_RefFlat_ParrotingTestRoll_Meas1.fits')\n p4,px4 = met.read4DFits(wdir+'161205_RefFlat_ParrotingTestRoll_Meas2.fits')\n\n #Construct baseline power spectra\n f1,pow1 = fourier.meanPSD(d1-d2,win=np.hanning,dx=dx1)\n f2,pow2 = fourier.meanPSD(d1-d2,win=np.hanning,dx=dx1,axis=1)\n \n #Construct parroted power spectra\n f3,pow3 = fourier.meanPSD(p1-p2,win=np.hanning,dx=dx1)\n f4,pow4 = fourier.meanPSD(p1-p2,win=np.hanning,dx=dx2,axis=1)\n f5,pow5 = fourier.meanPSD(p3-p4,win=np.hanning,dx=dx1)\n f6,pow6 = fourier.meanPSD(p3-p4,win=np.hanning,dx=dx2,axis=1)\n\n #Plot\n plt.loglog(f1,pow1/f1[0],label='Axial Baseline')\n plt.loglog(f2,pow2/f2[0],label='Azimuthal Baseline')\n plt.loglog(f3,pow3/f3[0],label='Pitch Axial')\n plt.loglog(f4,pow4/f4[0],label='Pitch Azimuthal')\n plt.loglog(f5,pow5/f5[0],label='Roll Axial')\n plt.loglog(f6,pow6/f6[0],label='Roll Azimuthal')\n plt.title('Residual Fringe Repeatability Impact')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n plt.grid()\n plt.legend(loc='lower left')\n\n return f1,pow1", "def heat_flux_out(T_inf, T_old, hc_air, emmi):\n\n #nz = T_old.shape[0]\n ny = T_old.shape[0]\n nx = T_old.shape[1]\n\n Q_out = np.zeros((ny, nx))\n h_eff = np.zeros((ny, nx))\n T_eff = np.zeros((ny, nx))\n for i in range(nx):\n for j in range(ny):\n T_eff[j, i] = ((T_old[j, i]**3) + (T_inf * T_old[j, i]**2)\n + (T_old[j, i] * T_inf**2) + T_inf**3)\n\n h_eff[j, i] = hc_air + (emmi*STEF_BOL_C*T_eff[j, 
i])\n\n Q_out[j, i] = h_eff[j, i] * (T_old[j, i] - T_inf)\n\n return Q_out", "def linear_heat_transfer(x, t, K_medium, rho_medium, c_medium, T_medium_initial, H_heat_transfer, T_external_applied):\n k = get_kappa(K_medium, rho_medium, c_medium)\n\n h = H_heat_transfer/K_medium\n erfc_factor_1 = erfc(x/(2*np.sqrt(k*t)))\n\n #combine factors in logdomain, since the exp-factors quickly approach\n #infinity while erfc-factor goes to zero\n log_exp_factor_1 = h*x\n log_exp_factor_2 = k*t*h**2\n log_erfc_factor_2 = np.log(erfc(x/(2*np.sqrt(k*t)) + h*np.sqrt(k*t)))\n exp_erfc_factor = np.exp(log_exp_factor_1 + log_exp_factor_2 + log_erfc_factor_2)\n\n return (erfc_factor_1 - exp_erfc_factor)*(T_external_applied - T_medium_initial) + T_medium_initial", "def get_evaporation_latent_heat() -> float:\n theta = 28.0\n return 2500.8 - 2.3668 * theta", "def infectedToRecovered(self):\n\n # initialize a random matrix where around recovery_probability % of the values are True\n recover_prob_arr = np.random.rand(self.space.shape[0],self.space.shape[1]) < self.recovery_probability\n # find the overlap between infected and above array and make those people recovered\n self.space[np.logical_and(self.space == 1, recover_prob_arr)] = 2", "def heatbasic(u0,T,K):\n import numpy as np\n N = len(u0)-1\n\n dx = 1.0/N;\n dt = 0.1/K;\n x = np.linspace(0,1,N+1)\n\n u = np.copy(u0)\n\n u_history = [u]\n\n A = np.zeros( (N+1,N+1) )\n for i in range(1,N):\n A[i,i-1] = 1;\n A[i,i] = -2;\n A[i,i+1] = 1\n A = A * dt/(dx*dx)\n A = A + np.eye(N+1)\n A[0,0] = 0.0;\n A[N,N] = 0.0;\n\n for k in range(K):\n u = np.dot(A,u)\n u_history.append(u)\n\n return u_history", "def heat_loss(dwelling):\n # TODO: what is \"h\"?\n if dwelling.get('hlp') is not None:\n return dict(h=dwelling.hlp * dwelling.GFA, hlp=dwelling.hlp)\n\n UA = sum(e.Uvalue * e.area for e in dwelling.heat_loss_elements)\n A_bridging = sum(e.area for e in dwelling.heat_loss_elements if e.is_external)\n\n if dwelling.get(\"Uthermalbridges\") is not None:\n h_bridging = dwelling.Uthermalbridges * A_bridging\n else:\n h_bridging = sum(x['length'] * x['y'] for x in dwelling.y_values)\n\n h_vent = 0.33 * dwelling.infiltration_ach * dwelling.volume\n\n h = UA + h_bridging + h_vent\n return dict(\n h=h,\n hlp=h / dwelling.GFA,\n h_fabric=UA,\n h_bridging=h_bridging,\n h_vent=h_vent,\n h_vent_annual=monthly_to_annual(h_vent))", "def create_extrapolated_bckg(bolo_name):\n\n\t#Output dir\n\toutput_heat_path = \"../Analyse_\" + bolo_name +\"/ROOT_files/\"\n\n\t##################\n\t#\n\t# Heatonly 2D\n\t#\n\t##################\n\t\n\thheat, file_hheat = PyRPl.open_ROOT_object(\"../Analyse_\" + bolo_name +\"/ROOT_files/\" + bolo_name+\"_thresh.root\", \"hheat\")\n\ttree, file_tree = PyRPl.open_ROOT_object(\"../Fond_ERA_merged/\"+bolo_name+\"_fond.root\", \"data\")\n\t\n\t#Load standard cuts\n\tstandard_cuts = Ana_ut.open_cut_file(bolo_name, \"TCuts.txt\")\t\n\tstandard_cuts = standard_cuts + \"&&KTH<1&&KTH>0&&0.5*(EIB+EID)<0\"\n\t\n\theat_max = hheat.GetMaximum()\n\t\n\n\tl_heatonly = TEventList(\"l_heatonly\")\n\ttree.Draw(\">>l_heatonly\",standard_cuts)\n\tpop_len = l_heatonly.GetN()\n\n\ttry :\n\t\tos.remove(output_heat_path + bolo_name + \"_heatonly_2D_with_heat_cut.root\")\n\texcept OSError:\n\t\tpass\n\n\tfor fraction in [0.1,0.2,0.3,0.4,0.5,0.8,1]:\n\t\tprint fraction\n\t\tfile_heat2D = TFile(output_heat_path + bolo_name + \"_heatonly_2D_with_heat_cut.root\", \"update\")\n\t\thheat2D = TH2F(\"heat2D_fraction_\" + str(fraction), \"heat2D_fraction_\" + str(fraction), 200, -2, 
15, 200, -2, 15)\n\t\tfor k in range(pop_len):\n\t\t\tcounter = l_heatonly.GetEntry(k)\n\t\t\ttree.GetEntry(counter)\n\t\t\ttime = 1E6*tree.UT1+ tree.UT2\n\t\t\thr = hheat.GetBinContent(hheat.FindBin(time))\n\t\t\tif hr<= fraction*heat_max:\n\t\t\t\thheat2D.Fill(tree.EC1, tree.EC2)\n\n\t\thheat2D.Write()\n\t\tfile_heat2D.Close() \n\t\tdel hheat2D \n\t\tdel file_heat2D", "def gyroHF(self, GYRO, PFC):\n print(\"Calculating gyro orbit heat loads\")\n log.info(\"Calculating gyro orbit heat loads\")\n #get divertor HF\n qDiv = PFC.qDiv[PFC.PFC_GYROmap] / self.elecFrac\n Pdiv = qDiv * PFC.areas[PFC.PFC_GYROmap]\n #Get fractional multipliers for each helical trace\n gyroFrac = 1.0/GYRO.N_gyroPhase\n vPhaseFrac = 1.0/GYRO.N_vPhase\n vSliceFrac = GYRO.energyFracs\n #qMatrix = np.zeros((GYRO.N_gyroPhase,GYRO.N_vPhase,GYRO.N_vSlice,len(q)))\n Pgyro = np.zeros((GYRO.Nt))\n PNaN = 0.0\n sum=0\n sum1=0\n #loop through intersect record and redistribute power using multipliers\n for gyroPhase in range(GYRO.N_gyroPhase):\n for vPhase in range(GYRO.N_vPhase):\n for vSlice in range(GYRO.N_vSlice):\n idx = GYRO.intersectRecord[gyroPhase,vPhase,vSlice,PFC.CADHOT_GYROmap]\n isNanFrom = np.where(np.isnan(idx)==True)[0] #include NaNs (NaNs = no intersection) index we map from\n notNanFrom = np.where(np.isnan(idx)==False)[0] #dont include NaNs (NaNs = no intersection) index we map from\n notNanTo = idx[~np.isnan(idx)] #indices we map power to\n notNanTo = notNanTo.astype(int) #cast as integer\n isNanTo = idx[np.isnan(idx)] #indices we map power to\n isNanTo = isNanTo.astype(int) #cast as integer\n\n if len(notNanFrom)>0:\n #multiple Froms can light up the same To, so we loop\n for i in range(len(notNanFrom)):\n Pgyro[notNanTo[i]] += Pdiv[notNanFrom[i]]*GYRO.ionFrac*gyroFrac*vPhaseFrac*vSliceFrac[notNanFrom[i],vSlice]\n\n if len(isNanFrom)>0:\n PNaN += np.sum(Pdiv[isNanFrom]*GYRO.ionFrac*gyroFrac*vPhaseFrac*vSliceFrac[isNanFrom,vSlice])\n\n #print(\"\\nTEST2\")\n #print(GYRO.intersectRecord[0,0,0,1711])\n #print(Pgyro[1711])\n\n GYRO.gyroPowMatrix += Pgyro\n GYRO.gyroNanPower += PNaN\n return", "def get_duct_linear_heat_loss_coefficient() -> float:\n return 0.49", "def _calculate_measurement_error(self): \n \n # Calculate Hartmann Spot\n # FIXME what are factor_1, factor_2 ???\n factor_1, factor_2 = 206265*5.89e-7, 206265*6.5e-7\n term1, term2 = factor_1/self.actuator_spacing, factor_2/self.r0\n hartmann_spot = np.max([term1, term2])\n \n # Calculate SNR \n n_pix=4 # FIXME spreadsheet says not to change this idk why?\n sample_time = 1/(10*self.controller_frequency)\n brightness = (8.9e5)*10**((0-self.guide_star_mag)/2.5)\n n_photons = brightness*sample_time*((100*self.actuator_spacing)**2)\n snr = n_photons/np.sqrt(n_photons + n_pix*(self.sigma_readnoise)**2)\n\n # Calculate noise propagator \n degrees_of_freedom = np.round((np.pi/4) * (self.telescope_diameter/self.actuator_spacing)**2)\n factor_1, factor_2 = 0.0536, 0.0795 # FIXME WHAT THE HECK IS THIS\n if self.aperture == 'circular':\n factor_1, factor_2 = 0.0068, 0.0796\n noise_propagator = np.sqrt(2*(factor_1 + factor_2*np.log(degrees_of_freedom)))\n\n # Calculate close loop averaging\n controller_over_frame = 1/10\n close_loop_averaging = np.sqrt(2*controller_over_frame)*np.arctan(1/(2*controller_over_frame))\n sigma_measurement = noise_propagator * close_loop_averaging * (self.actuator_spacing*1e9) * (hartmann_spot/snr*4.84814e-6)\n self.sigma_measurement = sigma_measurement # in nm", "def compute(self, Rs, D):\n self.Rs = Rs\n self.D = D\n self.M = (self.Rs 
* c**2 * au) / (2 * G * M_sun)\n print(\"M = %.1e M☉\\t%.2e Kg\" % (self.M, self.M*M_sun))\n print(\"Rs = %s ua\\t%.2e m\" % (self.Rs, self.Rs*au))\n print(\"D = %s ua\\t%.2e m\\n\" % (self.D, self.D*au))\n\n vrai_debut = time.process_time()\n\n\n seen_angle, deviated_angle = self.trajectories()\n\n self.interpolation = self.interpolate(seen_angle, deviated_angle)\n\n if self.display_interpolation is True:\n xmin = np.min(seen_angle)\n xmax = np.max(seen_angle)\n seen_angle_splin = np.linspace(xmin, xmax, 20001)\n deviated_angle_splin = self.interpolation(seen_angle_splin)\n plt.figure('Trajectories interpolation')\n plt.clf()\n plt.title(\"Light deviation interpolation\", va='bottom')\n plt.xlabel('seen angle(°)')\n plt.ylabel('deviated angle(°)')\n plt.plot(seen_angle, deviated_angle, 'o')\n plt.plot(seen_angle_splin, deviated_angle_splin)\n plt.grid()\n #plt.savefig('interpolation.png', dpi=250, bbox_inches='tight')\n plt.draw()\n#\n print(\"last angle\", seen_angle[-1])\n print(\"trajectories time: %.1f\" % (time.process_time()-vrai_debut))\n\n img_matrix_x, img_matrix_y = self.create_matrices()\n\n self.img_matrix_x = img_matrix_x\n self.img_matrix_y = img_matrix_y\n\n self.img2 = self.img_pixels(self.img_debut)\n\n vrai_fin = time.process_time()\n print(\"\\nglobal computing time: %.1f\\n\" % (vrai_fin-vrai_debut))", "def import_heat_data(self):\n worksheet = (\n xlrd.open_workbook(filename=self.filename_heat).sheet_by_index(0)\n ) \n self.exh.corrected_reading = np.array(worksheet.col_values(0,\n start_rowx=self.start_rowx, end_rowx=self.end_rowx)) \n self.exh.datum = worksheet.cell_value(2,4) # manometer datum (in) \n self.exh.pressure_drop = ( (self.exh.corrected_reading -\n self.exh.datum) * 2. * self.H2O_kPa ) \n # pressure drop across heat exchanger (kPa)\n self.cummins.torque = np.array(worksheet.col_values(1,\n start_rowx=self.start_rowx, end_rowx=self.end_rowx))\n self.exh.T_inlet_array = np.array(worksheet.col_values(2,\n start_rowx=self.start_rowx, end_rowx=self.end_rowx)) \n self.exh.T_outlet_array = np.array(worksheet.col_values(3,\n start_rowx=self.start_rowx, end_rowx=self.end_rowx)) \n self.cool.T_inlet_array = np.array(worksheet.col_values(5,\n start_rowx=self.start_rowx, end_rowx=self.end_rowx)) \n self.cool.T_outlet_array = np.array(worksheet.col_values(4,\n start_rowx=self.start_rowx, end_rowx=self.end_rowx))", "def calibrate(science_list_fname, master_flat_fname, master_dark_fname, hp_map_fname, bp_map_fname, mask_bad_pixels = False,\n clean_Bad_Pix=True, replace_nans=True, background_fname = None, outdir = None):\n\n #Get the list of science frames\n #science_list = np.loadtxt(science_list_fname, dtype=str)\n science_list = science_list_fname\n\n #Open the master dark\n master_dark_hdu = f.open(master_dark_fname)\n master_dark = master_dark_hdu[0].data\n dark_shape = np.shape(master_dark)\n print((\"Subtracting {} from each flat file\".format(master_dark_fname)))\n dark_exp_time = master_dark_hdu[0].header['EXPTIME']\n\n #Open the master flat\n master_flat_hdu = f.open(master_flat_fname)\n master_flat = master_flat_hdu[0].data\n print((\"Dividing each file by {}\".format(master_flat_fname)))\n dark_exp_time = master_dark_hdu[0].header['EXPTIME']\n\n #Open the bad pixel map from flat\n bp_map_hdu = f.open(bp_map_fname)\n bad_pixel_map = bp_map_hdu[0].data\n bad_pixel_map_bool = np.array(bad_pixel_map, dtype=bool)\n print((\"Using bad pixel map {}\".format(bp_map_fname)))\n\n #now if hot pixel map from dark is also given\n if hp_map_fname != None:\n hp_map_hdu = 
f.open(hp_map_fname)\n hot_pixel_map = hp_map_hdu[0].data\n bad_pixel_map_bool = np.logical_or(bad_pixel_map_bool, hot_pixel_map.astype(bool) )\n\n\n if background_fname != None:\n background_hdu = f.open(background_fname)\n background = background_hdu[0].data\n print(\"Subtracting background frame {} from all science files\".format(background_fname))\n\n\n for fname in science_list:\n #Open the file\n print((\"Calibrating {}\".format(fname\n )))\n hdu = f.open(fname)\n data = hdu[0].data\n science_exp_time = hdu[0].header['EXPTIME']\n\n if dark_exp_time != science_exp_time:\n warnings.warn(\"The master dark file doesn't have the same exposure time as the data. We'll scale the dark for now, but this isn't ideal\", UserWarning)\n factor = science_exp_time/dark_exp_time\n else:\n factor = 1.\n\n #Subtract the dark, divide by flat\n redux = ((data - factor*master_dark)/master_flat)\n #get rid of crazy values at bad pixel\n redux = redux*~bad_pixel_map_bool\n\n if background_fname != None:\n redux -= background\n\n if clean_Bad_Pix:\n # plt.plot(bad_pixel_map_bool)\n redux = cleanBadPix(redux, bad_pixel_map_bool)\n #redux = ccdproc.cosmicray_lacosmic(redux, sigclip=5)[0]\n\n # redux = ccdproc.cosmicray_median(redux, mbox=7, rbox=5, gbox=7)[0]\n\n #Mask the bad pixels if the flag is set\n if mask_bad_pixels:\n redux *= ~bad_pixel_map_bool\n\n if replace_nans:\n # nan_map = ~np.isfinite(redux)\n # redux = cleanBadPix(redux, nan_map)\n # plt.imshow(redux-after)\n nanmask = np.isnan(redux) #nan = True, just in case this is useful\n redux = np.nan_to_num(redux)\n\n #Put the cablibrated data back in the HDU list\n hdu[0].data = redux\n\n #Add pipeline version and history keywords\n vers = version.get_version()\n hdu[0].header.set('PL_VERS',vers,'Version of pipeline used for processing')\n hdu[0].header['HISTORY'] = \"Subtracting {} from each flat file\".format(master_dark_fname)\n hdu[0].header['HISTORY'] = \"Dividing each file by {}\".format(master_flat_fname)\n\n if background_fname != None:\n hdu[0].header['HISTORY'] = \"Subtracted background frame {}\".format(background_fname)\n\n if mask_bad_pixels:\n hdu[0].header['HISTORY'] = \"Masking all bad pixels found in {}\".format(bp_map_fname)\n\n if clean_Bad_Pix:\n hdu[0].header['HISTORY'] = \"Cleaned all bad pixels found in {} using a median filter\".format(bp_map_fname)\n\n # #Append the bad pixel list to the HDU list\n # hdu.append(f.PrimaryHDU([bad_pixel_map]))\n # hdu[1].header['HISTORY'] = \"Appending bad pixel map :{}\".format(bp_map_fname)\n # hdu[1].header['HISTORY'] = \"0 = good pixel\"\n # hdu[1].header['HISTORY'] = \"1 = bad pixel from flat fields\"\n # hdu[1].header['HISTORY'] = \"2 = hot pixel from darks\"\n\n outname = fname.split('.')[0]+\"_calib.fits\"\n\n #if an output directory is specified we can write out to that directory instead\n #making sure to take only the stuff after the last '/' to avoid directory issues from fname\n if outdir:\n outname = outdir + fname.split('/')[-1]\n\n print((\"Writing calibrated file to {}\".format(outname)))\n #Save the calibrated file\n hdu.writeto(outname, overwrite=True)\n\n # f.PrimaryHDU(redux).writeto('redux_'+i, overwrite = True)", "def gyroHF2(self, GYRO, PFC):\n print(\"Calculating gyro orbit heat loads\")\n log.info(\"Calculating gyro orbit heat loads\")\n #get divertor HF\n qDiv = PFC.qDiv[PFC.PFC_GYROmap] / self.elecFrac\n Pdiv = qDiv * PFC.areas[PFC.PFC_GYROmap]\n #Get fractional multipliers for each helical trace\n gyroFrac = 1.0/GYRO.N_gyroPhase\n vPhaseFrac = 1.0/GYRO.N_vPhase\n 
vSliceFrac = GYRO.energyFracs\n #qMatrix = np.zeros((GYRO.N_gyroPhase,GYRO.N_vPhase,GYRO.N_vSlice,len(q)))\n Pgyro = np.zeros((GYRO.Nt))\n PNaN = 0.0\n sum=0\n sum1=0\n #loop through intersect record and redistribute power using multipliers\n for gyroPhase in range(GYRO.N_gyroPhase):\n for vPhase in range(GYRO.N_vPhase):\n for vSlice in range(GYRO.N_vSlice):\n idx = GYRO.intersectRecord[gyroPhase,vPhase,vSlice,PFC.CADHOT_GYROmap]\n hdotn = np.abs(GYRO.hdotn[gyroPhase,vPhase,vSlice,PFC.CADHOT_GYROmap])\n isNanFrom = np.where(np.isnan(idx)==True)[0] #include NaNs (NaNs = no intersection) index we map from\n notNanFrom = np.where(np.isnan(idx)==False)[0] #dont include NaNs (NaNs = no intersection) index we map from\n notNanTo = idx[~np.isnan(idx)] #indices we map power to\n notNanTo = notNanTo.astype(int) #cast as integer\n isNanTo = idx[np.isnan(idx)] #indices we map power to\n isNanTo = isNanTo.astype(int) #cast as integer\n\n if len(notNanFrom)>0:\n #multiple sources can load the same target face, so we loop\n for i in range(len(notNanFrom)):\n Pgyro[notNanTo[i]] += Pdiv[notNanFrom[i]]*GYRO.ionFrac*gyroFrac*vPhaseFrac*vSliceFrac[notNanFrom[i],vSlice]\n\n if len(isNanFrom)>0:\n PNaN += np.sum(Pdiv[isNanFrom]*GYRO.ionFrac*gyroFrac*vPhaseFrac*vSliceFrac[isNanFrom,vSlice])\n\n\n GYRO.gyroPowMatrix += Pgyro\n GYRO.gyroNanPower += PNaN\n return", "def heat_method(self, row):\n if row['Indexer'] == \"Heating\":\n return row['HeatCool']\n return 0", "def power_output_existing_thermal_rule(_m, g, y, s, t):\r\n\r\n if y != m.Y.last() and t != m.T.last():\r\n return (- m.sigma_1[g, y, s, t] + m.sigma_2[g, y, s, t]\r\n + m.sigma_20[g, y, s, t] - m.sigma_20[g, y, s, t + 1]\r\n - m.sigma_23[g, y, s, t] + m.sigma_23[g, y, s, t + 1]\r\n - m.lamb[self.k(m, g), y, s, t]\r\n + ((m.DELTA[y] * m.RHO[y, s]) * (\r\n m.C_MC[g, y] + ((m.EMISSIONS_RATE[g] - m.baseline[y]) * m.permit_price[y])))\r\n == 0)\r\n\r\n elif y != m.Y.last() and t == m.T.last():\r\n return (- m.sigma_1[g, y, s, t] + m.sigma_2[g, y, s, t]\r\n + m.sigma_20[g, y, s, t]\r\n - m.sigma_23[g, y, s, t]\r\n - m.lamb[self.k(m, g), y, s, t]\r\n + ((m.DELTA[y] * m.RHO[y, s]) * (\r\n m.C_MC[g, y] + ((m.EMISSIONS_RATE[g] - m.baseline[y]) * m.permit_price[y])))\r\n == 0)\r\n\r\n elif y == m.Y.last() and t != m.T.last():\r\n return (- m.sigma_1[g, y, s, t] + m.sigma_2[g, y, s, t]\r\n + m.sigma_20[g, y, s, t] - m.sigma_20[g, y, s, t + 1]\r\n - m.sigma_23[g, y, s, t] + m.sigma_23[g, y, s, t + 1]\r\n - m.lamb[self.k(m, g), y, s, t]\r\n + ((m.DELTA[y] * m.RHO[y, s]) * (1 + (1 / m.INTEREST_RATE)) * (\r\n m.C_MC[g, y] + ((m.EMISSIONS_RATE[g] - m.baseline[y]) * m.permit_price[y])))\r\n == 0)\r\n\r\n elif y == m.Y.last() and t == m.T.last():\r\n return (- m.sigma_1[g, y, s, t] + m.sigma_2[g, y, s, t]\r\n + m.sigma_20[g, y, s, t]\r\n - m.sigma_23[g, y, s, t]\r\n - m.lamb[self.k(m, g), y, s, t]\r\n + ((m.DELTA[y] * m.RHO[y, s]) * (1 + (1 / m.INTEREST_RATE)) * (\r\n m.C_MC[g, y] + ((m.EMISSIONS_RATE[g] - m.baseline[y]) * m.permit_price[y])))\r\n == 0)\r\n\r\n else:\r\n raise Exception(f'Unhandled case: {g, y, s, t}')", "def old():\n therm = [[300.], [300.], [300.], [300.], [300.], [300.], [300.], [300.], [300.], [300.]]\n ts = np.linspace(0, 50, 1000)\n\n #odvod temperature bo vsota gradientov (diferencialov) z desne in z leve glede x\n #dT/dt[i] = K/x^2 * (temperature[i-1]- 2*temperature[i] + temperature[i+1])\n #razen ce je robna tocka\n #potem je treba nekaj scarat - robna bo funkcija\n def odvod(indeks, arr, K, time):\n odvodt = K * (arr[indeks-1][time] - 
2*arr[indeks][time] + arr[indeks+1][time])\n return odvodt\n\n def robna(time):\n return 5*m.cos(0.05*time)\n\n\n K = 0.02\n x = 0.003\n\n def main_old():\n t = 0\n dt = 50. / 1000.\n for time in ts:\n for i in range(0,9):\n therm[i].append(therm[i][t] + (robna(time) if i==0 else odvod(i, therm, K, t)*dt/(x**2)))\n therm[9].append(300.)\n t+=1\n\n import matplotlib.pyplot as plt\n\n plt.plot(ts[:], therm[4][:-1], label = 'T(t)')\n plt.show()\n \n main_old()", "def get_recovery_variables(self):\n if self.m is None or self.dirty is True:\n m, n, h = self.simulator.get_recovery_variables()\n n_compartments = self.neuron_collection.total_compartments()\n self.m = np.array(m).reshape([len(m) / n_compartments, n_compartments])\n self.n = np.array(n).reshape([len(n) / n_compartments, n_compartments])\n self.h = np.array(h).reshape([len(h) / n_compartments, n_compartments])\n\n self.dirty = False\n t = int(self.T / self.dt)\n return self.m[:t, :], self.n[:t, :], self.h[:t, :]", "def calc_reduction_diesel_used (self):\n self.reduction_diesel_used = self.diesel_equiv_captured - \\\n self.loss_heat_recovery\n #~ print 'self.reduction_diesel_used',self.reduction_diesel_used", "def calculate(self):\n\n tp = self.confusion_matrix.tp\n tn = self.confusion_matrix.tn\n fp = self.confusion_matrix.fp\n fn = self.confusion_matrix.fn\n n = self.confusion_matrix.n\n\n fn_tp = fn + tp\n fp_tp = fp + tp\n\n h1 = -((fn_tp / n) * math.log2(fn_tp / n) +\n (1 - fn_tp / n) * math.log2(1 - fn_tp / n))\n\n h2 = -((fp_tp / n) * math.log2(fp_tp / n) +\n (1 - fp_tp / n) * math.log2(1 - fp_tp / n))\n\n p00 = 1 if tn == 0 else (tn / n)\n p01 = 1 if fn == 0 else (fn / n)\n p10 = 1 if fp == 0 else (fp / n)\n p11 = 1 if tp == 0 else (tp / n)\n\n h12 = -((tn / n) * math.log2(p00) +\n (fn / n) * math.log2(p01) +\n (fp / n) * math.log2(p10) +\n (tp / n) * math.log2(p11))\n\n mi = h1 + h2 - h12\n\n vi = h1 + h2 - 2 * mi\n return vi", "def effective_temperature(R,times):\n T_eff = np.empty((len(R),times))\n #T_c = ((3 * G * M_body) / (8 * np.pi * sigma)) #constants for a blackbody \n for i in range (len(R)):\n for t in range(times): \n T_eff[i][t] = ((m_dot[i][t])/(R[i]**3.0))**(1.0/4.0)\n return T_eff", "def get_heat_loss_coefficient_of_partition() -> float:\n return 1 / 0.46", "def getHeatFlux(self, T):\n\t\tQ = self.heat_transfer_coefficient * (self.T_wall - T)\n\t\treturn Q", "def calc_heat_sum(tmin, tmax, tbase=6.0):\n tmax = min(tmax, 30.0)\n tx = (tmin + tmax) / 2.0\n return max(tx - tbase, 0.0)", "def _calculate_heater_resistance(self, target_temp):\n if target_temp > 400: #Maximum temperature\n target_temp = 400\n\n var1 = (calGH1 / 16.0) + 49.0\n var2 = ((calGH2 / 32768.0) * 0.0005) + 0.00235\n var3 = calGH3 / 1024.0\n var4 = var1 * (1.0 + (var2 * target_temp))\n var5 = var4 + (var3 * self.calAmbTemp)\n res_heat = 3.4 * ((var5 * (4 / (4 + calResHeatRange)) * (1 / (1 + (calResHeatVal * 0.002)))) - 25)\n\n return int(res_heat)", "def power_output_candidate_thermal_rule(_m, g, y, s, t):\r\n\r\n if y != m.Y.last() and t != m.T.last():\r\n return (- m.sigma_1[g, y, s, t] + m.sigma_3[g, y, s, t]\r\n + m.sigma_20[g, y, s, t] - m.sigma_20[g, y, s, t + 1]\r\n - m.sigma_23[g, y, s, t] + m.sigma_23[g, y, s, t + 1]\r\n - m.lamb[self.k(m, g), y, s, t]\r\n + ((m.DELTA[y] * m.RHO[y, s]) * (\r\n m.C_MC[g, y] + ((m.EMISSIONS_RATE[g] - m.baseline[y]) * m.permit_price[y])))\r\n == 0)\r\n\r\n elif y != m.Y.last() and t == m.T.last():\r\n return (- m.sigma_1[g, y, s, t] + m.sigma_3[g, y, s, t]\r\n + m.sigma_20[g, y, s, t]\r\n - m.sigma_23[g, 
y, s, t]\r\n - m.lamb[self.k(m, g), y, s, t]\r\n + ((m.DELTA[y] * m.RHO[y, s]) * (\r\n m.C_MC[g, y] + ((m.EMISSIONS_RATE[g] - m.baseline[y]) * m.permit_price[y])))\r\n == 0)\r\n\r\n elif y == m.Y.last() and t != m.T.last():\r\n return (- m.sigma_1[g, y, s, t] + m.sigma_3[g, y, s, t]\r\n + m.sigma_20[g, y, s, t] - m.sigma_20[g, y, s, t + 1]\r\n - m.sigma_23[g, y, s, t] + m.sigma_23[g, y, s, t + 1]\r\n - m.lamb[self.k(m, g), y, s, t]\r\n + ((m.DELTA[y] * m.RHO[y, s]) * (1 + (1 / m.INTEREST_RATE)) * (\r\n m.C_MC[g, y] + ((m.EMISSIONS_RATE[g] - m.baseline[y]) * m.permit_price[y])))\r\n == 0)\r\n\r\n elif y == m.Y.last() and t == m.T.last():\r\n return (- m.sigma_1[g, y, s, t] + m.sigma_3[g, y, s, t]\r\n + m.sigma_20[g, y, s, t]\r\n - m.sigma_23[g, y, s, t]\r\n - m.lamb[self.k(m, g), y, s, t]\r\n + ((m.DELTA[y] * m.RHO[y, s]) * (1 + (1 / m.INTEREST_RATE)) * (\r\n m.C_MC[g, y] + ((m.EMISSIONS_RATE[g] - m.baseline[y]) * m.permit_price[y])))\r\n == 0)\r\n\r\n else:\r\n raise Exception(f'Unhandled case: {g, y, s, t}')", "def compute_audit(self):\r\n \r\n time = datetime.now()\r\n H0_dist = []\r\n Ha_dist = []\r\n\r\n for i in range(0, self.m):\r\n #print(\"CURRENT H0 dist: \", H0_dist)\r\n #try:\r\n H0_dist = self.next_round_dist(True, H0_dist, i)\r\n Ha_dist = self.next_round_dist(False, Ha_dist, i)\r\n '''\r\n except Exception as e:\r\n \r\n print(e)\r\n self.bad = H0_dist\r\n self.bad2 = Ha_dist\r\n return\r\n '''\r\n self.decide_k_min(H0_dist, Ha_dist, i)\r\n #print('ROUND INDEX: ',i,'kminschedl: ',self.k_min_sched[i])\r\n\r\n #self.truncate_dist(H0_dist, i)\r\n H0_dist = H0_dist[:self.k_min_sched[i]]\r\n #self.truncate_dist(Ha_dist, i)\r\n Ha_dist = Ha_dist[:self.k_min_sched[i]]\r\n \r\n #print(\"The outputs: k_mins, LR denominator, LR numerator, 1 / LR (or alpha').\")\r\n #print(self.k_min_sched, '\\n', self.pr_H0_sched, '\\n', self.pr_Ha_sched, '\\n', \r\n #self.risk_sched)\r\n #print(\"Output suppressed. 
Use instance variables k_min_sched, pr_H0_sched, pr_Ha_sched, risk_sched\")\r\n\r\n #print(\"Time elapsed:\", datetime.now() - time)\r", "def calculate(self):\n\n tp = self.confusion_matrix.tp\n tn = self.confusion_matrix.tn\n fp = self.confusion_matrix.fp\n fn = self.confusion_matrix.fn\n n = self.confusion_matrix.n\n\n fn_tp = fn + tp\n fp_tp = fp + tp\n\n h1 = -((fn_tp / n) * math.log2(fn_tp / n) +\n (1 - fn_tp / n) * math.log2(1 - fn_tp / n))\n\n h2 = -((fp_tp / n) * math.log2(fp_tp / n) +\n (1 - fp_tp / n) * math.log2(1 - fp_tp / n))\n\n p00 = 1 if tn == 0 else (tn / n)\n p01 = 1 if fn == 0 else (fn / n)\n p10 = 1 if fp == 0 else (fp / n)\n p11 = 1 if tp == 0 else (tp / n)\n\n h12 = -((tn / n) * math.log2(p00) +\n (fn / n) * math.log2(p01) +\n (fp / n) * math.log2(p10) +\n (tp / n) * math.log2(p11))\n\n mi = h1 + h2 - h12\n return mi", "def calculate_ft(self):\n \n # Create a function which is able to evaluate B**2\n ffunc = scipy.interpolate.interp1d(self.psigrid, self.e.getF()[self.tind])\n def b2_func(R, Z, psi):\n bt = ffunc(psi)/R\n br = -self.psifunc.ev(R, Z, dy=1)/R\n bz = self.psifunc.ev(R, Z, dx=1)/R\n \n return bt**2 + br**2 + bz**2\n \n\n def b_bmax2(R,Z,psi):\n b2 = b2_func(R,Z,psi)\n return b2 / np.max(b2)\n \n def b_bmax(R,Z,psi):\n return np.sqrt(b_bmax2(R,Z,psi))\n \n # Evaluate the flux-surface averaged h^2 and h, as required\n fsa_h2 = self.fs_average(b_bmax2)\n fsa_h = self.fs_average(b_bmax)\n \n # This is the function which gets flux-surface averaged in equation (7)\n def ftl_func(R,Z,psi):\n h = b_bmax(R,Z,psi)\n h2 = b_bmax2(R,Z,psi)\n \n return (1 - (np.sqrt(1 - h) * (1 + 0.5 * h)))/h2\n \n \n # Equation 6, 7 in Lin-Liu\n fs_ftu = 1 - fsa_h2 / fsa_h**2 * (1 - np.sqrt(1 - fsa_h) * (1 + 0.5 * fsa_h))\n fs_ftl = 1 - fsa_h2 * self.fs_average(ftl_func)\n # Equation 18, 19 \n om = 0.75\n self.fs_ft = om*fs_ftu + (1-om)*fs_ftl", "def sim_an_reheat(neighborhood, Tmax, Tmin, iterations):\n\n temp = Tmax\n\n # set iteration number to 0 at start\n n = 0\n\n plot_list = []\n\n current_costs = neighborhood.get_total_costs()\n cooling_rate = float(Tmin/Tmax)**(1/iterations)\n\n reheat_moment = 0.99\n reheat_factor = 0.7\n\n reheated = False\n\n while n < iterations:\n\n\n if n == iterations * reheat_moment and not reheated:\n print(f\"costs before: {neighborhood.get_total_costs()}\")\n n -= (n * reheat_factor)\n # print(f\"n aftere: {n}\")\n # print(\"yes\")\n reheated = True\n\n\n # adjust temperature according to exponential cooling scheme\n temp = Tmax * (float(cooling_rate)**float(n))\n #\n # print(f\"{n}, {temp}\")\n\n swap_succes = False\n while not swap_succes:\n cable_1 = random.choice(neighborhood.cables)\n cable_2 = random.choice(neighborhood.cables)\n swap_succes = neighborhood.swap_connection(cable_1, cable_2)\n\n new_costs = neighborhood.get_total_costs()\n if (acceptance_probability(current_costs, new_costs, temp) > random.random()):\n current_costs = new_costs\n else:\n cable_1 = neighborhood.cables[-1]\n cable_2 = neighborhood.cables[-2]\n neighborhood.swap_connection(cable_1, cable_2)\n\n plot_list.append(current_costs)\n n += 1\n\n print(f\"costs after: {neighborhood.get_total_costs()}\")", "def get_fuel_total_saved (self):\n #~ print self.lost_heat_recovery\n #~ print self.intertie_offset_generation_fuel_used\n #~ print self.pre_intertie_generation_fuel_used\n #~ gen_eff = self.cd[\"diesel generation efficiency\"]\n #~ fuel_used = self.intertie_offset_generation / gen_eff\n\n generation_diesel_reduction = \\\n 
np.array(self.pre_intertie_generation_fuel_used\\\n [:self.actual_project_life])\n return - np.array(self.lost_heat_recovery[:self.actual_project_life]) +\\\n generation_diesel_reduction", "def reconstruct_missing(self, \n num_iters, \n recon_images, \n which_pixels):\n \n N = recon_images.shape[0]\n \n if self.num_hidden == 0:\n \n tE = -self.compute_energy(self.x, 1)\n \n elif self.num_hidden > 0:\n \n tE = -self.compute_free_energy(self.x)\n \n do_computation = theano.function(inputs =[self.x], outputs = tE)\n \n for xi in range(N):\n \n print(\"Reconstructing test image --- %d\"%xi)\n \n for iter_ind in range(num_iters):\n \n permuted_nodes = list(np.random.permutation(self.num_vars))\n \n for d in permuted_nodes:\n \n if which_pixels[xi,d]:\n \n recon_images[xi, d] = 1\n \n E1 = do_computation([recon_images[xi, :]])\n \n recon_images[xi, d] = 0\n \n E0 = do_computation([recon_images[xi, :]])\n \n if E1>E0:\n recon_images[xi, d] = 1\n else:\n recon_images[xi, d] = 0", "def IR():\n s = np.array(\n [2.40774137,2.287696084,2.203613927,2.048710132,1.899829585,1.591776247,\n 2.021218754,2.572949552,3.298381484,3.635993426,3.788266224,3.8307278,3.834208811]\n )\n\n TI = np.array([50, 75, 100, 150, 200, 300, 400, 500, 750, 1000, 1500, 2000, 3000])\n\n comp1 = s * np.array([-159.1,-134.2,-109.1,-64.7,25.0,40.1,88.6,126.8,187.6,219.4,245.4,253.6,256.1])\n comp2 = s * np.array([-368.3,-356.9,-343.8,-318.1,-292.0,-242.5,-199.3,-158.4,-68.8,14.2,131.9,219.5,333.5])\n comp3 = s * np.array([-77.5,-51.9,-29.8,9.9,40.2,85.7,115.4,135.1,160.1,167.6,172.3,171.7,171.8])\n comp4 = s * np.array([-265.0,-240.6,-216.7,-170.5,-128.2,-53.5,9.6,62.3,159.7,223.8,296.5,328.3,346.7])\n comp5 = s * np.array([-346.5,-328.9,-312.1,-278.5,-244.4,-182.3,-128.0,-80.0,30.8,109.3,225.1,299.5,372.2])\n\n comp = [comp1, comp2, comp3, comp4, comp5]\n MSE = []\n colors = [\"#1f77b4\", \"#ff7f0e\", \"#2ca02c\", \"#d62728\", \"#9467bd\"]\n x_new = np.linspace(0, 3000, 10000)\n for i, j, k in zip(comp, colors, np.arange(1, 6)):\n plt.scatter(TI, i, c=j)\n # popt, _ = curve_fit(MZ, TI, i, p0=np.array([200, 220, 300]))\n popt, _ = curve_fit(MZ, TI, i, p0=np.array([300, 220]))\n # M_z0, T1, M0 = popt\n M0, T1 = popt\n y_new = MZ(x_new, *popt)\n plt.plot(x_new, y_new, \"--\", c=j, label=f\"Fit Comp. 
{k:d} : $T_1$={T1:3.2f}\")\n MSE.append(mean_squared_error(i,y_new[TI]))\n print(MSE)\n print(np.mean(MSE))\n plt.grid()\n plt.legend(loc=\"best\")\n plt.xlabel(\"TI\")\n plt.ylabel(r\"Singal Intensity $M_z$\")\n plt.show()", "def calculate_HC(rootgrps,depth_from,depth_to,lat,lon,calculate_errors = False):\n depth_from, depth_to = depth_ind(rootgrps, depth_from, depth_to)\n depths = rootgrps[0][\"depth\"][depth_from:depth_to]\n xs = np.arange(depths[0],depths[len(depths)-1],1) #depths spaced by 1m\n ones = np.zeros((len(xs))) #ones for the dot product with thetas (equivalent to summing over thetas)\n for i in range(len(ones)):\n ones[i] = 1\n HC = np.zeros((len(rootgrps)))\n \n if calculate_errors == False or depth_to>25:\n for i in range(len(rootgrps)):\n temps = rootgrps[i][\"temperature\"][0,depth_from:depth_to,lat+83,lon-1]\n cs = interp1d(depths,temps,kind='cubic')\n theta = cs(xs)\n HC[i] = np.dot(theta,ones) #sums over all interpolated temperatures more efficiently\n HC *= rho*Cp\n return HC\n \n else:\n HC_Err = np.zeros((len(rootgrps)))\n for i in range(len(rootgrps)):\n temps = rootgrps[i][\"temperature\"][0,depth_from:depth_to,lat+83,lon-1]\n temps_Err = rootgrps[i][\"temperature_uncertainty\"][0,depth_from:depth_to,lat+83,lon-1]\n cs = interp1d(depths,temps,kind='cubic')\n cs_err = interp1d(depths,temps_Err,kind='cubic')\n theta = cs(xs)\n #theta_err = cs_err(xs)\n HC[i] = np.dot(theta,ones)\n #HC_Err[i] = np.dot(theta_e)\n HC *= rho*Cp\n HC_Err *= rho*Cp\n return HC", "def diffusion(nt, nx, tmax, xmax, nu):\n # Increments\n dt = tmax/(nt-1)\n dx = xmax/(nx-1)\n plate_length = xmax\n max_iter_time = tmax\n\n alpha = nu\n delta_x = dx\n delta_t = (delta_x ** 2)/(4 * alpha)\n \n x = np.zeros(nx)\n t = np.zeros(nt)\n\n #delta_t = (delta_x ** 2)/(4 * alpha)\n gamma = (alpha * delta_t) / (delta_x ** 2)\n\n # Initialize solution: the grid of u(k, i)\n u = np.empty((nx, nt))\n\n # Initial condition everywhere inside the grid\n u_initial = np.random.uniform(low=28.5, high=55.5, size=(nx))\n\n # Boundary conditions\n u_top = 100\n u_bottom = 0.0\n\n # Set the initial condition\n u[:,0] = u_initial\n\n # Set the boundary conditions\n u[(nx-1):,:] = u_top\n u[:1,:] = u_bottom\n\n if dt <= (dx**2)/(2*alpha):\n print(\"you are lucky\")\n else: \n print(\"hmmm\",dt,(dx**2)/(4*alpha))\n for k in range(0, nt-1):\n for i in range(1, nx-1):\n u[i,k + 1] = gamma * (u[i+1][k] + u[i-1][k] - 2*u[i][k]) + u[i][k]\n\n\n # X Loop\n for i in range(0,nx):\n x[i] = i*dx\n # T Loop\n for i in range(0,nt):\n t[i] = i*dt\n return u, x, t", "def compute(self): \n Ex=np.zeros((self.nx,self.ny+1))\n Ey=np.zeros((self.nx+1,self.ny))\n Hz=np.zeros((self.nx,self.ny))\n Hzx=np.zeros((self.nx,self.ny))\n Hzy=np.zeros((self.nx,self.ny))\n \n imx = []\n #eps, mu = self.makeenv()\n mu=np.ones((self.nx,self.ny))*const.mu_0\n eps = self.luneberg(int(self.nx/2), int(self.ny*2/3), self.R)\n eps[:20,:] *= self.q #adself.ds a space of higher permittivity \n eps[-20:,:] *= self.q #adself.ds a space of higher permittivity \n eps[:,:20] *= self.q #adself.ds a space of higher permittivity \n eps[:,-20:] *= self.q #adself.ds a space of higher permittivity \n\n c = self.dt/(eps*self.ds)\n d = self.dt/(mu* self.ds)\n \n sigma = self.pml(eps, mu, 20)\n cax = 1 - (sigma[0] * self.dt / eps)\n cay = 1 - (sigma[1] * self.dt / eps)\n dax = 1 - (sigma[2] * self.dt / mu) \n day = 1 - (sigma[3] * self.dt / mu)\n \n bar = progressbar.ProgressBar()\n for n in bar(range(self.nt+1)):\n Ex[:,1:-1] = (cay[:,1:]+cay[:,:-1])/2*Ex[:,1:-1] + 
(c[:,1:]+c[:,:-1])/2*(Hz[:,1:]-Hz[:,:-1])\n Ey[1:-1,:] = (cax[1:,:]+cax[:-1,:])/2*Ey[1:-1,:] - (c[1:,:]+c[:-1,:])/2*(Hz[1:,:]-Hz[:-1,:])\n \n Hzx = dax*Hzx - d*(Ey[1:,:] - Ey[:-1,:])\n Hzy = day*Hzy + d*(Ex[:,1:] - Ex[:,:-1]) \n Hz = Hzx + Hzy + self.actualsource(self.source, self.f, n, self.dt) \n \n if(n%self.interval == 0): imx.append(Ex[:self.nx,:self.ny]**2 + Ey[:self.nx, :self.ny]**2)\n\n return imx", "def heat_vaporization_func(ts):\n heat_vaporization = np.copy(ts).astype(np.float64)\n heat_vaporization -= 273.15\n heat_vaporization *= -0.00236\n heat_vaporization += 2.501\n heat_vaporization *= 1E6\n return heat_vaporization.astype(np.float32)", "def process_tir_image(ds, data_res, t_thresh=-50, min_mcs_size=5000):\n ctt = (ds['tb']).squeeze()-273.15\n min_pix_nb = min_mcs_size / data_res**2\n\n max_pix_nb = 300000 / data_res**2 # this is to capture satellite artefacts that come in large contiguous stripes.\n labels, goodinds = mcs_define(ctt.values, t_thresh, minmax_area=[min_pix_nb, max_pix_nb]) # 7.7x7.7km = 64km2 per pix in gridsat? 83 pix is 5000km2\n dic = dictionary()\n #plt.figure()\n #plt.pcolormesh(labels)\n #plt.colorbar()\n #plt.show()\n for g in goodinds:\n\n if g==0:\n continue\n\n pos = np.where(labels==g)\n npos = np.where(labels!=g)\n datestr = str(int(ctt['time.year'].values))+'-'+str(int(ctt['time.month'].values)).zfill(2)+'-'+str(int(ctt['time.day'].values)).zfill(2)+'_'+\\\n str(int(ctt['time.hour'].values)).zfill(2)+':'+str(int(ctt['time.minute'].values)).zfill(2)\n \n dic['date'].append(datestr)\n dic['month'].append(int(ctt['time.month']))\n dic['hour'].append(int(ctt['time.hour']))\n dic['year'].append(int(ctt['time.year']))\n dic['day'].append(int(ctt['time.day']))\n dic['minute'].append(int(ctt['time.minute']))\n\n storm = ctt.copy()\n storm.values[npos] = np.nan\n tmin_pos = np.nanargmin(storm.values)\n tpos_2d = np.unravel_index(tmin_pos, storm.shape)\n \n latmin = np.nanmin(ctt.lat.values[pos[0]])\n latmax = np.nanmax(ctt.lat.values[pos[0]])\n lonmin = np.nanmin(ctt.lon.values[pos[1]])\n lonmax = np.nanmax(ctt.lon.values[pos[1]])\n dic['area'].append(np.sum(np.isfinite(storm.values))*data_res**2)\n dic['70area'].append(np.sum(storm.values<=-70)*data_res**2)\n dic['minlon'].append(lonmin)\n dic['minlat'].append(latmin)\n dic['maxlon'].append(lonmax)\n dic['maxlat'].append(latmax)\n dic['clon'].append(lonmin + (lonmax - lonmin)/2)\n dic['clat'].append(latmin + (latmax - latmin)/2)\n dic['tmin'].append(np.nanmin(storm))\n dic['tminlat'].append(float(ctt.lat[tpos_2d[0]].values))\n dic['tminlon'].append(float(ctt.lon[tpos_2d[1]].values))\n dic['tmean'].append(float(np.nanmean(storm)))\n dic['tp1'].append(float(np.nanpercentile(storm, 1)))\n dic['tp99'].append(float(np.nanpercentile(storm, 99)))\n dic['stormID'].append(datestr + '_' + str(g))\n dic['cloudMask'].append(labels==g)\n dic['tir'].append(storm.values)\n\n # for k in dic.keys():\n # print(k, len(dic[k]))\n return dic", "def calc_cop_CCGT(GT_size_W, T_sup_K, fuel_type):\n\n it_len = 50\n\n # create empty arrays\n range_el_output_CC_W = np.zeros(it_len)\n range_q_output_CC_W = np.zeros(it_len)\n range_eta_el_CC = np.zeros(it_len)\n range_eta_thermal_CC = np.zeros(it_len)\n range_q_input_CC_W = np.zeros(it_len)\n\n # create range of electricity output from the GT between the minimum and nominal load\n range_el_output_from_GT_W = np.linspace(GT_size_W * GT_MIN_PART_LOAD, GT_size_W, it_len)\n\n # calculate the operation data at different electricity load\n for i in 
range(len(range_el_output_from_GT_W)):\n el_output_from_GT_W = range_el_output_from_GT_W[i]\n\n # combine cycle operation\n CC_operation = calc_CC_operation(el_output_from_GT_W, GT_size_W, fuel_type, T_sup_K)\n range_el_output_CC_W[i] = CC_operation['el_output_W'] # Electricity output from the combined cycle\n range_q_output_CC_W[i] = CC_operation['q_output_ST_W'] # Thermal output from the combined cycle\n range_eta_el_CC[i] = CC_operation['eta_el'] # el. efficiency\n range_eta_thermal_CC[i] = CC_operation['eta_thermal'] # thermal efficiency\n\n range_q_input_CC_W[i] = range_q_output_CC_W[i] / range_eta_thermal_CC[i] # thermal energy input\n\n # create interpolation functions as a function of heat output\n el_output_interpol_with_q_output_W = interpolate.interp1d(range_q_output_CC_W, range_el_output_from_GT_W,\n kind=\"linear\")\n q_input_interpol_with_q_output_W = interpolate.interp1d(range_q_output_CC_W, range_q_input_CC_W, kind=\"linear\")\n\n # create interpolation functions as a function of thermal energy input\n eta_el_interpol_with_q_input = interpolate.interp1d(range_q_input_CC_W, range_eta_el_CC,\n kind=\"linear\")\n\n q_output_min_W = min(range_q_output_CC_W)\n q_output_max_W = max(range_q_output_CC_W)\n\n return {'el_output_fn_q_output_W': el_output_interpol_with_q_output_W,\n 'q_input_fn_q_output_W': q_input_interpol_with_q_output_W,\n 'q_output_min_W': q_output_min_W, 'q_output_max_W': q_output_max_W,\n 'eta_el_fn_q_input': eta_el_interpol_with_q_input}", "def fluxes_to_heating_rate(example_dict):\n\n down_flux_matrix_w_m02 = get_field_from_dict(\n example_dict=example_dict, field_name=SHORTWAVE_DOWN_FLUX_NAME\n )\n up_flux_matrix_w_m02 = get_field_from_dict(\n example_dict=example_dict, field_name=SHORTWAVE_UP_FLUX_NAME\n )\n pressure_matrix_pascals = get_field_from_dict(\n example_dict=example_dict, field_name=PRESSURE_NAME\n ) + 0.\n\n dummy_pressure_matrix_pascals = (\n pressure_matrix_pascals[:, [-1]] +\n (pressure_matrix_pascals[:, [-1]] - pressure_matrix_pascals[:, [-2]])\n )\n pressure_matrix_pascals = numpy.concatenate(\n (pressure_matrix_pascals, dummy_pressure_matrix_pascals), axis=1\n )\n\n net_flux_matrix_w_m02 = down_flux_matrix_w_m02 - up_flux_matrix_w_m02\n dummy_net_flux_matrix_w_m02 = (\n net_flux_matrix_w_m02[:, [-1]] +\n (net_flux_matrix_w_m02[:, [-1]] - net_flux_matrix_w_m02[:, [-2]])\n )\n net_flux_matrix_w_m02 = numpy.concatenate(\n (net_flux_matrix_w_m02, dummy_net_flux_matrix_w_m02), axis=1\n )\n\n coefficient = GRAVITY_CONSTANT_M_S02 / DRY_AIR_SPECIFIC_HEAT_J_KG01_K01\n\n # heating_rate_matrix_k_day01 = DAYS_TO_SECONDS * coefficient * (\n # numpy.gradient(net_flux_matrix_w_m02, axis=1) /\n # numpy.absolute(numpy.gradient(pressure_matrix_pascals, axis=1))\n # )\n\n heating_rate_matrix_k_day01 = DAYS_TO_SECONDS * coefficient * (\n numpy.diff(net_flux_matrix_w_m02, axis=1) /\n numpy.absolute(numpy.diff(pressure_matrix_pascals, axis=1))\n )\n\n error_checking.assert_is_numpy_array_without_nan(net_flux_matrix_w_m02)\n error_checking.assert_is_numpy_array_without_nan(pressure_matrix_pascals)\n heating_rate_matrix_k_day01[numpy.isnan(heating_rate_matrix_k_day01)] = 0.\n\n vector_target_names = example_dict[VECTOR_TARGET_NAMES_KEY]\n found_heating_rate = SHORTWAVE_HEATING_RATE_NAME in vector_target_names\n if not found_heating_rate:\n vector_target_names.append(SHORTWAVE_HEATING_RATE_NAME)\n\n heating_rate_index = vector_target_names.index(SHORTWAVE_HEATING_RATE_NAME)\n example_dict[VECTOR_TARGET_NAMES_KEY] = vector_target_names\n\n if found_heating_rate:\n 
example_dict[VECTOR_TARGET_VALS_KEY][..., heating_rate_index] = (\n heating_rate_matrix_k_day01\n )\n else:\n example_dict[VECTOR_TARGET_VALS_KEY] = numpy.insert(\n example_dict[VECTOR_TARGET_VALS_KEY],\n obj=heating_rate_index, values=heating_rate_matrix_k_day01, axis=-1\n )\n\n return example_dict", "def specific_heat(reduced_DOS, T, mu=None, n_e=1e15, dT_frac=0.01):\n # generate high and low temperatures, which are +/- 5% from T\n dT = T * dT_frac\n T_h = T + 0.5 * dT\n T_l = T - 0.5 * dT\n\n [eps, dens] = reduced_DOS\n if mu is None:\n # these functions are yet to be moved into this file.\n mu_high = get_mu_at_T([eps, dens], T_h, n_e=n_e)\n mu_low = get_mu_at_T([eps, dens], T_l, n_e=n_e)\n else:\n mu_high = mu\n mu_low = mu\n\n dU = simps((fermi(eps, mu_high, T_h)-fermi(eps, mu_low, T_l))\n * (eps-mu_low)* dens, x=eps)\n \n #dU = np.trapz((fermi(eps, mu_high, T_h)-fermi(eps, mu_low, T_l))\n # * (eps-mu_low)* DOS, x = eps) \n # previously used (eps-mu_low) instead of (eps) in above. Need to think\n # about this a bit more. \n\n # commented factors would convert to J/(K m**2)\n return dU/dT * nu0", "def test_heat_model():\n # define the model\n heat_model = Heat()\n\n # test the model\n u = heat_model.perform_simulation(kappa=jnp.array([0.5, 0.5, 0.25]))\n\n # build the grid\n y_1 = np.linspace(0, 1, 100)\n y_2 = np.linspace(0, 1, 100)\n Y_1, Y_2 = np.meshgrid(y_1, y_2)\n extent = [y_1[0], y_1[-1], y_2[0], y_2[-1]]\n\n # Define the color map\n colors = [\n \"#762a83\",\n \"#9970ab\",\n \"#c2a5cf\",\n \"#e7d4e8\",\n \"#d9f0d3\",\n \"#a6dba0\",\n \"#5aae61\",\n \"#1b7837\",\n ]\n cmap = mcolors.LinearSegmentedColormap.from_list(\"my_colormap\", colors)\n\n # plot the KDE: draw the function\n fig, ax = plt.subplots(figsize=(3.8, 3.0))\n im = ax.imshow(u.T, origin=\"lower\", cmap=cmap, extent=extent, aspect=1)\n\n # draw the contour lines\n cset = ax.contour(\n u.T,\n np.arange(0, 1, 0.1),\n # np.arange(np.min(u), np.max(u), (np.max(u) - np.min(u)) / 8),\n linewidths=2,\n color=\"k\",\n extent=extent,\n aspect=1,\n )\n ax.clabel(cset, inline=True, fmt=\"%1.2f\", fontsize=10)\n\n # draw the colorbar\n fig.colorbar(im, ax=ax, location=\"right\")\n\n # add pretty stuff\n fig.suptitle(r\"Solution of the heat equation\")\n ax.set_xlabel(r\"$x_1$\")\n ax.set_ylabel(r\"$x_2$\")\n plt.show()", "def turn_aux_heat_off(self):\n self.set_operation_mode(STATE_HEAT)", "def heat(update: Update, context: CallbackContext) -> None:\n if __login.is_user_logged_in():\n if __sauna.control.getPortValue(\"Mains Sensor\") == 1:\n\n if __sauna.control.getPortValue(\"Power Sensor\") == 0:\n __sauna.control.togglePortValue(\"Power Switch\")\n\n if __sauna.control.getPortValue(\"Light Sensor\") == 0:\n __sauna.control.togglePortValue(\"Light Switch\")\n\n if __sauna.control.is_below_lower_limit(\"Temperature Sensor\"):\n if __sauna.control.getPortValue(\"Oven Sensor\") == 0:\n __sauna.control.togglePortValue(\"Oven Switch\")\n update.message.reply_text('Starting to heat.')\n else:\n update.message.reply_text('Main power is switched off. Needs to be set manually.')\n else:\n update.message.reply_text(\"You are not logged in. 
Log in!!\")", "def two_temps(L, n_cycles, temp):\n\n E = np.zeros((2, len(temp), n_cycles))\n Mag = np.zeros((2, len(temp), n_cycles))\n MagAbs = np.zeros((2, len(temp), n_cycles))\n SH = np.zeros((2, len(temp), n_cycles))\n Suscept = np.zeros((2, len(temp), n_cycles))\n\n Naccept = np.zeros((2, len(temp), n_cycles))\n\n ground_spin_mat = np.ones((L, L), np.int8) # initial state (ground state)\n\n for m in range(2):\n for t in range(len(temp)):\n\n #m=0 is ground state, all spin-up\n #m=1 is random state\n if m==0:\n spin_matrix = ground_spin_mat\n else:\n s_mat_random = np.ones((L, L), np.int8) # spin matrix of ones\n #random indices switched to -1\n for sw in range(len(s_mat_random)):\n for sl in range(len(s_mat_random)):\n rint = np.random.randint(-1,1)\n if rint == -1:\n s_mat_random[sw,sl] *= -1\n spin_matrix = s_mat_random\n\n\n print(\"hi\")#; sys.exit(1)\n Energy, Magnetization, MagnetizationAbs, SpecificHeat, Susceptibility, Nacc \\\n = numerical_solution(spin_matrix, n_cycles, temp[t], L, abs=False)\n\n\n E[m,t,:] = Energy\n Mag[m,t,:] = Magnetization\n MagAbs[m,t,:] = MagnetizationAbs\n SH[m,t,:] = SpecificHeat\n Suscept[m,t,:] = Susceptibility\n\n Naccept[m,t,:] = Nacc\n\n return E, Mag, MagAbs, SH, Suscept, Naccept", "def SCF(N, R, Zeta1, Zeta2, Za, Zb, G):\n Crit = 1e-11 # Convergence critera\n Maxit = 250 # Maximum number of iterations\n Iter = 0\n\n ######## STEP 1. Guess an initial density matrix ########\n # Use core hamiltonian for initial guess of F, I.E. (P=0)\n P = np.zeros([2, 2])\n\n Energy = 0.0\n\n while (Iter < Maxit):\n Iter += 1\n print(Iter)\n\n ######## STEP 2. calculate the Fock matrix ########\n # Form two electron part of Fock matrix from P\n G = np.zeros([2, 2]) # This is the two electron contribution in the equations above\n for i in range(2):\n for j in range(2):\n for k in range(2):\n for l in range(2):\n G[i, j] = G[i, j] + P[k, l] * (TT[i, j, k, l] - 0.5 * TT[i, j, k, l])\n\n # Add core hamiltonian H^CORE to get fock matrix\n F = H + G\n\n # Calculate the electronic energy\n Energy = np.sum(0.5 * P * (H + F))\n\n print('Electronic energy = ', Energy)\n\n ######## STEP 3. Calculate F' (remember S^-1/2 is X and S^1/2 is X.T) ########\n G = np.matmul(F, X)\n Fprime = np.matmul(X.T, G)\n\n ######## STEP 4. Solve the eigenvalue problem ########\n # Diagonalise transformed Fock matrix\n Diag(Fprime, Cprime, E)\n\n ######## STEP 5. Calculate the molecular orbitals coefficients ########\n # Transform eigen vectors to get matrix C\n C = np.matmul(X, Cprime)\n\n ######## STEP 6. Calculate the new density matrix from the old P ########\n Oldp = np.array(P)\n P = np.zeros([2, 2])\n\n # Form new density matrix\n for i in range(2):\n for j in range(2):\n # Save present density matrix before creating a new one\n for k in range(1):\n P[i, j] += 2.0 * C[i, k] * C[j, k]\n\n ######## STEP 7. 
Check to see if the energy has converged ########\n Delta = 0.0\n # Calculate delta the difference between the old density matrix Old P and the new P\n Delta = (P - Oldp)\n Delta = np.sqrt(np.sum(Delta ** 2) / 4.0)\n print(\"Delta\", Delta)\n\n # Check for convergence\n if (Delta < Crit):\n # Add nuclear repulsion to get the total energy\n Energytot = Energy + Za * Zb / R\n print(\"Calculation converged with electronic energy:\", Energy)\n print(\"Calculation converged with total energy:\", Energytot)\n print(\"Density matrix\", P)\n print(\"Mulliken populations\", np.matmul(P, S))\n print(\"Coeffients\", C)\n\n break", "def calc_plate_HEX(NTU, cr):\n eff = 1 - scipy.exp((1 / cr) * (NTU ** 0.22) * (scipy.exp(-cr * (NTU) ** 0.78) - 1))\n return eff", "def burn_step(self):\n change = np.full((self.width, self.height), 0)\n for x in range(0, self.width - 1):\n for y in range(0, self.height - 1):\n # How fast we go through the fuel\n if random.randrange(2) == 0:\n self.fire_check_point(x, y, change)\n\n self.temp = np.maximum(change, self.temp)", "def t_rh_2_dewT(ds, var):\n ds['dew'] = 243.04 * (np.log(ds[var['rh']] / 100) + ((17.625 * ds[var['temp']]) / (243.04 + ds[var['temp']])))/\\\n (17.625-np.log(ds[var['rh']] / 100) - ((17.625 * ds[var['temp']]) / (243.04 + ds[var['temp']])))\n return ds", "def general_solver(heat_flux, temp_initial, temp_air, k, alpha, x_grid,t_grid, upsilon, \n bc_surface, sigma):\n\n # temperatures are reported as a data frame, where each column is a step in time\n temperatures = pd.DataFrame(columns = [n for n in t_grid])\n\n # extract the necessary parameters to determine the surface heat losses\n if bc_surface[0] == \"linear\":\n h = bc_surface[1] + bc_surface[2]\n hc = 0\n emissivity = 0\n elif bc_surface[0] == \"non-linear\":\n h = 0\n hc = bc_surface[1]\n emissivity = bc_surface[2]\n\n # initialize temperature arrays for present and future temperatures\n T = np.zeros_like(x_grid) + temp_initial\n Tn = np.zeros_like(x_grid)\n\n # iterate over each time step\n temperatures.iloc[:,0] = T\n for j, t in enumerate(t_grid[:-1]):\n \n # create tri-diagonal matrix A\n A = tridiag_matrix(bc_surface_type = bc_surface[0], upsilon = upsilon, \n space_divisions = len(x_grid), dx = x_grid[1] - x_grid[0], \n k = k, T = T, h = h, hc = hc, emissivity = emissivity, sigma = sigma)\n \n # create vector b\n b = vector_b(bc_surface_type = bc_surface[0], upsilon = upsilon, \n space_divisions = len(x_grid), dx = x_grid[1] - x_grid[0], \n k = k, T = T, T_air = temp_air, heat_flux = heat_flux, h = h, hc = hc, \n emissivity = emissivity, sigma = sigma, j = j)\n \n # calculate value of future temperature\n Tn = np.linalg.solve(A,b)\n \n # update present temperature\n T = Tn.copy()\n \n # store temperature profile at this time in the data frame\n temperatures.iloc[:, j+1] = Tn\n \n return temperatures", "def heat(t0, t1, dt, nu, timer=False, dim=None, verbose=False, start_u=None, f=None):\n if verbose:\n print \"verbose mode activated!\"\n if dim is None:\n if start_u is not None and f is not None: # if both u and f are passed\n if type(start_u) == ndarray:\n u=start_u.tolist() # converts input u to python list if the user tries to pass a numpy array (as will happen e.g. if the user use the input_file argument in ui.py)\n else:\n u = start_u\n if type(f) == ndarray:\n f=f.tolist()\n if len(f) != len(u) or len(f[0]) != len(u[0]):\n print 'Error! 
u and f must have the same dimensions!'\n sys.exit(1)\n else:\n if start_u is None and f is None: # if neither dimensions, u or f is specified, u and f is set by default to a 100 x 50 rectangle with all values set to zero and one, respectively\n u = [[0 for x in range(50)] for x in range(100)]\n f = [[1 for x in range(50)] for x in range(100)]\n elif start_u is None: # if only f is given, u is set to an array of the same dimensions as f with all values set to zero\n if type(f)==ndarray:\n f=f.tolist()\n u=[[0 for x in range(len(f[0]))] for x in range(len(f))] \n elif f is None: # if only u is given, f is set to an array of the same dimensions as u with all values set to one\n if type(start_u)==ndarray:\n u=start_u.tolist()\n else:\n u=start_u\n f=[[1 for x in range(len(u[0]))] for x in range(len(u))] \n else: # if dimensions are specified, u and f are set to rectangles with dimensions dim[0] x dim[1] with all values set to zero and one, respectively.\n u = [[0 for x in range(dim[1])] for x in range(dim[0])]\n f = [[1 for x in range(dim[1])] for x in range(dim[0])]\n t=t0\n if timer: # start timer if promted by the user\n from timeit import default_timer as time\n start_time = time()\n u_new = deepcopy(u)\n while t<t1: # a triple loop over time values, row indexes and collumn indexes \n for i in range(1, len(u)-1):\n for j in range(1, len(u[i])-1):\n u_new[i][j] = u[i][j] + dt*(nu*u[i-1][j] + nu*u[i][j-1] - 4*nu*u[i][j] + nu*u[i][j+1] + nu*u[i+1][j] + f[i][j])\n if verbose:\n print \"Now processing time step\", t\n u=deepcopy(u_new)\n t=t+dt\n if timer:\n end_time = time() # end timer\n print 'loop executed in {t:.3f} seconds'.format(t=end_time-start_time)\n return u_new", "def risetime_calc(self):\n\n # given the transmitter's 20%-80% risetime, and assuming a\n # Gaussian impulse response, calculate the 10%-90% risetime\n # cell G3\n\n #self.tx_1090_rise = 1.518*self.tx_2080_rise #Fix 1 : Formula not same as in Cell T7\n self.tx_1090_rise = 329*1000/self.tx_2080_rise\n \n # calculate the effective risetimes for the fiber channel, given\n # the bandwidths calculated in the previous section, assuming\n # a Gaussian impulse response model\n self.cd_1090_rise = 0.48E6 / self.bw_cd\n self.md_1090_rise = 0.48E6 / self.bw_md\n\n # calculate the risetime for the link receiver, given its\n # bandwidth and assuming a single pole impulse response\n # Cell T7\n self.rx_1090_rise = 0.329E6/self.rx_bw\n\n # calculate the risetime for the test receiver used for transmitter\n # eye displays, given its bandwidth and assuming a single pole\n # response\n self.rx_txeye_1090_rise = 0.329E6 / self.txeye_rx_bw\n\n # calculate Te from column H and Tc from column I\n tr_tx_2 = self.tx_1090_rise**2*self.l_1\n tr_rx_2 = self.rx_1090_rise**2*self.l_1\n tr_cd_2 = np.square(self.cd_1090_rise)\n tr_md_2 = np.square(self.md_1090_rise)\n self.te = np.sqrt(tr_cd_2 + tr_md_2 + tr_tx_2) # column H\n \n self.tc = np.sqrt(tr_cd_2 + tr_md_2 + tr_tx_2 + tr_rx_2) # column I\n \n\n # end of GbE10..risetime_calc", "def doit(gts, hr):\n sts = gts - datetime.timedelta(hours=hr)\n times = [gts]\n if hr > 24:\n times.append(gts - datetime.timedelta(hours=24))\n if hr == 72:\n times.append(gts - datetime.timedelta(hours=48))\n metadata = {'start_valid': sts.strftime(\"%Y-%m-%dT%H:%M:%SZ\"),\n 'end_valid': gts.strftime(\"%Y-%m-%dT%H:%M:%SZ\")}\n # Create the image data\n # imgdata = np.zeros( (szy, szx), 'u1')\n # timestep = np.zeros( (szy, szx), 'f')\n total = None\n for now in times:\n gribfn = 
now.strftime((\"/mnt/a4/data/%Y/%m/%d/mrms/ncep/\"\n \"RadarOnly_QPE_24H/\"\n \"RadarOnly_QPE_24H_00.00_%Y%m%d-%H%M00\"\n \".grib2.gz\"))\n if not os.path.isfile(gribfn):\n print(\"mrms_raster_pXXh.py MISSING %s\" % (gribfn,))\n return\n fp = gzip.GzipFile(gribfn, 'rb')\n (tmpfp, tmpfn) = tempfile.mkstemp()\n tmpfp = open(tmpfn, 'wb')\n tmpfp.write(fp.read())\n tmpfp.close()\n grbs = pygrib.open(tmpfn)\n grb = grbs[1]\n os.unlink(tmpfn)\n\n # careful here, how we deal with the two missing values!\n if total is None:\n total = grb['values']\n else:\n maxgrid = np.maximum(grb['values'], total)\n total = np.where(np.logical_and(grb['values'] >= 0, total >= 0),\n grb['values'] + total, maxgrid)\n\n \"\"\"\n 255 levels... wanna do 0 to 20 inches\n index 255 is missing, index 0 is 0\n 0-1 -> 100 - 0.01 res || 0 - 25 -> 100 - 0.25 mm 0\n 1-5 -> 80 - 0.05 res || 25 - 125 -> 80 - 1.25 mm 100\n 5-20 -> 75 - 0.20 res || 125 - 500 -> 75 - 5 mm 180\n \"\"\"\n # total = np.flipud(total)\n # Off scale gets index 254\n imgdata = convert_to_image(total)\n\n (tmpfp, tmpfn) = tempfile.mkstemp()\n # Create Image\n png = Image.fromarray(imgdata.astype('u1'))\n png.putpalette(mrms.make_colorramp())\n png.save('%s.png' % (tmpfn,))\n # os.system(\"xv %s.png\" % (tmpfn,))\n # Now we need to generate the world file\n mrms.write_worldfile('%s.wld' % (tmpfn,))\n # Inject WLD file\n pqstr = (\"/home/ldm/bin/pqinsert -i -p 'plot ac %s \"\n \"gis/images/4326/mrms/p%sh.wld GIS/mrms/p%sh_%s.wld wld' %s.wld\"\n \"\") % (gts.strftime(\"%Y%m%d%H%M\"), hr, hr,\n gts.strftime(\"%Y%m%d%H%M\"), tmpfn)\n subprocess.call(pqstr, shell=True)\n\n # Now we inject into LDM\n pqstr = (\"/home/ldm/bin/pqinsert -i -p 'plot ac %s \"\n \"gis/images/4326/mrms/p%sh.png GIS/mrms/p%sh_%s.png png' %s.png\"\n \"\") % (gts.strftime(\"%Y%m%d%H%M\"), hr, hr,\n gts.strftime(\"%Y%m%d%H%M\"), tmpfn)\n subprocess.call(pqstr, shell=True)\n\n # Create 900913 image\n cmd = (\"gdalwarp -s_srs EPSG:4326 -t_srs EPSG:3857 -q -of GTiff \"\n \"-tr 1000.0 1000.0 %s.png %s.tif\") % (tmpfn, tmpfn)\n subprocess.call(cmd, shell=True)\n\n # Insert into LDM\n pqstr = (\"/home/ldm/bin/pqinsert -i -p 'plot c %s \"\n \"gis/images/900913/mrms/p%sh.tif GIS/mrms/p%sh_%s.tif tif' %s.tif\"\n \"\") % (gts.strftime(\"%Y%m%d%H%M\"), hr, hr,\n gts.strftime(\"%Y%m%d%H%M\"), tmpfn)\n subprocess.call(pqstr, shell=True)\n\n j = open(\"%s.json\" % (tmpfn,), 'w')\n j.write(json.dumps(dict(meta=metadata)))\n j.close()\n\n # Insert into LDM\n pqstr = (\"/home/ldm/bin/pqinsert -i -p 'plot c %s \"\n \"gis/images/4326/mrms/p%sh.json GIS/mrms/p%sh_%s.json json'\"\n \" %s.json\") % (gts.strftime(\"%Y%m%d%H%M\"), hr, hr,\n gts.strftime(\"%Y%m%d%H%M\"), tmpfn)\n subprocess.call(pqstr, shell=True)\n for suffix in ['tif', 'json', 'png', 'wld']:\n os.unlink('%s.%s' % (tmpfn, suffix))\n os.close(tmpfp)\n os.unlink(tmpfn)", "def calculate(self, waste_heat=0):\n radiator_type = self.radiators_type.get()\n data = self.radiators[radiator_type]\n if not waste_heat:\n for _, subsystem in self.data.wasteheat.items():\n waste_heat += subsystem\n self.waste_heat.set(waste_heat)\n area = waste_heat / (data[\"Specific area heat\"] * 1000)\n mass = (area * 1000) * data[\"Specific area mass\"]\n self.data.masses[\"Lifesupport Radiators\"] = mass\n self.area.set(area)\n self.mass.set(mass)\n self.radiator_temperature.set(self.radiators[radiator_type][\"Radiator Temperature\"])", "def hinderedRotor_heatCapacity(T, freq, barr):\n x = constants.h * constants.c * 100. 
* freq / constants.kB / T\n exp_x = math.exp(x)\n one_minus_exp_x = 1.0 - exp_x\n z = 0.5 * constants.h * constants.c * 100. * barr / constants.kB / T\n BB = scipy.special.i1(z) / scipy.special.i0(z)\n return x * x * exp_x / one_minus_exp_x / one_minus_exp_x - 0.5 + z * (z - BB - z * BB * BB)", "def th_cell_diff(th_state, time, d):\n assert d[\"alpha_int\"] < d[\"alpha\"]\n \n # divide array into cell states\n tnaive = th_state[:d[\"alpha_int\"]]\n tint = th_state[d[\"alpha_int\"]:d[\"alpha\"]]\n teff = th_state[d[\"alpha\"]:]\n \n assert len(tnaive)+len(tint)+len(teff) == len(th_state)\n tnaive = np.sum(tnaive)\n tint = np.sum(tint)\n teff = np.sum(teff)\n \n # IL2 production\n il2_producers = tnaive+tint\n il2_consumers = teff+tint \n conc_il2 = d[\"rate_il2\"]*il2_producers/(d[\"K_il2\"]+il2_consumers)\n \n # IL7 production\n il7_consumers = il2_consumers\n conc_il7 = d[\"rate_il7\"] / (d[\"K_il2\"]+il7_consumers)\n \n # apply feedback on rate beta\n fb_ifn = 0\n if d[\"fb_ifn\"] != 0:\n conc_ifn = d[\"rate_ifn\"]*(il2_producers)\n fb_ifn = (d[\"fb_ifn\"]*conc_ifn**3)/(conc_ifn**3+d[\"K_ifn\"]**3)\n \n beta = (fb_ifn+1)*d[\"beta\"] \n beta_p = d[\"beta_p\"] \n rate_death = d[\"d_eff\"]\n\n # check homeostasis criteria\n if d[\"crit\"] == False:\n update_t0(d, time, conc_il2, conc_il7)\n elif d[\"death_mode\"] == False:\n beta_p = beta_p*np.exp(-d[\"decay_p\"]*(time-d[\"t0\"]))\n else:\n rate_death = rate_death*np.exp(0.1*(time-d[\"t0\"]))\n \n # differentiation \n dt_state = diff_effector_new(th_state, teff, d, beta, rate_death, beta_p)\n \n \n return dt_state", "def refill_real(img, result, clustermask, cluster_colors):\n overall_dist = 0\n w, h, _ = img.shape\n for x in range(w):\n for y in range(h):\n cid = clustermask[x, y]\n result[x, y] = cluster_colors[cid]", "def matFail(dim):\n bst = dim[0]\n tst = dim[1]\n tsk = dim[2]\n\n Et = (Esk * tsk) + (Est * ((bst * tst) / bsk))\n Nmat = Et*maxstrain # Critical Load\n rsf = Nmat/Nx\n return rsf - 1.1 # Using a target Reserve Factor of >=1.1", "def calc_ST_operation(m_exhaust_GT_kgpers, T_exhaust_GT_K, T_sup_K, fuel_type):\n\n # calaulate High Pressure (HP) and Low Pressure (LP) mass flow of a double pressure steam turbine\n temp_i_K = (0.9 * ((6 / 48.2) ** (0.4 / 1.4) - 1) + 1) * (T_exhaust_GT_K - ST_DELTA_T)\n if fuel_type == 'NG':\n Mexh = 103.7 * 44E-3 + 196.2 * 18E-3 + 761.4 * 28E-3 + 200.5 * (CC_AIRRATIO - 1) * 32E-3 \\\n + 200.5 * (CC_AIRRATIO - 1) * 3.773 * 28E-3\n ncp_exh = 103.7 * 44 * 0.846 + 196.2 * 18 * 1.8723 + 761.4 * 28 * 1.039 \\\n + 200.5 * (CC_AIRRATIO - 1) * 32 * 0.918 + 200.5 * (CC_AIRRATIO - 1) * 3.773 * 28 * 1.039\n cp_exh = ncp_exh / Mexh # J/kgK\n else:\n Mexh = 98.5 * 44E-3 + 116 * 18E-3 + 436.8 * 28E-3 + 115.5 * (CC_AIRRATIO - 1) * 32E-3 \\\n + 115.5 * (CC_AIRRATIO - 1) * 3.773 * 28E-3\n ncp_exh = 98.5 * 44 * 0.846 + 116 * 18 * 1.8723 + 436.8 * 28 * 1.039 \\\n + 115.5 * (CC_AIRRATIO - 1) * 32 * 0.918 + 115.5 * (CC_AIRRATIO - 1) * 3.773 * 28 * 1.039\n cp_exh = ncp_exh / Mexh # J/kgK\n\n a = np.array([[1653E3 + HEAT_CAPACITY_OF_WATER_JPERKGK * (T_exhaust_GT_K - ST_DELTA_T - 534.5), \\\n HEAT_CAPACITY_OF_WATER_JPERKGK * (temp_i_K - 534.5)], \\\n [HEAT_CAPACITY_OF_WATER_JPERKGK * (534.5 - 431.8), \\\n 2085.8E3 + HEAT_CAPACITY_OF_WATER_JPERKGK * (534.5 - 431.8)]])\n b = np.array([m_exhaust_GT_kgpers * cp_exh * (T_exhaust_GT_K - (534.5 + ST_DELTA_T)), \\\n m_exhaust_GT_kgpers * cp_exh * (534.5 - 431.8)])\n [mdotHP_kgpers, mdotLP_kgpers] = np.linalg.solve(a, b)\n\n # calculate thermal output\n T_cond_0_K = T_sup_K + 
CC_DELTA_T_DH # condensation temperature constrained by the DH network temperature\n pres0 = (0.0261 * (T_cond_0_K - 273) ** 2 - 2.1394 * (T_cond_0_K - 273) + 52.893) * 1E3\n\n delta_h_evap_Jperkg = (-2.4967 * (T_cond_0_K - 273) + 2507) * 1E3\n q_output_ST_W = (mdotHP_kgpers + mdotLP_kgpers) * delta_h_evap_Jperkg # thermal output of ST\n\n # calculate electricity output\n h_HP_Jperkg = (2.5081 * (T_exhaust_GT_K - ST_DELTA_T - 273) + 2122.7) * 1E3 # J/kg\n h_LP_Jperkg = (2.3153 * (temp_i_K - 273) + 2314.7) * 1E3 # J/kg\n h_cond_Jperkg = (1.6979 * (T_cond_0_K - 273) + 2506.6) * 1E3 # J/kg\n\n el_produced_ST_W = mdotHP_kgpers * (h_HP_Jperkg - h_LP_Jperkg) + \\\n (mdotHP_kgpers + mdotLP_kgpers) * (h_LP_Jperkg - h_cond_Jperkg) # turbine electricity output\n\n el_input_compressor_W = SPEC_VOLUME_STEAM * (\n mdotLP_kgpers * (6E5 - pres0) + (mdotHP_kgpers + mdotLP_kgpers) * (48.2E5 - 6E5)) # compressor electricity use\n\n el_output_ST_W = ST_GEN_ETA * (el_produced_ST_W - el_input_compressor_W) # gross electricity production of turbine\n\n return q_output_ST_W, el_output_ST_W", "def entropy(self):\n\n \"\"\"Gets the first neighbours, which are the first 2*r+1 cells.\"\"\"\n current_neighbours = []\n amount = [0] * self.k ** (2 * self.r + 1)\n for i in range(2 * self.r + 1):\n current_neighbours.append(self.config[self.t, i % self.width])\n\n \"\"\"Calculates the rule and adds one to it's amount. It then removes the\n leftmost cell and adds a cell to the right.\"\"\"\n for i in range(len(self.config[self.t]) - 1):\n rule = 0\n for j in range(len(current_neighbours)):\n rule += int(current_neighbours[j] *\n self.k ** ((2 * self.r + 1) - j - 1))\n amount[len(amount) - 1 - rule] += 1\n current_neighbours.pop(0)\n current_neighbours.append(\n self.config[self.t, (2 * self.r + 1 + i) % self.width])\n\n \"\"\"Calculates the rule for the last neighbourhood.\"\"\"\n rule = 0\n for j in range(len(current_neighbours)):\n rule += int(current_neighbours[j] *\n self.k ** ((2 * self.r + 1) - j - 1))\n amount[len(amount)-1 - rule] += 1\n\n \"\"\"Calculates the Shannon entropy and the the average entropy so far.\"\"\"\n shannon = 0\n for i in range(len(amount)):\n if(amount[i] != 0):\n probability = amount[i] / self.width\n shannon -= probability * np.log2(probability)\n self.average_entropy = (self.average_entropy *\n self.t + shannon) / (self.t + 1)", "def hysteresis_thresholding(image, image_gradients, min_val, max_val):\n\tprint(\"BEFORE HYSTERISIS THRESHOLDING:\", image)\n\tprint(\"gradients:\", image_gradients)\n\n\tlargest_gradient_value = np.max(image_gradients)\n\twhile largest_gradient_value < max_val:\n\t\tprint(\"Largest gradient value:\", largest_gradient_value)\n\t\twarnings.warn(UserWarning(\"Image has no edge gradients above upper threshold, increasing all gradients values!\"))\n\t\t# return np.zeros_like(image)\n\t\timage_gradients *= 1.5\n\t\tlargest_gradient_value = np.max(image_gradients)\n\t\n\t# print(\"Largest gradient value:\", largest_gradient_value)\n\t# the set of all 'strong' indices.\n\tstrong_indices = indices_where(image_gradients >= max_val)\n\toff_indices \t= indices_where(image_gradients < min_val)\n\tweak_indices \t= indices_where((min_val <= image_gradients) & (image_gradients < max_val))\n\t\n\timage_height = image.shape[0]\n\timage_width = image.shape[1]\n\n\t# get the neighbours of all strong edges.\n\t# convert their neighbours with weak edges to strong edges.\n\tto_explore = np.zeros_like(image_gradients, dtype=bool)\n\tto_explore[index_with(strong_indices)] = 
True\n\n\texplored = np.zeros_like(image_gradients, dtype=bool)\n\n\tstrong = np.zeros_like(image_gradients, dtype=bool)\n\tstrong[index_with(strong_indices)] = True\n\t# print(\"strong:\", strong)\n\n\tweak = np.zeros_like(image_gradients, dtype=bool)\n\tweak[index_with(weak_indices)] = True\n\n\tunexplored_indices = aggregate(np.nonzero(to_explore))\n\t# print(\"unexplored (initial):\", [str(v) for v in unexplored])\n\t# print(\"weak indices (initial):\", [str(v) for v in weak_indices])\n\t# print(\"off indices (initial):\", [str(v) for v in off_indices])\n\talready_explored = np.zeros_like(to_explore)\n\n\twhile len(unexplored_indices) > 0:\n\t\t\n\t\t# print(\"exploring indices \", [str(v) for v in indices])\n\t\t# print(indices)\n\n\t\tneighbours = neighbourhood(unexplored_indices, image_width, image_height)\n\t\tis_neighbour = np.zeros_like(weak)\n\t\tis_neighbour[index_with(neighbours)] = True\n\t\tis_weak_neighbour = is_neighbour & weak\n\t\tweak_neighbours = aggregate(np.nonzero(is_weak_neighbour))\n\t\t# weak_neighbours = common_rows_between(neighbours, weak_indices)\n\n\t\t# print(\"The neighbours of (\", \",\".join(str(pixel) for pixel in indices), \") are \", neighbours)\n\t\t# print(\"weak neighbours:\", [str(v) for v in weak_neighbours])\n\t\t\n\t\tstrong[index_with(weak_neighbours)] = True\n\t\tweak[index_with(weak_neighbours)] = False\n\t\t# mark that we need to explore these:\n\t\t\n\t\talready_explored[index_with(unexplored_indices)] = True\n\t\t# explore the indices of the weak neighbours, if they haven't been explored already.\n\t\tto_explore[index_with(weak_neighbours)] = True\n\t\t# do not re-explore already explored indices.\n\t\tto_explore &= ~already_explored\n\t\t\n\t\tunexplored_indices = aggregate(np.nonzero(to_explore))\n\t\n\tout = np.zeros_like(image_gradients)\n\tout[~strong] = 0\n\tout[strong] = 255\n\tprint(\"AFTER HYSTERISIS THRESHOLDING:\", out)\n\treturn out", "async def async_turn_aux_heat_on(self) -> None:\n await self._set_aux_heat(True)", "def hessian(self):\n\n hess_size = 3 * len(self.molecule.molecule['input'])\n\n # output.dat is the psi4 output file.\n with open('output.dat', 'r') as file:\n\n lines = file.readlines()\n\n for count, line in enumerate(lines):\n if '## Hessian' in line or '## New Matrix (Symmetry' in line:\n # Set the start of the hessian to the row of the first value.\n hess_start = count + 5\n break\n else:\n raise EOFError('Cannot locate Hessian matrix in output.dat file.')\n\n # Check if the hessian continues over onto more lines (i.e. 
if hess_size is not divisible by 5)\n extra = 0 if hess_size % 5 == 0 else 1\n\n # hess_length: # of cols * length of each col\n # + # of cols - 1 * #blank lines per row of hess_vals\n # + # blank lines per row of hess_vals if the hess_size continues over onto more lines.\n hess_length = (hess_size // 5) * hess_size + (hess_size // 5 - 1) * 3 + extra * (3 + hess_size)\n\n hess_end = hess_start + hess_length\n\n hess_vals = []\n\n for file_line in lines[hess_start:hess_end]:\n # Compile lists of the 5 Hessian floats for each row.\n # Number of floats in last row may be less than 5.\n # Only the actual floats are added, not the separating numbers.\n row_vals = [float(val) for val in file_line.split() if len(val) > 5]\n hess_vals.append(row_vals)\n\n # Remove blank list entries\n hess_vals = [elem for elem in hess_vals if elem]\n\n reshaped = []\n\n # Convert from list of (lists, length 5) to 2d array of size hess_size x hess_size\n for old_row in range(hess_size):\n new_row = []\n for col_block in range(hess_size // 5 + extra):\n new_row += hess_vals[old_row + col_block * hess_size]\n\n reshaped.append(new_row)\n\n hess_matrix = array(reshaped)\n\n # Cache the unit conversion.\n conversion = 627.509391 / (0.529 ** 2)\n hess_matrix *= conversion\n\n check_symmetry(hess_matrix)\n\n return hess_matrix", "def color_temp(self) -> int:\n new_range = self._tuya_temp_range()\n tuya_color_temp = self.tuya_device.status.get(self.dp_code_temp, 0)\n return (\n self.max_mireds\n - self.remap(\n tuya_color_temp,\n new_range[0],\n new_range[1],\n self.min_mireds,\n self.max_mireds,\n )\n + self.min_mireds\n )", "def _matrix_store_smooth_downhill(self):\n \n import time\n from scipy import sparse as sparse\n from scipy.sparse import linalg as linalgs \n \n\n t = time.clock()\n\n\n size = 0\n for nl in self.neighbour_array_lo_hi:\n size += 3 # len(nl)\n\n row_array = np.empty(size, dtype = int)\n col_array = np.empty(size, dtype = int)\n slope_array = np.zeros(size)\n local_slope_array = np.zeros(64)\n\n\n idx=0 \n for row in range(0, len(self.neighbour_array_lo_hi)): \n neighbours = self.neighbour_array_lo_hi[row] \n npoints = self.tri.points[neighbours]\n\n ## work out (downhill) gradient to (max of three) nearby neighbours\n \n\n for col, column in enumerate(neighbours[0:3]): \n \n delta_h = self.height[column] - self.height[row] \n\n\n if delta_h < 0.0:\n delta_s2 = (self.x[column] - self.x[row])**2 + (self.y[column] - self.y[row])**2\n local_slope_array[col] = ( delta_h**2 / delta_s2 )**5\n\n elif delta_h == 0.0 and self.bmask[row] == False:\n local_slope_array[col] = 1.0e-20\n\n else:\n local_slope_array[col] = 1.0e-20 \n \n # Normalise this so that it conserves mass (note - low points will have no contributions here !) \n \n norm = local_slope_array[0:len(neighbours)].sum()\n if norm != 0.0:\n norm = 1.0 / norm\n\n for col, column in enumerate(neighbours[0:3]): \n row_array[idx] = row\n col_array[idx] = column \n slope_array[idx] = local_slope_array[col] * norm\n\n idx += 1\n\n # We can re-pack this array into a sparse matrix for v. 
fast computation of downhill operator \n\n slopeCOO = sparse.coo_matrix( (slope_array, (row_array, col_array)) ).T\n slopeMat = slopeCOO.tocsr() \n \n print \"SlopeMat.shape \", slopeMat.shape, size\n\n # slopeNormVec = np.array(slopeMat.sum(axis=1)).T[0]\n # slopeNormVec[slopeNormVec != 0.0] = 1.0 / slopeNormVec[slopeNormVec != 0.0]\n # slopeNormMat = sparse.eye(self.tri.npoints)\n # slopeNormMat.setdiag(slopeNormVec)\n # slopeMat = slopeNormMat.dot(slopeMat)\n\n slopeMat.eliminate_zeros()\n self.smoothDownhillMat = slopeMat\n\n return", "def test_calculate_heat_loss_kwh():\n delta_t = pd.Series(\n [12.42, 12.23, 10.85, 9.65, 7.15, 4.85, 3.0, 3.28, 5.03, 7.71, 10.38, 11.77],\n index=[\n \"jan\",\n \"feb\",\n \"mar\",\n \"apr\",\n \"may\",\n \"jun\",\n \"jul\",\n \"aug\",\n \"sep\",\n \"oct\",\n \"nov\",\n \"dec\",\n ],\n )\n hours = pd.Series(\n [d * 24 for d in [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]],\n index=[\n \"jan\",\n \"feb\",\n \"mar\",\n \"apr\",\n \"may\",\n \"jun\",\n \"jul\",\n \"aug\",\n \"sep\",\n \"oct\",\n \"nov\",\n \"dec\",\n ],\n )\n heat_loss_coefficient = pd.Series(\n [\n 121,\n 150,\n ]\n )\n expected_output = np.array(\n [\n 1118.09808,\n 994.44576,\n 976.7604,\n 840.708,\n 643.6716,\n 422.532,\n 270.072,\n 295.27872,\n 438.2136,\n 694.08504,\n 904.3056,\n 1059.58248,\n 1386.072,\n 1232.784,\n 1210.86,\n 1042.2,\n 797.94,\n 523.8,\n 334.8,\n 366.048,\n 543.24,\n 860.436,\n 1121.04,\n 1313.532,\n ]\n )\n\n output = htuse._calculate_heat_loss_kwh(\n heat_loss_coefficient=heat_loss_coefficient,\n delta_t=delta_t,\n hours=hours,\n )\n\n assert_array_almost_equal(output, expected_output)", "def compute_loss(self):", "def latent_heat_vapourisation(self, tair):\n return (2.501 - 0.00237 * tair) * 1E06", "def calculate(crushes):\n\n def to_stress(force):\n pin_area = np.pi * (PIN_DIAM / 2) ** 2\n return force / pin_area\n\n def to_strain(delta, length):\n delta, length = abs(delta), abs(length)\n return delta / length # compressive positive\n\n for i, num in enumerate(crushes.index):\n crush = crushes.loc[num, 'Data']\n\n # Tissue thickness\n thickness = abs(contact_position(crush))\n crushes.loc[num, 'Thickness (mm)'] = thickness\n\n # Crush duration\n crushes.loc[num, 'Crush Duration (s)'] = crush_duration(crush)\n\n # Target duration\n delta = target_duration(crush)\n crushes.loc[num, 'Target Duration (s)'] = delta\n\n # Target stress\n target_stress = crush.loc[target_time(crush), 'Stress (MPa)']\n crushes.loc[num, 'Target Stress (MPa)'] = target_stress\n\n # Target strain\n target_strain = crush.loc[target_time(crush), 'Strain']\n crushes.loc[num, 'Target Strain'] = target_strain\n\n # Stiffness at contact\n # Assumed to be minimum\n stiffness = crush['Stiffness (MPa)'].min()\n crushes.loc[num, 'Contact Stiffness (MPa)'] = stiffness\n\n # Stiffness at target\n # Assumed to be maximum\n stiffness = crush['Stiffness (MPa)'].max()\n crushes.loc[num, 'Target Stiffness (MPa)'] = stiffness\n\n # Delta stress after target reached\n stress_relaxation = to_stress(target_relaxation(crush))\n crushes.loc[num, 'Relaxation Stress (MPa)'] = stress_relaxation\n\n # Delta strain after target reached\n holding_strain = to_strain(target_movement(crush), thickness)\n crushes.loc[num, 'Holding Strain'] = holding_strain\n\n return crushes", "def heatFlowRate(self):\n return _cantera.wall_Q(self.__wall_id)", "def thermal(isatom, freq, scalfac,linnonlin,T):\n if isatom != \"true\":\n nfreq = len(freq)\n\n vib_temp = []\n for ifreq in range(nfreq):\n freq[ifreq] = float(freq[ifreq]) 
* float(scalfac)\n vib_temp_new = c * 100.0 * h * float(freq[ifreq]) / kB\n vib_temp.append(vib_temp_new)\n\n dE_vib = 0\n for ifreq in range(nfreq):\n dE_vib = dE_vib + kB * vib_temp[ifreq] * j2au * ( 0.5 + 1 / ( np.exp(vib_temp[ifreq]/T) - 1) )\n\n dE_ZPE = 0.5 * sum(freq) * cmi2au\n\n if linnonlin == \"L\":\n dE_rot = kB * T * j2au\n elif linnonlin == \"NL\":\n dE_rot = kB * T * j2au * (3.0/2.0)\n else:\n with open(\"Thermochemistry.out\", \"a\") as ther_chem:\n ther_chem.write(\"ERROR: unknown entry for linear/nonlinear\")\n else:\n dE_ZPE = 0\n dE_vib = 0\n dE_rot = 0\n\n dE_tra = kB * T * j2au * (3.0/2.0)\n dE_thermal = (dE_vib - dE_ZPE) + dE_rot + dE_tra\n\n return(dE_ZPE, dE_vib, dE_rot, dE_tra, dE_thermal)", "def ss(image1, image2, hue_angel = 60, saturation_interval_size = 10, value_interval_size = 10):\r\n\r\n\tassert image1.shape[-1] == 3 and image2.shape[-1] == 3, \"only RGB images are accpted\"\r\n\tassert 1 <= saturation_interval_size <= 100, \"saturation_interval_size recommended to be between 1 and 100\"\r\n\tassert 1 <= value_interval_size <= 100, \"value_interval_size recommended to be between 1 and 100\"\r\n\r\n\tdis1, color1 = get_col_dist(image1, hue_angel, saturation_interval_size, value_interval_size)\r\n\tdis2, color2 = get_col_dist(image2, hue_angel, saturation_interval_size, value_interval_size)\r\n\r\n\t## to make sure the lengths of two distributions are the same\r\n\tif len(dis1) >= len(dis2):\r\n\r\n\t\tdis2 = np.pad(dis2, (0, len(dis1) - len(dis2)), \"constant\")\r\n\telse:\r\n\t\tdis1 = np.pad(dis1, (0, len(dis2) - len(dis1)), \"constant\")\r\n\r\n\t## the distribution difference\r\n\tdis_diff = (np.sum((dis1 - dis2) ** 2) / len(dis1)) ** 0.5\r\n\r\n\t\"\"\"\r\n\thue_diff = get_hue_diff(color1, color2)\r\n\r\n\tsaturation_diff = channel_sqrdiff(color1, color2, 2, 100 / saturation_interval_size)\r\n\r\n\tvalue_diff = channel_sqrdiff(color1, color2, 3, 100 / value_interval_size)\r\n\r\n\tcolor_difference = diff_aggregate(hue_diff, saturation_diff, value_diff,\r\n\t\tweights = (dis1 + dis2) / 2)\r\n\r\n\t\"\"\"\r\n\treturn dis_diff#, color_difference\r", "def T1_contrast():\n t = np.linspace(0, 10000, 10000)\n s1 = MZ(t, 200, 900)\n s2 = MZ(t, 200, 1200)\n r = np.argmax(s1 - s2)\n print(f\"{r:.2f}\")\n plt.plot(t, s1, label=\"White matter\")\n plt.plot(t, s2, label=\"Grey matter\")\n plt.legend()\n plt.show()", "def read_elevation(filepath):\n h = 83 #distance between elevation measures\n N = 1201\n theta = np.pi / 6\n elev_array = np.zeros((N, N))\n grad_array = np.zeros((N, N, 2))\n I_array = np.zeros((N, N))\n # Read the elevation data as described in Question 3, and store in the elvation array\n f = open(filepath, \"rb\")\n for i in range(N):\n for j in range(N):\n buf = f.read(2)\n val = struct.unpack(\">h\", buf)[0]\n elev_array[i][j] = val\n f.close()\n # Populate the gradient array\n for i in range(N):\n for j in range(N):\n #This if statements handle the border cases\n if j == 0:\n grad_array[i][j][0] = (elev_array[i][j+1] - elev_array[i][j]) / h\n elif j == N - 1:\n grad_array[i][j][0] = (elev_array[i][j] - elev_array[i][j-1]) / h\n else:\n grad_array[i][j][0] = (elev_array[i][j+1] - elev_array[i][j-1]) / (2 * h)\n \n if i == 0:\n grad_array[i][j][1] = (elev_array[i][j] - elev_array[i-1][j]) / h\n elif i == N - 1:\n grad_array[i][j][1] = (elev_array[i-1][j] - elev_array[i][j]) / h\n else:\n grad_array[i][j][1] = (elev_array[i-1][j] - elev_array[i+1][j]) / (2 * h)\n \n # Populate intensities\n for i in range(N):\n for j in range(N):\n denom = 
np.sqrt(grad_array[i][j][0] ** 2 + grad_array[i][j][1] ** 2 + 1)\n numer = np.cos(theta) * grad_array[i][j][0] + np.sin(theta) * grad_array[i][j][1]\n I_array[i][j] = -1 * numer / denom\n \n return elev_array, I_array" ]
[ "0.72527164", "0.6295159", "0.6292347", "0.62802047", "0.6267784", "0.60749567", "0.59754294", "0.59540564", "0.58983105", "0.5896097", "0.589459", "0.5840689", "0.56996167", "0.56794786", "0.5667763", "0.5648994", "0.56376565", "0.56210124", "0.5620937", "0.55695546", "0.55411637", "0.5532295", "0.55132884", "0.54602915", "0.54493475", "0.5431156", "0.5414194", "0.5403336", "0.5380926", "0.5365836", "0.53463644", "0.53308487", "0.5316243", "0.5311264", "0.5308938", "0.5307379", "0.53046525", "0.52995783", "0.52958673", "0.5284788", "0.52712935", "0.5271207", "0.5259491", "0.5258562", "0.52559423", "0.5237733", "0.5235733", "0.52353126", "0.5234165", "0.52287394", "0.52149665", "0.52143055", "0.5210118", "0.52063596", "0.51988924", "0.519704", "0.519606", "0.518743", "0.5186469", "0.51864296", "0.5184893", "0.5182605", "0.51658297", "0.5157798", "0.51564217", "0.51561654", "0.5151725", "0.5145653", "0.51410973", "0.5138515", "0.51347923", "0.51316893", "0.5130006", "0.5126567", "0.5126208", "0.51093435", "0.5105122", "0.50871724", "0.50856334", "0.5080083", "0.50680643", "0.5062533", "0.5060926", "0.5055639", "0.5054111", "0.5053235", "0.50521916", "0.5046185", "0.50430703", "0.50366825", "0.5032068", "0.50319517", "0.503166", "0.50313437", "0.5017042", "0.50131786", "0.5011574", "0.5008596", "0.50066924", "0.50053847" ]
0.75004613
0
Calculate the capital costs. Attributes
def calc_capital_costs (self):
        road_needed = 'road needed'
        if self.cd['on road system']:
            road_needed = 'road not needed'
        dist = self.comp_specs['distance to community']
        self.capital_costs = self.comp_specs['est. intertie cost per mile']\
                                [road_needed] * dist
        #~ print self.capital_costs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calc_capital_costs (self):\n self.capital_costs = self.max_boiler_output * \\\n self.comp_specs[\"cost per btu/hrs\"]\n #~ print self.capital_costs", "def calc_capital_costs (self):\n powerhouse_control_cost = 0\n if not self.cd['switchgear suitable for renewables']:\n powerhouse_control_cost = self.cd['switchgear cost']\n\n #~ road_needed = self.comp_specs['road needed for transmission line']\n\n\n if str(self.comp_specs['transmission capital cost'])\\\n != 'UNKNOWN':\n transmission_line_cost = \\\n int(self.comp_specs['transmission capital cost'])\n else:\n if str(self.comp_specs['distance to resource']) \\\n != 'UNKNOWN':\n distance = \\\n float(self.comp_specs\\\n ['distance to resource'])\n transmission_line_cost = \\\n distance*self.comp_specs['est. transmission line cost']\n\n secondary_load_cost = 0\n if self.comp_specs['secondary load']:\n secondary_load_cost = self.comp_specs['secondary load cost']\n\n if str(self.comp_specs['generation capital cost']) \\\n != 'UNKNOWN':\n wind_cost = \\\n int(self.comp_specs['generation capital cost'])\n self.cost_per_kw = np.nan\n else:\n for i in range(len(self.comp_specs['estimated costs'])):\n if int(self.comp_specs['estimated costs'].iloc[i].name) < \\\n self.load_offset_proposed:\n if i == len(self.comp_specs['estimated costs']) - 1:\n cost = float(self.comp_specs['estimated costs'].iloc[i])\n break\n continue\n\n cost = float(self.comp_specs['estimated costs'].iloc[i])\n break\n\n wind_cost = self.load_offset_proposed * cost\n self.cost_per_kw = cost\n\n #~ print powerhouse_control_cost\n #~ print transmission_line_cost\n #~ print secondary_load_cost\n #~ print wind_cost\n self.capital_costs = powerhouse_control_cost + transmission_line_cost +\\\n secondary_load_cost + wind_cost\n\n #~ print 'self.capital_costs',self.capital_costs", "def cost(self) -> float:", "def _calculate_costs(self):\n cost = 0\n cost += self._cost_route_fine()\n cost += self._cost_petrol()\n cost += self._cost_wage()\n cost += self._cost_refueling()\n cost += self._cost_caught_by_police()\n cost += self._cost_vehicle_malfunction()\n return cost", "def calculateCosts(self):\n self.costs = 0\n for house in self.houses:\n if not house.distance == 1000:\n self.costs += house.distance * 9\n for battery in self.batteries:\n self.costs += battery.costs\n return self.costs", "def get_expected_cost(self):", "def set_costs(self) -> None:\n self[\"glider cost\"] = (\n self[\"glider base mass\"] * self[\"glider cost slope\"]\n + self[\"glider cost intercept\"]\n )\n self[\"lightweighting cost\"] = (\n self[\"glider base mass\"]\n * self[\"lightweighting\"]\n * self[\"glider lightweighting cost per kg\"]\n )\n self[\"electric powertrain cost\"] = (\n self[\"electric powertrain cost per kW\"] * self[\"electric power\"]\n )\n self[\"combustion powertrain cost\"] = (\n self[\"combustion power\"] * self[\"combustion powertrain cost per kW\"]\n )\n self[\"fuel cell cost\"] = self[\"fuel cell power\"] * self[\"fuel cell cost per kW\"]\n self[\"power battery cost\"] = (\n self[\"battery power\"] * self[\"power battery cost per kW\"]\n )\n self[\"energy battery cost\"] = (\n self[\"energy battery cost per kWh\"] * self[\"electric energy stored\"]\n )\n self[\"fuel tank cost\"] = self[\"fuel tank cost per kg\"] * self[\"fuel mass\"]\n # Per km\n self[\"energy cost\"] = self[\"energy cost per kWh\"] * self[\"TtW energy\"] / 3600\n\n # For battery, need to divide cost of electricity\n # at battery by efficiency of charging\n # to get costs at the \"wall socket\".\n\n _ = lambda x: 
np.where(x == 0, 1, x)\n self[\"energy cost\"] /= _(self[\"battery charge efficiency\"])\n\n self[\"component replacement cost\"] = (\n self[\"energy battery cost\"] * self[\"battery lifetime replacements\"]\n + self[\"fuel cell cost\"] * self[\"fuel cell lifetime replacements\"]\n )\n\n with open(DATA_DIR / \"purchase_cost_params.yaml\", \"r\") as stream:\n to_markup = yaml.safe_load(stream)[\"markup\"]\n\n self[to_markup] *= self[\"markup factor\"]\n\n # calculate costs per km:\n self[\"lifetime\"] = self[\"lifetime kilometers\"] / self[\"kilometers per year\"]\n\n with open(DATA_DIR / \"purchase_cost_params.yaml\", \"r\") as stream:\n purchase_cost_params = yaml.safe_load(stream)[\"purchase\"]\n\n self[\"purchase cost\"] = self[purchase_cost_params].sum(axis=2)\n # per km\n amortisation_factor = self[\"interest rate\"] + (\n self[\"interest rate\"]\n / (\n (np.array(1) + self[\"interest rate\"]) ** self[\"lifetime kilometers\"]\n - np.array(1)\n )\n )\n self[\"amortised purchase cost\"] = (\n self[\"purchase cost\"] * amortisation_factor / self[\"kilometers per year\"]\n )\n\n # per km\n self[\"maintenance cost\"] = (\n self[\"maintenance cost per glider cost\"]\n * self[\"glider cost\"]\n / self[\"kilometers per year\"]\n )\n\n # simple assumption that component replacement\n # occurs at half of life.\n self[\"amortised component replacement cost\"] = (\n (\n self[\"component replacement cost\"]\n * (\n (np.array(1) - self[\"interest rate\"]) ** self[\"lifetime kilometers\"]\n / 2\n )\n )\n * amortisation_factor\n / self[\"kilometers per year\"]\n )\n\n self[\"total cost per km\"] = (\n self[\"energy cost\"]\n + self[\"amortised purchase cost\"]\n + self[\"maintenance cost\"]\n + self[\"amortised component replacement cost\"]\n )", "def calculate_cost(self):\n costs = {}\n if np.abs(self.agent.get_position()[1]) > self.y_lim:\n costs['cost_outside_bounds'] = 1.\n if self.agent.velocity_violation:\n costs['cost_velocity_violation'] = 1.\n # sum all costs in one total cost\n costs['cost'] = min(1, sum(v for k, v in costs.items() if k.startswith('cost_')))\n return costs", "def cost_a(self):\n return self._cost_a", "def calc_maintenance_cost(self):\n\n self.maintenance_cost = self.capital_costs * .01", "def getCosts(self):\n return self.costs", "def cost_b(self):\n return self._cost_b", "def calculate_cost(self, **kwargs):\n costs = {}\n if np.abs(self.agent.get_position()[0]) > self.x_lim:\n costs['cost_outside_bounds'] = 1.\n # sum all costs in one total cost\n costs['cost'] = min(1, sum(v for k, v in costs.items() if k.startswith('cost_')))\n\n return costs", "def calculate_cost(self):\n number_collisions = self.get_collisions()\n cs = dict(\n number_collisions=number_collisions,\n cost_collisions=number_collisions\n )\n # sum all costs in one total cost\n cs['cost'] = sum(v for k, v in cs.items() if k.startswith('cost_'))\n\n return cs", "def tablecost(self):\n subtotal_getter = operator.attrgetter(\"subtotal\")\n\n cost = 0.0\n\n cost += sum(map(subtotal_getter, self.materials))\n cost += sum(map(subtotal_getter, self.processes))\n cost += sum(map(subtotal_getter, self.fasteners))\n cost += sum(map(subtotal_getter, self.toolings))\n\n return cost", "def set_costs_table(self) -> None:\n self.costs[\"B\"] = 2\n self.costs[\"A\"] = 6\n self.costs[\"fin\"] = float(\"inf\")", "def _load_costs(self):\n F_BM = self.F_BM\n F_D = self.F_D\n F_P = self.F_P\n F_M = self.F_M\n baseline_purchase_costs = self.baseline_purchase_costs\n purchase_costs = self.purchase_costs\n installed_costs = 
self.installed_costs\n \n # Load main costs\n for i in purchase_costs:\n if i not in baseline_purchase_costs:\n baseline_purchase_costs[i] = purchase_costs[i]\n for name, Cpb in baseline_purchase_costs.items(): \n if name in installed_costs and name in purchase_costs:\n continue # Assume costs already added elsewhere using another method\n F = F_D.get(name, 1.) * F_P.get(name, 1.) * F_M.get(name, 1.)\n try:\n installed_costs[name] = Cpb * (F_BM[name] + F - 1.)\n except KeyError:\n F_BM[name] = 1.\n installed_costs[name] = purchase_costs[name] = Cpb * F\n else:\n purchase_costs[name] = Cpb * F", "def compute_cost(AL, Y):\n pass", "def update_capital_stats(self):\n short_capital = 0\n long_capital = 0\n\n for pos in (self.active_long_positions + self.active_short_positions):\n\n if pos.order_type == Consts.LONG:\n long_capital += pos.get_current_liquid_capital()\n else:\n short_capital += pos.get_current_liquid_capital()\n\n self.short_capital = short_capital\n self.long_capital = long_capital", "def _set_costs(self):\n plant_size_kw = (self.sam_sys_inputs[\"resource_potential\"]\n / self._RESOURCE_POTENTIAL_MULT) * 1000\n\n cc_per_kw = self.sam_sys_inputs.pop(\"capital_cost_per_kw\", None)\n if cc_per_kw is not None:\n capital_cost = cc_per_kw * plant_size_kw\n logger.debug(\"Setting the capital_cost to ${:,.2f}\"\n .format(capital_cost))\n self.sam_sys_inputs[\"capital_cost\"] = capital_cost\n\n dc_per_well = self.sam_sys_inputs.pop(\"drill_cost_per_well\", None)\n num_wells = self.sam_sys_inputs.pop(\"prod_and_inj_wells_to_drill\",\n None)\n if dc_per_well is not None:\n if num_wells is None:\n msg = ('Could not determine number of wells to be drilled. '\n 'No drilling costs added!')\n logger.warning(msg)\n warn(msg)\n else:\n capital_cost = self.sam_sys_inputs[\"capital_cost\"]\n drill_cost = dc_per_well * num_wells\n logger.debug(\"Setting the drilling cost to ${:,.2f} \"\n \"({:.2f} wells at ${:,.2f} per well)\"\n .format(drill_cost, num_wells, dc_per_well))\n self.sam_sys_inputs[\"capital_cost\"] = capital_cost + drill_cost\n\n foc_per_kw = self.sam_sys_inputs.pop(\"fixed_operating_cost_per_kw\",\n None)\n if foc_per_kw is not None:\n fixed_operating_cost = foc_per_kw * plant_size_kw\n logger.debug(\"Setting the fixed_operating_cost to ${:,.2f}\"\n .format(capital_cost))\n self.sam_sys_inputs[\"fixed_operating_cost\"] = fixed_operating_cost", "def compute_cost(AL, Y):\n pass", "def calculate_profit(self):", "def calculate_atb_costs(self, year, scenario='Moderate'):\n if scenario == 'Advanced' or 'Moderate' or 'Conservative':\n return self._lookup_costs(year, scenario)\n else:\n raise ValueError(\"scenario type {} not recognized\".format(scenario))", "def calculate_total_cost(state):\n pass", "def _compute_calculate_cost(self):\n for order in self:\n amount_calculate_cost = 0.0\n for line in order.order_line:\n amount_calculate_cost += (line.product_id.standard_price * line.product_uom_qty)\n order.update({\n 'amount_calculate_cost': amount_calculate_cost\n })", "def calculate_cost(self):\n number_collisions = self.get_collisions()\n z = self.agent.get_position()[2]\n cs = dict(\n number_collisions=number_collisions,\n cost_collisions=number_collisions,\n # Drone should not leave valid operation space...\n cost_out_of_range=(1. 
if z > 2 else 0.)\n )\n # sum all costs in one total cost\n cs['cost'] = min(1, sum(v for k, v in cs.items() if k.startswith('cost_')))\n return cs", "def _trading_cost(self, current_weights, prev_weights):\n delta_weight = current_weights - prev_weights\n delta_weight = delta_weight[:-1] # No costs associated with risk-free asset\n trading_cost = self.kappa1 * cp.abs(delta_weight) + self.kappa2 * cp.square(delta_weight) # Vector of trading costs per asset\n\n return cp.sum(trading_cost)", "def create_costs():\n infinity = float(\"inf\")\n costs = {}\n costs['biysk'] = 0\n costs['barnaul'] = infinity\n costs['novosibirsk'] = infinity\n costs['belokurikha'] = infinity\n costs['tomsk'] = infinity\n costs['krasnoyarsk'] = infinity\n costs['omsk'] = infinity\n return costs", "def cost_b_v(self):\n return self._cost_b_v", "def calculate_total_cost(state):\r\n return state.cost()", "def instalation_cost(system_cost):\n return system_cost * INSTALATION", "def calculate(self):\n\n rating = 0\n\n props = ['aroma', 'appearance', 'taste', 'palate', 'bottle_style']\n for item in props:\n rating += getattr(self, item, 0)\n\n self.overall = (rating / self.total) / .2", "def total_cost(self):\n return (self.food_amount + self.local_transport_amount + self.other_expenses +\n self.travel_amount + self.accomodation_amount)", "def _objective_cost(self):\n\n def obj_expression_simple(model):\n total = model.A_total + model.A2_total + model.A3_total + \\\n model.A4_total\n return -total\n\n def obj_expression(model):\n total = model.A_total + model.A2_total + model.A3_total + \\\n model.A4_total\n total += model.Completion_total\n total += model.Affinity_cognitive_total\n total += model.CTu_total + model.CTl_total + model.S_total\n return -total\n\n # self.model.exp_cost = Expression(rule=obj_expression)\n # self.model.obj_cost = Objective(rule=self.model.exp_cost)\n # self.model.obj_cost = Objective(rule=obj_expression_simple)\n self.model.obj_cost = Objective(rule=obj_expression)", "def cost_mabr(blk):\n t0 = blk.flowsheet().time.first()\n\n # Get parameter dict from database\n parameter_dict = blk.unit_model.config.database.get_unit_operation_parameters(\n blk.unit_model._tech_type, subtype=blk.unit_model.config.process_subtype\n )\n\n # Get costing parameter sub-block for this technology\n A, B = blk.unit_model._get_tech_parameters(\n blk,\n parameter_dict,\n blk.unit_model.config.process_subtype,\n [\"reactor_cost\", \"blower_cost\"],\n )\n\n # Add cost variable and constraint\n blk.capital_cost = pyo.Var(\n initialize=1,\n units=blk.config.flowsheet_costing_block.base_currency,\n bounds=(0, None),\n doc=\"Capital cost of unit operation\",\n )\n\n DCC_reactor = pyo.units.convert(\n blk.unit_model.properties_treated[t0].flow_mass_comp[\"ammonium_as_nitrogen\"]\n / blk.unit_model.nitrogen_removal_rate\n * A,\n to_units=blk.config.flowsheet_costing_block.base_currency,\n )\n\n DCC_blower = pyo.units.convert(\n blk.unit_model.reactor_area * blk.unit_model.air_flow_rate[t0] * B,\n to_units=blk.config.flowsheet_costing_block.base_currency,\n )\n\n expr = DCC_reactor + DCC_blower\n\n blk.unit_model._add_cost_factor(\n blk, parameter_dict[\"capital_cost\"][\"cost_factor\"]\n )\n\n blk.capital_cost_constraint = pyo.Constraint(\n expr=blk.capital_cost == blk.cost_factor * expr\n )\n\n # Register flows\n blk.config.flowsheet_costing_block.cost_flow(\n blk.unit_model.electricity[t0], \"electricity\"\n )", "def calc_maintenance_cost (self):\n\n if str(self.comp_specs['operational costs']) \\\n != 'UNKNOWN':\n 
self.maintenance_cost = \\\n self.comp_specs['operational costs']\n else:\n self.maintenance_cost = \\\n (self.comp_specs['percent o&m'] / 100.0) * self.capital_costs\n #~ print 'self.maintenance_cost',self.maintenance_cost", "def calculate_cost(self):\n info = {}\n c = self.get_collisions() * self.bomb_cost\n z = self.agent.get_position()[2] # Limit range of Drone agent\n\n # sum all costs in one total cost\n info['cost_gathered_bombs'] = c\n info['cost_out_of_range'] = 1. if z > 2 else 0.\n # limit cost to be at most 1.0\n info['cost'] = min(1, sum(v for k, v in info.items()\n if k.startswith('cost_')))\n return info", "def calculate_costs(data, costs, simulation_parameters, site_radius, environment):\n inter_site_distance = site_radius * 2\n site_area_km2 = math.sqrt(3) / 2 * inter_site_distance ** 2 / 1e6\n sites_per_km2 = 1 / site_area_km2\n\n for key, value in simulation_parameters.items():\n if key == 'backhaul_distance_km_{}'.format(environment):\n backhaul_distance = value\n\n cost_breakdown = {\n 'single_sector_antenna_2x2_mimo_dual_band': (\n costs['single_sector_antenna_2x2_mimo_dual_band'] *\n simulation_parameters['sectorization'] * sites_per_km2\n ),\n 'single_remote_radio_unit': (\n costs['single_remote_radio_unit'] *\n simulation_parameters['sectorization'] * sites_per_km2\n ),\n 'single_baseband_unit': (\n costs['single_baseband_unit'] * sites_per_km2\n ),\n 'router': (\n costs['router'] * sites_per_km2\n ),\n 'tower': (\n costs['tower'] * sites_per_km2\n ),\n 'civil_materials': (\n costs['civil_materials'] * sites_per_km2\n ),\n 'transportation': (\n costs['transportation'] * sites_per_km2\n ),\n 'installation': (\n costs['installation'] * sites_per_km2\n ),\n 'battery_system': (\n costs['battery_system'] * sites_per_km2\n ),\n 'fiber_backhaul_{}'.format(environment): (\n costs['fixed_fiber_backhaul_per_km'] * backhaul_distance * sites_per_km2\n ),\n 'microwave_backhaul_1m': (\n costs['microwave_backhaul_1m'] * sites_per_km2\n )\n }\n\n total_deployment_costs_km2 = 0\n for key, value in cost_breakdown.items():\n total_deployment_costs_km2 += value\n\n output = {\n 'environment': environment,\n 'inter_site_distance': inter_site_distance,\n 'site_area_km2': site_area_km2,\n 'sites_per_km2': sites_per_km2,\n 'results_type': data['results_type'],\n 'path_loss': data['path_loss'],\n 'received_power': data['received_power'],\n 'interference': data['interference'],\n 'sinr': data['sinr'],\n 'spectral_efficiency': data['spectral_efficiency'],\n 'capacity_mbps': data['capacity_mbps'],\n 'capacity_mbps_km2': data['capacity_mbps'],\n 'total_deployment_costs_km2': total_deployment_costs_km2,\n 'sector_antenna_costs_km2': cost_breakdown['single_sector_antenna_2x2_mimo_dual_band'],\n 'remote_radio_unit_costs_km2': cost_breakdown['single_remote_radio_unit'],\n 'baseband_unit_costs_km2': cost_breakdown['single_baseband_unit'],\n 'router_costs_km2': cost_breakdown['router'],\n 'tower_costs_km2': cost_breakdown['tower'],\n 'civil_material_costs_km2': cost_breakdown['civil_materials'],\n 'transportation_costs_km2': cost_breakdown['transportation'],\n 'installation_costs_km2': cost_breakdown['installation'],\n 'battery_system_costs_km2': cost_breakdown['battery_system'],\n 'fiber_backhaul_costs_km2': cost_breakdown['fiber_backhaul_{}'.format(environment)],\n 'microwave_backhaul_1m_costs_km2': cost_breakdown['microwave_backhaul_1m'],\n }\n\n return output", "def cost(self):\n\t\treturn self.g + self.h", "def total_cost(self):\r\n return sum(i.line_cost for i in 
self.orderitem_set.filter(status=self.status)) # pylint: disable=E1101\r", "def cost_filter_press(blk):\n t0 = blk.flowsheet().time.first()\n # Add cost variable and constraint\n blk.capital_cost = pyo.Var(\n initialize=1,\n units=blk.config.flowsheet_costing_block.base_currency,\n bounds=(0, None),\n doc=\"Capital cost of unit operation\",\n )\n\n Q = pyo.units.convert(\n blk.unit_model.properties_in[t0].flow_vol,\n to_units=pyo.units.gal / pyo.units.hr,\n )\n\n # Get parameter dict from database\n parameter_dict = blk.unit_model.config.database.get_unit_operation_parameters(\n blk.unit_model._tech_type, subtype=blk.unit_model.config.process_subtype\n )\n\n # Get costing parameter sub-block for this technology\n A, B = blk.unit_model._get_tech_parameters(\n blk,\n parameter_dict,\n blk.unit_model.config.process_subtype,\n [\"capital_a_parameter\", \"capital_b_parameter\"],\n )\n\n # Determine if a costing factor is required\n factor = parameter_dict[\"capital_cost\"][\"cost_factor\"]\n\n expr = pyo.units.convert(\n A * Q + B, to_units=blk.config.flowsheet_costing_block.base_currency\n )\n\n blk.capital_cost_constraint = pyo.Constraint(expr=blk.capital_cost == expr)\n\n # Register flows\n blk.config.flowsheet_costing_block.cost_flow(\n blk.unit_model.electricity[t0], \"electricity\"\n )", "def calculate_meal_costs(meal_base, tax_rate, tip_rate):\n tax_value = calculate_rate(meal_base, tax_rate)\n meal_with_tax = tax_value + meal_base\n tip_value = calculate_rate(meal_with_tax, tip_rate)\n total = meal_with_tax + tip_value\n meal_info = dict(meal_base=meal_base,\n tax_rate=tax_rate,\n tip_value=tip_value,\n tax_value=tax_value,\n total = total)\n return meal_info", "def line_cost(self):\r\n return self.qty * self.unit_cost", "def calc_annual_electric_savings (self):\n price = self.diesel_prices\n #TODO add rural v non rural\n self.base_generation_cost = self.electric_diesel_reduction * price\n\n\n self.proposed_generation_cost = self.maintenance_cost\n\n self.annual_electric_savings = self.base_generation_cost - \\\n self.proposed_generation_cost\n #~ print 'self.annual_electric_savings',self.annual_electric_savings", "def _get_toal_cp_(obj):\n \n fTotal = 0.0\n for item in obj.order_line:\n fTotal += item.purchase_price * item.product_uom_qty\n \n return fTotal", "def _cost(self, action):\n raise NotImplementedError", "def compute_cost(self, chrome):\n return 1", "def total_cost(self):\n return np.einsum('i->', self.c[self.s])", "def cost(self):\n abs_cost = sum(f['price'] * f['qty'] for f in self.fills)\n return -abs_cost if self.is_ask() else abs_cost", "def taxicab(a, b):\n \"*** YOUR CODE HERE ***\"\n return abs(street(a) - street(b)) + abs(avenue(a) - avenue(b))", "def taxicab(a, b):\n \"*** YOUR CODE HERE ***\"\n return abs(street(a) - street(b)) + abs(avenue(a) - avenue(b))", "def cost(self,e1,e2):\n pass", "def calculate_cost(self):\n booking_days, booking_hours = self.calculate_daily_hourly_billable_counts()\n day_cost = booking_days * Decimal(self.vehicle.type.daily_rate)\n hour_cost = booking_hours * Decimal(self.vehicle.type.hourly_rate)\n if hour_cost > self.vehicle.type.daily_rate:\n hour_cost = self.vehicle.type.daily_rate\n return float(day_cost + hour_cost)", "def unitcost(self):\n cost = self.tablecost\n\n for component, quantity in self.components.items():\n cost += component.unitcost * quantity\n\n return cost", "def set_attributes(self):\n for i, battery in enumerate(sorted(self.batteries.values(),\n key=operator.attrgetter(\"weight\"))):\n setattr(battery, \"cap\", 
self.caps[self.big_iterations][i])\n if self.caps[self.big_iterations][i] is 450:\n cost = 900\n elif self.caps[self.big_iterations][i] is 900:\n cost = 1350\n else:\n cost = 1800\n setattr(battery, \"cost\", cost)\n battery.capacity = self.caps[self.big_iterations][i]", "def compute_cost(AL,Y,cost_function_name):\n cost_functions = {\n \"cost_func_1\": cf.cost_function_1\n } \n\n activ_func = cost_functions.get(cost_function_name,lambda : \"Invalid Cost Function Name !\")\n\n cost,dAL = activ_func(AL,Y)\n\n return cost, dAL", "def getRateFromProjectedAccruedment(from_capital, to_capital, period):\n \"\"\"\n vc = 100 * pow(1+50.0/100,2)\n vc = 1e9\n print vc\n period = 500\n print period\n \"\"\"\n return 100 * (pow(float(to_capital)/from_capital, 1.0/period)-1)", "def cost(self):\n return self._cost", "def cost(self):\n return self._cost", "def cost(self):\n cost = {}\n if len(self.nodes) == 0:\n return cost\n resources = self.nodes[0].capacity.keys()\n for r in resources:\n values = [n.cost[r] for n in self.nodes]\n estimator = AvgAggregatorEstimator(values)\n cost[r] = estimator\n return cost", "def total_management_cost(self):\n total = 0\n total += self.output_dict['insurance_usd']\n total += self.output_dict['construction_permitting_usd']\n total += self.output_dict['bonding_usd']\n total += self.output_dict['project_management_usd']\n total += self.output_dict['markup_contingency_usd']\n total += self.output_dict['engineering_usd']\n total += self.output_dict['site_facility_usd']\n return total", "def pay_costs(self):\r\n cost = self.cost\r\n if cost:\r\n self.owner.player.char_ob.pay_money(cost)\r\n self.owner.player.msg(\"You pay %s coins for the event.\" % cost)", "def get_total_cost(self):\n total_cost = sum([item.quantity * item.product.price for item in self.orderitem_set.all()])\n return total_cost - total_cost * (self.discount / Decimal('100'))", "def get_annual_investment_cost(self, i):\n cost = 0\n for comp in self.iter_components():\n cost += comp.annualize_investment(i=i)\n\n return cost", "def get_all_cost_per_click(self,bid):\n cost1 = self.get_cost_per_click(bid, 1)\n cost2 = self.get_cost_per_click(bid, 2)\n cost3 = self.get_cost_per_click(bid, 3)\n return cost1, cost2, cost3", "def cost_cal(self, Type, Tract_pop, Tractx, Tracty, cnum):\n Geox1 = sf.FeatureScaling(self.Geox)\n Geoy1 = sf.FeatureScaling(self.Geoy)\n Tract_pop1 = sf.FeatureScaling(Tract_pop)\n Tractx1 = sf.FeatureScaling(Tractx)\n Tracty1 = sf.FeatureScaling(Tracty)\n \n self.cost, temp = ans.cost(self.demandloc, Geox1, Geoy1, Tract_pop1, Type, Tractx1, Tracty1, Tract_pop, cnum)", "def investment_price(self):\n invest = self.max_loss / (self.buy_price - self.stop_loss) * self.buy_price\n if invest > self.capital:\n return round(self.capital, 2)\n else:\n return round(invest, 2)", "def getCost(self):\n\n return self.cost", "def _return_cost(self, child_confidence, informant_reputation, child_action, informant_action, value=\"3yo\"):\n if value == '3yo':\n if child_confidence == 1 and informant_reputation == 1 and child_action == 1 and informant_action == 1:\n cost = -1.0 # (knowledge, knowledge, accept, accept) = reinforce\n elif (child_confidence == 1 and informant_reputation == 1 and child_action == 0 and informant_action == 1):\n cost = +0.5 # (knowledge, knowledge, reject, accept) = slightly punish\n elif (child_confidence == 1 and informant_reputation == 1 and child_action == 1 and informant_action == 0):\n cost = +0.5 # (knowledge, knowledge, accept, reject) = slightly punish\n elif 
(child_confidence == 1 and informant_reputation == 1 and child_action == 0 and informant_action == 0):\n cost = -1.0 # (knowledge, knowledge, reject, reject) = reinforce\n elif (child_confidence == 0 and informant_reputation == 1 and child_action == 1 and informant_action == 1):\n cost = -1.0 # (non-knowledge, knowledge, accept, accept) = reinforce\n elif (child_confidence == 0 and informant_reputation == 1 and child_action == 0 and informant_action == 0):\n cost = -1.0 # (non-knowledge, knowledge, reject, reject) = reinforce\n elif (child_confidence == 0 and informant_reputation == 1 and child_action == 0 and informant_action == 1):\n cost = +1.0 # (non-knowledge, knowledge, reject, accept) = reinforce\n elif (child_confidence == 0 and informant_reputation == 1 and child_action == 1 and informant_action == 0):\n cost = +1.0 # (non-knowledge, knowledge, accept, reject) = punish\n elif (child_confidence == 1 and informant_reputation == 0 and child_action == 1 and informant_action == 1):\n cost = -1.0 # (knowledge, non-knowledge, accept, accept) = reinforce\n elif (child_confidence == 1 and informant_reputation == 0 and child_action == 0 and informant_action == 1):\n cost = +0.5 # (knowledge, non-knowledge, reject, accept) = slightly punish\n elif (child_confidence == 1 and informant_reputation == 0 and child_action == 1 and informant_action == 0):\n cost = +0.5 # (knowledge, non-knowledge, accept, reject) = slightly punish\n elif (child_confidence == 1 and informant_reputation == 0 and child_action == 0 and informant_action == 0):\n cost = -1.0 # (knowledge, non-knowledge, reject, reject) = reinforce\n elif (child_confidence == 0 and informant_reputation == 0 and child_action == 1 and informant_action == 1):\n cost = -1.0 # (non-knowledge, non-knowledge, accept, accept) = reinforce\n elif (child_confidence == 0 and informant_reputation == 0 and child_action == 0 and informant_action == 1):\n cost = +1.0 # (non-knowledge, non-knowledge, reject, accept) = punish\n elif (child_confidence == 0 and informant_reputation == 0 and child_action == 1 and informant_action == 0):\n cost = +1.0 # (non-knowledge, non-knowledge, accept, reject) = punish\n elif (child_confidence == 0 and informant_reputation == 0 and child_action == 0 and informant_action == 0):\n cost = -1.0 # (non-knowledge, non-knowledge, reject, reject) = reinforce\n else:\n raise ValueError(\"ERROR: the '3yo' Bayesian Networks input values are out of range\")\n return cost\n\n elif value == '4yo':\n if (child_confidence == 1 and informant_reputation == 1 and child_action == 1 and informant_action == 1):\n cost = -1.0 # (knowledge, knowledge, accept, accept) = reinforce\n elif (child_confidence == 1 and informant_reputation == 1 and child_action == 0 and informant_action == 1):\n cost = +0.5 # (knowledge, knowledge, reject, accept) = slight punish\n elif (child_confidence == 1 and informant_reputation == 1 and child_action == 1 and informant_action == 0):\n cost = +0.5 # (knowledge, knowledge, accept, reject) = slight punish\n elif (child_confidence == 1 and informant_reputation == 1 and child_action == 0 and informant_action == 0):\n cost = -1.0 # (knowledge, knowledge, reject, reject) = reinforce\n elif (child_confidence == 0 and informant_reputation == 1 and child_action == 1 and informant_action == 1):\n cost = -1.0 # (non-knowledge, knowledge, accept, accept) = reinforce\n elif (child_confidence == 0 and informant_reputation == 1 and child_action == 0 and informant_action == 0):\n cost = -1.0 # (non-knowledge, knowledge, reject, 
reject) = reinforce\n elif (child_confidence == 0 and informant_reputation == 1 and child_action == 0 and informant_action == 1):\n cost = +1.0 # (non-knowledge, knowledge, reject, accept) = punish\n elif (child_confidence == 0 and informant_reputation == 1 and child_action == 1 and informant_action == 0):\n cost = +1.0 # (non-knowledge, knowledge, accept, reject) = punish\n elif (child_confidence == 1 and informant_reputation == 0 and child_action == 1 and informant_action == 1):\n cost = 0.0 # (knowledge, non-knowledge, accept, accept) =\n elif (child_confidence == 1 and informant_reputation == 0 and child_action == 0 and informant_action == 1):\n cost = 0.0 # (knowledge, non-knowledge, reject, accept) =\n elif (child_confidence == 1 and informant_reputation == 0 and child_action == 1 and informant_action == 0):\n cost = 0.0 # (knowledge, non-knowledge, accept, reject) =\n elif (child_confidence == 1 and informant_reputation == 0 and child_action == 0 and informant_action == 0):\n cost = 0.0 # (knowledge, non-knowledge, reject, reject) =\n elif (child_confidence == 0 and informant_reputation == 0 and child_action == 1 and informant_action == 1):\n cost = 0.0 # (non-knowledge, non-knowledge, accept, accept) = zero_cost\n elif (child_confidence == 0 and informant_reputation == 0 and child_action == 0 and informant_action == 1):\n cost = 0.0 # (non-knowledge, non-knowledge, reject, accept) = zero_cost\n elif (child_confidence == 0 and informant_reputation == 0 and child_action == 1 and informant_action == 0):\n cost = 0.0 # (non-knowledge, non-knowledge, accept, reject) = zero_cost\n elif (child_confidence == 0 and informant_reputation == 0 and child_action == 0 and informant_action == 0):\n cost = 0.0 # (non-knowledge, non-knowledge, reject, reject) = zero_cost\n else:\n raise ValueError(\"ERROR: the '4yo' Bayesian Networks input values are out of range\")\n return cost\n else:\n raise ValueError(\"ERROR: input value not recognised, correct values are '3yo' and '4yo'\")", "def capital(self):\n\n capital = 0\n\n for face in self.facesAround:\n\n f = Face(face)\n\n if f.nature == 'tri':\n capital += 1\n\n if f.nature == 'quad':\n capital += 2\n\n if f.nature == 'cinq':\n capital += 3\n\n return capital", "def total_cost(self):\n if self.goal:\n return self.goal + (self.community_contribution or 0)\n else:\n return 0", "def total_cost(clusters):\n inter = 0\n intra = 0\n dm = 0\n for clst in clusters:\n # print clst.label, \"has cost: \", str(clst.inter_cost), str(clst.intra_cost), str(clst.dm_cost)\n inter += clst.inter_cost\n intra += clst.intra_cost\n dm += clst.dm_cost\n total = inter + intra + dm\n #iic = inter + intra\n #print \"inter \" + str(inter) + \" intra \" + str(intra) + \" dm \" + str(dm) + \" total \" + str(total) + \" iic \" + str(iic)\n print str(inter) + \"\\t\" + str(intra) + \"\\t\" + str(dm) + \"\\t\" + str(total) # + \" in \" + str(inr)\n return inter, intra, dm, total", "def cost(foods, foods_used):\n cost = 0.00\n for i, count in foods_used.items():\n cost += (foods[i]['serving_cost'] * count)\n return cost", "def getCost(self, state, action):\n util.raiseNotDefined()", "def __cacula_agio(table):\n from m2py.misc.vectorize import column\n\n PV = table[0][-1]\n total = sum(column(table, 1))\n premium = total/PV - 1\n return round(premium, 2)", "def total_cost_w_tax(tax_rate, state, cost_amount):\n state.upper()\n default_tax_rate = 0.05\n if state == 'CA':\n total_cost = (cost_amount * .07) + cost_amount\n elif tax_rate != 0.07 or tax_rate != 0.05:\n total_cost = 
(cost_amount * tax_rate) + cost_amount\n else:\n total_cost = (cost_amount * default_tax_rate) + cost_amount\n return total_cost, state.upper()", "def getCost(self):\n return self._cost", "def calc_annual_electric_savings (self):\n costs = self.comp_specs['diesel generator o&m']\n\n for kW in costs.keys():\n try:\n if self.average_load < int(kW):\n maintenance = self.comp_specs['diesel generator o&m'][kW]\n break\n except ValueError:\n maintenance = self.comp_specs['diesel generator o&m'][kW]\n\n self.baseline_generation_cost = maintenance + \\\n (self.pre_intertie_generation_fuel_used * self.diesel_prices)\n\n maintenance = self.capital_costs * \\\n (self.comp_specs['percent o&m'] / 100.0)\n self.proposed_generation_cost = maintenance + \\\n self.intertie_offset_generation_fuel_used * \\\n self.intertie_diesel_prices\n self.annual_electric_savings = self.baseline_generation_cost -\\\n self.proposed_generation_cost\n #~ print len(self.annual_electric_savings)\n #~ print 'self.annual_electric_savings',self.annual_electric_savings", "def construction_permitting(self):\n building_permits = 0.02 * self.input_dict['foundation_cost_usd']\n highway_permits = 20000 * self.input_dict['num_hwy_permits']\n construction_permitting_cost = building_permits + highway_permits\n return construction_permitting_cost", "def calculate_path_cost_with_weighted_sum(self, path, attr1, attr2): \n costs = [] \n for i in range(len(path) - 1):\n a = (1- self.G[path[i]][path[i+1]][attr2]) / (2 - self.G[path[i]][path[i+1]][attr1] - self.G[path[i]][path[i+1]][attr2]) \n b = (1- self.G[path[i]][path[i+1]][attr1]) / (2 - self.G[path[i]][path[i+1]][attr1] - self.G[path[i]][path[i+1]][attr2]) \n costs.append(a * self.G[path[i]][path[i+1]][attr1] + b * self.G[path[i]][path[i+1]][attr2]) \n return max(costs)", "def _cost_wage(self):\n avg_drive_time = np.random.normal(self.driving_time, self.driving_time_std)\n hourly_wage = np.random.normal(self.hourly_wage, self.hourly_wage_std)\n total = avg_drive_time * hourly_wage\n return total", "def GetCostIncurred(self):\r\n return self.costsIncurred", "def get_cost(org, target, amount):\n rep, _ = target.Dominion.reputations.get_or_create(organization=org)\n base = 200\n if amount > 0:\n base -= rep.respect + rep.affection\n else:\n base += rep.respect + rep.affection\n if base < 0:\n base = 0\n return base * abs(amount)", "def benefits(self, discount_rate, horizon, external_cost):\n table = [\"\"]\n table.append(self.cofiring_plant.name)\n table.append(\"-------------------\")\n row2 = \"{:30}\" + \"{:20.0f}\"\n table.append(\n row2.format(\n \"Health\", self.health_npv(external_cost, discount_rate, horizon)\n )\n )\n table.append(\n row2.format(\n \"Emission reduction\",\n self.mitigation_npv(external_cost, discount_rate, horizon),\n )\n )\n table.append(row2.format(\"Wages\", self.wages_npv(discount_rate, horizon)))\n table.append(\n row2.format(\n \"Farmer earnings before tax\",\n self.farmer.net_present_value(discount_rate, horizon),\n )\n )\n table.append(\n row2.format(\n \"Trader earnings before tax\",\n self.reseller.net_present_value(discount_rate, horizon),\n )\n )\n return \"\\n\".join(table)", "def _compute_amount(self):\n for line in self:\n line.update({\n 'price_subtotal': line.price_unit * line.quantity,\n })", "def _compute_cuantia_subtotal(self):\n for line in self:\n line.gasto = line.unidades * line.pvp", "def cost(self):\n\n return self._cost", "def test_cost(self):\n self.assertAlmostEqual(m2.cost(params), 57.629, 2)", "def taxicab(a, b):\n street_1, street_2 = 
street(a), street(b)\n avenue_1, avenue_2 = avenue(a), avenue(b)\n return abs(street_1 - street_2) + abs(avenue_1 - avenue_2)", "def calc_annual_heating_savings (self):\n price = (self.diesel_prices + self.cd['heating fuel premium'])\n\n #~ self.base_heating_cost =\n\n #~ self.proposed_heating_cost =\n\n\n\n\n self.annual_heating_savings = self.reduction_diesel_used * price\n #~ print 'self.annual_heating_savings',self.annual_heating_savings", "def insurance(self):\n insurance_cost = 0.0056 * self.input_dict['project_value_usd']\n return insurance_cost", "def route_cost(self, route):\n total_weight = 0\n c = 0\n start = route[0]\n for end in route[1:]:\n y = float(self.stars[start][end]['weight']) - c\n t = total_weight + y\n c = (t - total_weight) - y\n\n total_weight = t\n\n start = end\n return total_weight", "def testActionCosts(self):\n from builder import Builder\n\n action = Parser.parse_as(cost_load.split(\"\\n\"), Action, self.domain)\n b = Builder(action)\n \n expected_term = b(\"load-costs\", \"?v\")\n self.assertEqual(action.get_total_cost(), expected_term)\n\n action.set_total_cost(25)\n self.assertEqual(action.get_total_cost(), b(25))\n\n action.set_total_cost(None)\n self.assertEqual(len(action.effect.parts), 1)\n\n action.set_total_cost(b(\"+\", (\"load-costs\", \"?v\"), 5))\n self.assertEqual(len(action.effect.parts), 2)", "def _cost_function(self, y_pred, y, m):\n sumatory = 0\n for x in range(m):\n sumatory += (y_pred[0][x] -y[0][x])**2\n\n cost = 1/(2*m) * sumatory\n return cost", "def get_hcost(self):\n hvcost = self.get_hvcost()\n dcost = self.get_dcost()\n hcost = hvcost + dcost\n return hcost", "def cost(self, output, labels, weights):\n raise NotImplementedError('Must be overridden by concrete subclass')", "def calculate_cost_for_certificate_batch(self):\n num_inputs = 1\n # output per recipient\n num_outputs = len(self.certificates_to_issue)\n # plus revocation outputs\n num_outputs += sum(1 for c in self.certificates_to_issue.values() if c.revocation_key)\n # plus global revocation, change output, and OP_RETURN\n num_outputs += 3\n self.total = tx_utils.calculate_tx_total(self.tx_cost_constants, num_inputs, num_outputs)\n return self.total", "def critical_benefit_to_cost1(sigma_df,Z,benefit_function,*params):\n return sum(sigma_df.sigma)/(sigma_df.sigma*(benefit_function((sigma_df.j+1),(sigma_df.k+1),*params)-benefit_function((sigma_df.k-sigma_df.j),(sigma_df.k+1),*params))).sum()", "def base_cost(self):\n return self._base_cost", "def year_cost_rule(_m, y):\r\n\r\n return sum(m.RHO[y, s] * m.SCEN[y, s] for s in m.S)" ]
[ "0.7806206", "0.74040896", "0.6545671", "0.64966065", "0.64836544", "0.6464849", "0.63642645", "0.6326284", "0.63027006", "0.62339413", "0.62284034", "0.6213556", "0.6188633", "0.61863375", "0.6160501", "0.61119217", "0.60997415", "0.60323167", "0.599988", "0.5985151", "0.59568524", "0.5935343", "0.5934236", "0.5933673", "0.58778775", "0.582077", "0.58155733", "0.5785474", "0.575747", "0.5714629", "0.5705979", "0.5699434", "0.5698801", "0.56592745", "0.56522506", "0.5650923", "0.5635228", "0.5606497", "0.5605945", "0.56026816", "0.55946594", "0.5573533", "0.5559639", "0.5558678", "0.5557125", "0.55470437", "0.55385494", "0.5531552", "0.5525767", "0.55226463", "0.55226463", "0.55187017", "0.55174524", "0.5508326", "0.54970735", "0.5495967", "0.54888344", "0.548169", "0.548169", "0.54776585", "0.54683805", "0.54610467", "0.5435015", "0.543063", "0.54220456", "0.5419644", "0.5405315", "0.54014397", "0.538992", "0.5387947", "0.53857017", "0.53819627", "0.5381263", "0.5370739", "0.53649616", "0.5362811", "0.5358155", "0.53451973", "0.53386474", "0.53317887", "0.53313893", "0.5329995", "0.53220046", "0.531919", "0.5317704", "0.531568", "0.5304887", "0.5301758", "0.5298865", "0.52914965", "0.52809536", "0.5280436", "0.52800703", "0.52761215", "0.5268672", "0.52536863", "0.5248677", "0.5247725", "0.5232009", "0.5228117" ]
0.7694785
1
Calculate annual electric savings created by the project. Attributes
def calc_annual_electric_savings (self): costs = self.comp_specs['diesel generator o&m'] for kW in costs.keys(): try: if self.average_load < int(kW): maintenance = self.comp_specs['diesel generator o&m'][kW] break except ValueError: maintenance = self.comp_specs['diesel generator o&m'][kW] self.baseline_generation_cost = maintenance + \ (self.pre_intertie_generation_fuel_used * self.diesel_prices) maintenance = self.capital_costs * \ (self.comp_specs['percent o&m'] / 100.0) self.proposed_generation_cost = maintenance + \ self.intertie_offset_generation_fuel_used * \ self.intertie_diesel_prices self.annual_electric_savings = self.baseline_generation_cost -\ self.proposed_generation_cost #~ print len(self.annual_electric_savings) #~ print 'self.annual_electric_savings',self.annual_electric_savings
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calc_annual_electric_savings (self):\n price = self.diesel_prices\n #TODO add rural v non rural\n self.base_generation_cost = self.electric_diesel_reduction * price\n\n\n self.proposed_generation_cost = self.maintenance_cost\n\n self.annual_electric_savings = self.base_generation_cost - \\\n self.proposed_generation_cost\n #~ print 'self.annual_electric_savings',self.annual_electric_savings", "def calc_annual_heating_savings (self):\n price = (self.diesel_prices + self.cd['heating fuel premium'])\n\n #~ self.base_heating_cost =\n\n #~ self.proposed_heating_cost =\n\n\n\n\n self.annual_heating_savings = self.reduction_diesel_used * price\n #~ print 'self.annual_heating_savings',self.annual_heating_savings", "def calc_annual_heating_savings (self):\n price = self.diesel_prices + self.cd['heating fuel premium']\n maintenance = self.comp_specs['heat recovery o&m']\n self.annual_heating_savings = -1 * \\\n (maintenance + (self.lost_heat_recovery * price))", "def _ebit(self):\n return self.net_income + self.tax_expense + self.interest_expense", "def annualized_gains(self, day='today'):\n assert day == 'today' or isinstance(day, date), 'Error! You have to pass a datetime.date istance to the day parameter.'\n if day == 'today':\n day = self.data.index[-1]\n if self.data.index[-1] >= day >= self.data.index[0]:\n day = self._first_good_date(day)\n initialValue = self.invested_amount(day)\n finalValue = self.value(day)\n numberOfDays = (day - self.data.index[0]).days\n return round(((finalValue / initialValue)**(365/numberOfDays) - 1) * 100, 2) \n else:\n return 0", "def annualized_perf(self):\n mean_return = round(self.data.log_returns.mean() * 252, 4)\n risk = round(self.data.log_returns.std() * np.sqrt(252), 4)\n print(\"Return: {} | Risk: {}\".format(mean_return, risk))", "def find_eta_projection(self):\r\n \r\n # Get temporal range in terms of years\r\n timedelta = self.year_E_fore_gov[self.elms_E_fore_gov] - self.year_E_fore_gov[self.elms_E_fore_gov][0]\r\n # Number of years over time\r\n num_years = len(timedelta)\r\n \r\n self.t_eta_fit = np.zeros(num_years)\r\n \r\n for yr in range(0,num_years):\r\n \r\n self.t_eta_fit[yr] = timedelta[yr].days/365.25\r\n \r\n \r\n popt, _ = curve_fit(model_expected_eta,self.t_eta_fit,self.eta_gdp_fore[self.elms_E_fore_gov],p0=(0.7,0.1,0.01))\r\n \r\n self.eta_0 = popt[0]\r\n self.eta_b = popt[1]\r\n self.xi = popt[2]\r\n self.eta = model_expected_eta(self.t,self.eta_0,self.eta_b,self.xi)\r\n \r\n self.E_noncovid = model_emissions(self.eta,self.Y_noncovid)\r\n \r\n return", "def __init__(self, total_cost, ann_rate, ann_salary, portion_saved):\r\n\t\tself.total_cost = total_cost\r\n\t\tself.portion_down_payment = total_cost*0.25\r\n\t\tself.ann_rate = ann_rate\r\n\t\tself.monthly_salary = ann_salary/12\r\n\t\tself.portion_saved = portion_saved\r\n\t\tself.current_savings = [0.0,]\r\n\t\tself.months = 0\r\n\t\tself.new_saving = 0", "def annual_energy(self):\n return self['annual_energy']", "def average(self, returns):\r\n return returns.mean() * self.day", "def calculate_reserves(self):\n # TODO: Add back cash dividends and deduct exchange costs\n console.print(\"Still has to be build.\")", "def __cal_aod(self, year, month, day):\n print 'Calculate...'\n logging.info('[calculate]->Calculate...')\n\n t = datetime.datetime(year, month, day)\n\n ddir = self.aodSetting.data_dir\n wdir = self.aodSetting.p_aot_dir\n ascdir = self.aodSetting.ascii_dir\n aotdir = self.aodSetting.aot_dir\n\n stations = self.aodSetting.stations\n\n # Calculate AOD\n print 
'Calculate AOD...'\n logging.info('[calculate]->Calculate AOD...')\n\n for stId in stations.getstIds():\n station = stations.get(stId)\n fn = station.stId\n k7fn = path.join(self.aodSetting.merge_dir, fn, t.strftime('%Y%m'), fn + \"_\" +\n t.strftime(\"%Y%m%d\") + \"_merge.K7\")\n if not os.path.exists(k7fn):\n continue\n print '[{0}]: Ready'.format(fn)\n logging.info('[calculate]->[{0}]: Ready'.format(fn))\n nsu_dir = path.join(ascdir, fn, t.strftime('%Y%m'))\n nsufn = path.join(nsu_dir, fn + \"_\" +\n t.strftime(\"%Y%m%d\") + '.NSU')\n if not os.path.exists(nsufn):\n if not os.path.exists(nsu_dir):\n os.makedirs(nsu_dir)\n rr = spdata.decode(k7fn)\n r = spdata.extract(rr, 'NSU')\n spdata.save(r, nsufn)\n print '[{0}]: Output nsu file'.format(fn)\n logging.info('[calculate]->[{0}]: Output nsu file'.format(fn))\n\n # check if the external program and the parameter files are ready\n validated = True\n exefn = self.aodSetting.p_aot_exe\n if not os.path.exists(exefn):\n print '[{0}]: Not Found Aot program, {1}'.format(fn, exefn)\n logging.warn(\n '[calculate]->[{0}]: Not Found Aot program, {1}'.format(fn, exefn))\n validated = False\n\n inputfn = self.aodSetting.p_aot_input\n if not os.path.exists(inputfn):\n print '[{0}]: Not Found input parameter data, {1}'.format(fn, inputfn)\n logging.warn(\n '[calculate]->[{0}]: Not Found input parameter data, {1}'.format(fn, inputfn))\n validated = False\n\n ozonefn = self.aodSetting.p_aot_ozone\n if not os.path.exists(ozonefn):\n print '[{0}]: Not Found ozone data, {1}'.format(fn, ozonefn)\n logging.warn(\n '[calculate]->[{0}]: Not Found input parameter data, {1}'.format(fn, inputfn))\n validated = False\n\n calfn = path.join(self.aodSetting.p_cal_dir,\n \"calibr\" + station.calibr + \".cal\")\n if not os.path.exists(calfn):\n print '[{0}]: Not Found calculation paramter data, {1}'.format(fn, calfn)\n logging.warn(\n '[calculate]->[{0}]: Not Found calculation paramter data, {1}'.format(fn, calfn))\n validated = False\n\n if validated:\n tao_dir = path.join(aotdir, fn, t.strftime('%Y%m'))\n if not os.path.exists(tao_dir):\n os.makedirs(tao_dir)\n taofn = path.join(tao_dir, fn + \"_\" +\n t.strftime(\"%Y%m%d\") + '.tao')\n lat = station.lat\n lon = station.lon\n alt = station.alt\n\n spdata.cal_aot(wdir, calfn, taofn, nsufn,\n lat, lon, alt, alpha=1)\n print '[{0}] => {1}'.format(fn, taofn)\n logging.info('[calculate]->[{0}] => {1}'.format(fn, taofn))\n else:\n print '[{0}]: Abort'.format(fn)\n logging.warn('[calculate]->[{0}]: Abort'.format(fn))\n\n print 'Calculate Done!'\n logging.info('[calculate]->Calculate Done!')", "def annualized_return_risk(vals):\n P = 252\n v = np.array(vals)\n vt1 = v[1:]\n vt = v[:-1]\n rets = (vt1-vt)/vt\n \n ann_return = np.mean(rets)*P\n ann_risk = np.std(rets)*np.sqrt(P)\n \n return ann_return, ann_risk", "def insurance(self):\n insurance_cost = 0.0056 * self.input_dict['project_value_usd']\n return insurance_cost", "def year_average_price_rule(_m, y):\r\n\r\n # Total revenue\r\n return sum(m.SCENARIO_REVENUE[y, s] for s in m.S) / sum(m.SCENARIO_DEMAND[y, s] for s in m.S)", "def calc_av_daily_return(self):\n av_return = 0.0\n total_ret = sum(self._returns)\n num_averages = len(self._returns)\n \n if num_averages > 0:\n av_return = total_ret/float(num_averages)\n \n self._av_daily_return = av_return\n return av_return", "def americanprice(self):\n self.americanpay = np.zeros((self.steps+1,self.steps+1))\n self.optionvalue = np.zeros((self.steps+1,self.steps+1))\n self.exercisevalue = 
np.zeros((self.steps+1,self.steps+1))\n self.americanpay[-1,:] = np.array( list( map(lambda x:max(x-self.s,0.0),self.pricetree[-1,:]) ) )\n discount = math.exp( self.r*self.deltatime )\n for i in range(self.steps,0,-1):\n for j in range(i):\n self.optionvalue[i-1][j] = (self.americanpay[i][j]*self.upprob + self.americanpay[i][j+1]*(1-self.upprob))/discount\n self.exercisevalue[i-1][j] = max(self.pricetree[i-1][j]-self.s,0.0)\n self.americanpay[i-1][j] = max(self.optionvalue[i-1][j],self.exercisevalue[i-1][j])\n return self.americanpay[0][0]", "def calc_average_load (self):\n if self.comp_specs['proposed capacity'] != UNKNOWN:\n self.average_load = None\n self.generation = self.forecast.generation['generation diesel']\\\n [self.start_year]\n self.average_load = \\\n self.forecast.yearly_average_diesel_load.ix[self.start_year]\n #~ print 'self.average_load',self.average_load", "def calc_annual_investment(devs, param):\n\n observation_time = param[\"observation_time\"]\n interest_rate = param[\"interest_rate\"]\n q = 1 + param[\"interest_rate\"]\n\n \n # Calculate capital recovery factor\n CRF = ((q**observation_time)*interest_rate)/((q**observation_time)-1)\n \n # Calculate annuity factor for each device\n for device in devs.keys():\n \n # Get device life time\n life_time = devs[device][\"life_time\"]\n\n # Number of required replacements\n n = int(math.floor(observation_time / life_time))\n \n # Inestment for replcaments\n invest_replacements = sum((q ** (-i * life_time)) for i in range(1, n+1))\n\n # Residual value of final replacement\n res_value = ((n+1) * life_time - observation_time) / life_time * (q ** (-observation_time))\n\n # Calculate annualized investments \n if life_time > observation_time:\n devs[device][\"ann_factor\"] = (1 - res_value) * CRF \n else:\n devs[device][\"ann_factor\"] = ( 1 + invest_replacements - res_value) * CRF \n \n\n return devs", "def aveVolumeCalc(ins, date):\n cal = ins.Currency().Calendar()\n enddate = cal.AdjustBankingDays(date, 0)\n startdate = cal.AdjustBankingDays(date, AVERAGING_PERIOD)\n\n prices=[]\n histprices = acm.FPrice.Select(\"instrument = %s and market = '%s' \\\n and day > '%s' and day <='%s'\" % \n (ins.Oid(), DAILY_MARKET, startdate, enddate))\n \n for price in histprices:\n settle = price.Settle()\n if settle >= 0:\n prices.append(settle)\n \n #upgrade 2013 fix for failure during run - acm.Math().AverageOf seems buggy\n try:\n avgprice = (sum(prices)/len(prices))\n except ZeroDivisionError:\n avgprice = 0\n \n #avgprice = acm.Math().AverageOf(prices, None)\n \n #Overwrite today's price if you find it \n newPrice = acm.FPrice.Select01(\"instrument = %s and market = '%s' and day = %s\" % \n (ins.Oid(), THREE_MONTH_MARKET, enddate),\n 'NaN')\n if not newPrice:\n newPrice = acm.FPrice()\n newPrice.Instrument(ins)\n newPrice.Day(enddate)\n newPrice.Market(THREE_MONTH_MARKET)\n newPrice.Currency(ins.Currency())\n\n newPrice.Settle(avgprice)\n try:\n newPrice.Commit()\n print 'INFO: %s price for %s was created on %s' %(THREE_MONTH_MARKET, ins.Name(), date)\n except Exception, err:\n print 'ERROR: %s price for %s did not commit: %s' %(THREE_MONTH_MARKET, ins.Name(), str(err))\n \n return newPrice", "def calculate(self):\n self._emi_months = self._period * 12\n self._total_interest = math.ceil(self._loan_amount * self._period * self._rate / 100)\n self._total_amount_pi = float(self._loan_amount + self._total_interest)\n self._emi_amount = math.ceil(self._total_amount_pi / self._emi_months)\n return self", "def Calculate(WA_HOME_folder, Basin, 
P_Product, ET_Product, LAI_Product, NDM_Product, NDVI_Product, dict_crops, dict_non_crops, Startdate, Enddate, Simulation): \n ######################### Import WA modules ###################################\n \n from wa.General import raster_conversions as RC\n from wa.General import data_conversions as DC\n import wa.Functions.Three as Three\n import wa.Functions.Two as Two\n import wa.Functions.Start as Start\n import wa.Generator.Sheet3 as Generate\n import wa.Functions.Start.Get_Dictionaries as GD\n \n ######################### Set General Parameters ##############################\n\n # Check if there is a full year selected between Startdate and Enddate, otherwise Sheet 3 cannot be produced \n try:\n years_end = pd.date_range(Startdate,Enddate,freq=\"A\").year\n years_start = pd.date_range(Startdate,Enddate,freq=\"AS\").year\n if (len(years_start) == 0 or len(years_end) == 0):\n print \"Calculation period is less than a year, which is not possible for sheet 3\"\n quit\n years = np.unique(np.append(years_end,years_start))\n except:\n print \"Calculation period is less than a year, which is not possible for sheet 3\"\n quit\n\n # Get environmental variable for the Home folder\n if WA_HOME_folder == '':\n WA_env_paths = os.environ[\"WA_HOME\"].split(';')\n Dir_Home = WA_env_paths[0]\n else:\n Dir_Home = WA_HOME_folder\n \t\n # Create the Basin folder\n Dir_Basin = os.path.join(Dir_Home, Basin)\n if not os.path.exists(Dir_Basin):\n os.makedirs(Dir_Basin)\t\n\n # Get the boundaries of the basin based on the shapefile of the watershed\n # Boundaries, Shape_file_name_shp = Start.Boundaries.Determine(Basin)\n Boundaries, Example_dataset = Start.Boundaries.Determine_LU_Based(Basin, Dir_Home)\n \n ############################# Download Data ###################################\n\n # Set the NPP and GPP data for the whole year\n StartYear = Startdate[:4]\n EndYear = Enddate[:4]\n StartdateNDM = '%d-01-01' %int(StartYear)\n EnddateNDM = '%d-12-31' %int(EndYear)\n\n #Set Startdate and Enddate for moving average\n ET_Blue_Green_Classes_dict, Moving_Window_Per_Class_dict = GD.get_bluegreen_classes(version = '1.0') \n Additional_Months_tail = np.max(Moving_Window_Per_Class_dict.values())\n Startdate_Moving_Average = pd.Timestamp(Startdate) - pd.DateOffset(months = Additional_Months_tail)\n Enddate_Moving_Average = pd.Timestamp(Enddate) + pd.DateOffset(months = 0)\n Startdate_Moving_Average_String = '%d-%02d-%02d' %(Startdate_Moving_Average.year, Startdate_Moving_Average.month, Startdate_Moving_Average.day)\n Enddate_Moving_Average_String = '%d-%02d-%02d' %(Enddate_Moving_Average.year, Enddate_Moving_Average.month, Enddate_Moving_Average.day)\n\n # Download data\n Data_Path_P = Start.Download_Data.Precipitation(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate_Moving_Average_String, Enddate_Moving_Average_String, P_Product, Daily = 'n') \n Data_Path_ET = Start.Download_Data.Evapotranspiration(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate, Enddate, ET_Product)\n Data_Path_ETref = Start.Download_Data.ETreference(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate_Moving_Average_String, Enddate_Moving_Average_String)\n Data_Path_NDVI = Start.Download_Data.NDVI(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate, Enddate)\n \n if NDM_Product == 'MOD17':\n Data_Path_NPP = 
Start.Download_Data.NPP(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], StartdateNDM, EnddateNDM, NDM_Product) \n Data_Path_GPP = Start.Download_Data.GPP(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], StartdateNDM, EnddateNDM, NDM_Product) \n\n Data_Path_P_Monthly = os.path.join(Data_Path_P, 'Monthly')\n \n ########################### Create input data #################################\n\n # Create NDM based on MOD17\n if NDM_Product == 'MOD17':\n\n # Create monthly GPP\n Dir_path_GPP = os.path.join(Dir_Basin, Data_Path_GPP)\n Start.Eightdaily_to_monthly_state.Nearest_Interpolate(Dir_path_GPP, StartdateNDM, EnddateNDM)\n Data_Path_NDM = Two.Calc_NDM.NPP_GPP_Based(Dir_Basin, Data_Path_GPP, Data_Path_NPP, Startdate, Enddate)\n\n # Create monthly NDVI based on MOD13\n if NDVI_Product == 'MOD13':\n Dir_path_NDVI = os.path.join(Dir_Basin, Data_Path_NDVI)\n Start.Sixteendaily_to_monthly_state.Nearest_Interpolate(Dir_path_NDVI, Startdate, Enddate)\n\n ###################### Save Data as netCDF files ##############################\n \n #___________________________________Land Use_______________________________\n\n # Get the data of LU and save as nc, This dataset is also used as reference for others\n LUdest = gdal.Open(Example_dataset) \n DataCube_LU = LUdest.GetRasterBand(1).ReadAsArray()\n DataCube_LU[DataCube_LU<0] = np.nan\n\n Name_NC_LU = DC.Create_NC_name('LU', Simulation, Dir_Basin, 3)\n if not os.path.exists(Name_NC_LU):\n DC.Save_as_NC(Name_NC_LU, DataCube_LU, 'LU', Example_dataset)\n\n LUdest = None\n del DataCube_LU\n #_______________________________Evaporation________________________________\n\n # Define info for the nc files\n info = ['monthly','mm', ''.join([Startdate[5:7], Startdate[0:4]]) , ''.join([Enddate[5:7], Enddate[0:4]])]\n\n # Evapotranspiration data\n Name_NC_ET = DC.Create_NC_name('ET', Simulation, Dir_Basin, 3, info)\n if not os.path.exists(Name_NC_ET):\n\n # Get the data of Evaporation and save as nc\n DataCube_ET = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_ET, Startdate, Enddate, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_ET, DataCube_ET, 'ET', Example_dataset, Startdate, Enddate, 'monthly', 0.01)\n del DataCube_ET\n\n #____________________________________NDVI__________________________________\n\n info = ['monthly','-', ''.join([Startdate_Moving_Average_String[5:7], Startdate_Moving_Average_String[0:4]]) , ''.join([Enddate_Moving_Average_String[5:7], Enddate_Moving_Average_String[0:4]])]\n\n\n Name_NC_NDVI = DC.Create_NC_name('NDVI', Simulation, Dir_Basin, 3, info)\n if not os.path.exists(Name_NC_NDVI):\n\n # Get the data of Evaporation and save as nc\n DataCube_NDVI = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_NDVI, Startdate, Enddate, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_NDVI, DataCube_NDVI, 'NDVI', Example_dataset, Startdate, Enddate, 'monthly', 1)\n del DataCube_NDVI\n\n #______________________________Precipitation_______________________________\n\n # Define info for the nc files\n info = ['monthly','mm', ''.join([Startdate_Moving_Average_String[5:7], Startdate_Moving_Average_String[0:4]]) , ''.join([Enddate_Moving_Average_String[5:7], Enddate_Moving_Average_String[0:4]])]\n\n # Precipitation data\n Name_NC_P = DC.Create_NC_name('Prec', Simulation, Dir_Basin, 3, info)\n if not os.path.exists(Name_NC_P):\n\t\n # Get the data of Precipitation and save as nc\n DataCube_Prec = 
RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_P_Monthly, Startdate_Moving_Average_String, Enddate_Moving_Average_String, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_P, DataCube_Prec, 'Prec', Example_dataset, Startdate_Moving_Average_String, Enddate_Moving_Average_String, 'monthly', 0.01)\n del DataCube_Prec\n\n #________________________Reference Evaporation______________________________\n\n # Reference Evapotranspiration data\n Name_NC_ETref = DC.Create_NC_name('ETref', Simulation, Dir_Basin, 3, info)\n if not os.path.exists(Name_NC_ETref):\n\n # Get the data of Evaporation and save as nc\n DataCube_ETref = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_ETref, Startdate_Moving_Average_String, Enddate_Moving_Average_String, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_ETref, DataCube_ETref, 'ETref', Example_dataset, Startdate_Moving_Average_String, Enddate_Moving_Average_String, 'monthly', 0.01)\n del DataCube_ETref\n\n #___________________________Normalized Dry Matter__________________________\n\n # Define info for the nc files\n info = ['monthly','kg_ha-1', ''.join([Startdate[5:7], Startdate[0:4]]) , ''.join([Enddate[5:7], Enddate[0:4]])]\n\n Name_NC_NDM = DC.Create_NC_name('NDM', Simulation, Dir_Basin, 3, info)\n if not os.path.exists(Name_NC_NDM):\n\n # Get the data of Evaporation and save as nc\n DataCube_NDM = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_NDM, Startdate, Enddate, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_NDM, DataCube_NDM, 'NDM', Example_dataset, Startdate, Enddate, 'monthly', 100)\n del DataCube_NDM\n\n ############################# Calculate Sheet 3 ###########################\n\n # Define info for the nc files\n info = ['monthly','mm', ''.join([Startdate_Moving_Average_String[5:7], Startdate_Moving_Average_String[0:4]]) , ''.join([Enddate_Moving_Average_String[5:7], Enddate_Moving_Average_String[0:4]])]\n\n #____________ Evapotranspiration data split in ETblue and ETgreen ____________\n\n Name_NC_ETgreen = DC.Create_NC_name('ETgreen', Simulation, Dir_Basin, 3, info)\n Name_NC_ETblue = DC.Create_NC_name('ETblue', Simulation, Dir_Basin, 3, info)\n \n if not (os.path.exists(Name_NC_ETgreen) or os.path.exists(Name_NC_ETblue)):\n\n # Calculate Blue and Green ET\n DataCube_ETblue, DataCube_ETgreen = Three.SplitET.Blue_Green(Startdate, Enddate, Name_NC_LU, Name_NC_ETref, Name_NC_ET, Name_NC_P)\n\n # Save the ETblue and ETgreen data as NetCDF files\n DC.Save_as_NC(Name_NC_ETblue, DataCube_ETblue, 'ETblue', Example_dataset, Startdate, Enddate, 'monthly', 0.01)\n DC.Save_as_NC(Name_NC_ETgreen, DataCube_ETgreen, 'ETgreen', Example_dataset, Startdate, Enddate, 'monthly', 0.01)\n\n del DataCube_ETblue, DataCube_ETgreen\n \n #____________________________ Create the empty dictionaries ____________________________\n \n # Create the dictionaries that are required for sheet 3 \n wp_y_irrigated_dictionary, wp_y_rainfed_dictionary, wp_y_non_crop_dictionary = GD.get_sheet3_empties()\n \n #____________________________________ Fill in the dictionaries ________________________\n\n # Fill in the crops dictionaries \n wp_y_irrigated_dictionary, wp_y_rainfed_dictionary = Three.Fill_Dicts.Crop_Dictionaries(wp_y_irrigated_dictionary, wp_y_rainfed_dictionary, dict_crops, Name_NC_LU, Name_NC_ETgreen, Name_NC_ETblue, Name_NC_NDM, Name_NC_P, Dir_Basin)\n\n # Fill in the non crops dictionaries \n wp_y_non_crop_dictionary = Three.Fill_Dicts.Non_Crop_Dictionaries(wp_y_non_crop_dictionary, dict_non_crops)\n\n for year in years:\n\n 
############################ Create CSV 3 ################################# \n \n csv_fh_a, csv_fh_b = Generate.CSV.Create(wp_y_irrigated_dictionary, wp_y_rainfed_dictionary, wp_y_non_crop_dictionary, Basin, Simulation, year, Dir_Basin)\n\n ############################ Create Sheet 3 ############################### \n\n Generate.PDF.Create(Dir_Basin, Basin, Simulation, csv_fh_a, csv_fh_b)\n \n return()", "def Calculate(WA_HOME_folder, Basin, P_Product, ET_Product, LAI_Product, NDM_Product, Startdate, Enddate, Simulation): \n ######################### Import WA modules ###################################\n \n from wa.General import raster_conversions as RC\n from wa.General import data_conversions as DC\n import wa.Functions.Two as Two\n import wa.Functions.Start as Start\n import wa.Generator.Sheet2 as Generate\n \n ######################### Set General Parameters ##############################\n\n # Get environmental variable for the Home folder\n if WA_HOME_folder == '':\n WA_env_paths = os.environ[\"WA_HOME\"].split(';')\n Dir_Home = WA_env_paths[0]\n else:\n Dir_Home = WA_HOME_folder\n \n # Create the Basin folder\n Dir_Basin = os.path.join(Dir_Home, Basin)\n if not os.path.exists(Dir_Basin):\n os.makedirs(Dir_Basin)\t\n\n # Get the boundaries of the basin based on the shapefile of the watershed\n # Boundaries, Shape_file_name_shp = Start.Boundaries.Determine(Basin)\n Boundaries, Example_dataset = Start.Boundaries.Determine_LU_Based(Basin, Dir_Home)\n \n ############################# Download Data ###################################\n\n # Set the NPP and GPP data for the whole year\n StartYear = Startdate[:4]\n EndYear = Enddate[:4]\n StartdateNDM = '%d-01-01' %int(StartYear)\n EnddateNDM = '%d-12-31' %int(EndYear)\n \n # Download data\n Data_Path_P = Start.Download_Data.Precipitation(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate, Enddate, P_Product, Daily = 'y') \n Data_Path_ET = Start.Download_Data.Evapotranspiration(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate, Enddate, ET_Product)\n Data_Path_LAI = Start.Download_Data.LAI(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate, Enddate, LAI_Product) \n \n if NDM_Product == 'MOD17':\n Data_Path_NPP = Start.Download_Data.NPP(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], StartdateNDM, EnddateNDM, NDM_Product) \n Data_Path_GPP = Start.Download_Data.GPP(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], StartdateNDM, EnddateNDM, NDM_Product) \n\n Data_Path_P_Daily = os.path.join(Data_Path_P, 'Daily')\n Data_Path_P_Monthly = os.path.join(Data_Path_P, 'Monthly')\n \n ########################### Create input data #################################\n\n # Create Rainy Days based on daily CHIRPS\n Data_Path_RD = Two.Rainy_Days.Calc_Rainy_Days(Dir_Basin, Data_Path_P_Daily, Startdate, Enddate)\n\n # Create monthly LAI\n Dir_path_LAI = os.path.join(Dir_Basin, Data_Path_LAI)\n Start.Eightdaily_to_monthly_state.Nearest_Interpolate(Dir_path_LAI, Startdate, Enddate)\n\n # Create NDM based on MOD17\n if NDM_Product == 'MOD17':\n \n # Create monthly GPP \n Dir_path_GPP = os.path.join(Dir_Basin, Data_Path_GPP)\n Start.Eightdaily_to_monthly_state.Nearest_Interpolate(Dir_path_GPP, StartdateNDM, EnddateNDM)\n Data_Path_NDM = Two.Calc_NDM.NPP_GPP_Based(Dir_Basin, 
Data_Path_GPP, Data_Path_NPP, Startdate, Enddate)\n\n ###################### Save Data as netCDF files ##############################\n \n #___________________________________Land Use_______________________________\n\n # Get the data of LU and save as nc, This dataset is also used as reference for others\n LUdest = gdal.Open(Example_dataset) \n DataCube_LU = LUdest.GetRasterBand(1).ReadAsArray()\n\n Name_NC_LU = DC.Create_NC_name('LU', Simulation, Dir_Basin, 2)\n if not os.path.exists(Name_NC_LU):\n DC.Save_as_NC(Name_NC_LU, DataCube_LU, 'LU', Example_dataset)\n\n LUdest = None\n del DataCube_LU\n\n #______________________________Precipitation_______________________________\n\n # Define info for the nc files\n info = ['monthly','mm', ''.join([Startdate[5:7], Startdate[0:4]]) , ''.join([Enddate[5:7], Enddate[0:4]])]\n\n # Precipitation data\n Name_NC_P = DC.Create_NC_name('Prec', Simulation, Dir_Basin, 2, info)\n if not os.path.exists(Name_NC_P):\n\t\n # Get the data of Precipitation and save as nc\n DataCube_Prec = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_P_Monthly, Startdate, Enddate, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_P, DataCube_Prec, 'Prec', Example_dataset, Startdate, Enddate, 'monthly', 0.01)\n del DataCube_Prec\n\n #_______________________________Evaporation________________________________\n\n # Evapotranspiration data\n Name_NC_ET = DC.Create_NC_name('ET', Simulation, Dir_Basin, 2, info)\n if not os.path.exists(Name_NC_ET):\n\n # Get the data of Evaporation and save as nc\n DataCube_ET = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_ET, Startdate, Enddate, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_ET, DataCube_ET, 'ET', Example_dataset, Startdate, Enddate, 'monthly', 0.01)\n del DataCube_ET\n\n #___________________________Normalized Dry Matter__________________________\n\n # Define info for the nc files\n info = ['monthly','kg_ha-1', ''.join([Startdate[5:7], Startdate[0:4]]) , ''.join([Enddate[5:7], Enddate[0:4]])]\n\n Name_NC_NDM = DC.Create_NC_name('NDM', Simulation, Dir_Basin, 2, info)\n if not os.path.exists(Name_NC_NDM):\n\n # Get the data of Evaporation and save as nc\n DataCube_NDM = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_NDM, Startdate, Enddate, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_NDM, DataCube_NDM, 'NDM', Example_dataset, Startdate, Enddate, 'monthly', 100)\n del DataCube_NDM\n\n #_______________________________Rainy Days_________________________________\n\n # Define info for the nc files\n info = ['monthly','days', ''.join([Startdate[5:7], Startdate[0:4]]) , ''.join([Enddate[5:7], Enddate[0:4]])]\n\n Name_NC_RD = DC.Create_NC_name('RD', Simulation, Dir_Basin, 2, info)\n if not os.path.exists(Name_NC_RD):\n\n # Get the data of Evaporation and save as nc\n DataCube_RD = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_RD, Startdate, Enddate, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_RD, DataCube_RD, 'RD', Example_dataset, Startdate, Enddate, 'monthly', 100)\n del DataCube_RD\n\n #_______________________________Leaf Area Index____________________________\n\n # Define info for the nc files\n info = ['monthly','m2-m-2', ''.join([Startdate[5:7], Startdate[0:4]]) , ''.join([Enddate[5:7], Enddate[0:4]])]\n\n Name_NC_LAI = DC.Create_NC_name('LAI', Simulation, Dir_Basin, 2, info)\n if not os.path.exists(Name_NC_LAI):\n\n # Get the data of Evaporation and save as nc\n DataCube_LAI = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_LAI, Startdate, Enddate, 
Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_LAI, DataCube_LAI, 'LAI', Example_dataset, Startdate, Enddate, 'monthly', 1)\n del DataCube_LAI\n\n ####################### Calculations Sheet 2 ##############################\n \n DataCube_I, DataCube_T, DataCube_E = Two.SplitET.ITE(Dir_Basin, Name_NC_ET, Name_NC_LAI, Name_NC_P, Name_NC_RD, Name_NC_NDM, Name_NC_LU, Startdate, Enddate, Simulation)\n \n ############################ Create CSV 2 ################################# \n\n Dir_Basin_CSV = Generate.CSV.Create(Dir_Basin, Simulation, Basin, Startdate, Enddate, Name_NC_LU, DataCube_I, DataCube_T, DataCube_E, Example_dataset)\n\n ############################ Create Sheet 2 ############################### \n\n Generate.PDF.Create(Dir_Basin, Basin, Simulation, Dir_Basin_CSV)\n\n return()", "def compute (self):\r\n #obtain and validate the inputs\r\n startBalance = self.amount.getNumber()\r\n rate = self.rate.getNumber()/100\r\n years = self.period.getNumber()\r\n if startBalance == 0 or rate == 0 or years == 0:\r\n return\r\n #set the header for the table\r\n result = \"%4s%18s%10s%16s\\n\" % (\"Year\", \"Starting Balance\", \"Interest\", \"Ending Balance\")\r\n #Compute and append the results for each year\r\n totalInterest = 0.0\r\n for year in range (1, years + 1):\r\n interest = startBalance * rate\r\n endBalance = startBalance + interest\r\n result += \"%4d%18.2f%10.2f%16.2f\\n\" % (year, startBalance, interest, endBalance)\r\n #the ending balance for year 1 wil lbe the starting balance for year 2 and so on\r\n startBalance = endBalance\r\n totalInterest += interest\r\n #Append the totals for the entire period - final output for the whole thing\r\n result += \"Ending Balance: $%0.2f\\n\" % endBalance\r\n result += \"Total interest earned: $%0.2f\\n\" % totalInterest\r\n #Output the result while preserving read-only status\r\n self.outputArea[\"state\"] = \"normal\"\r\n self.outputArea.setText(result)\r\n self.outputArea[\"state\"] = \"disabled\"", "def emissions_baseline(self):\n baseline = DataFrame(columns=[\"CO2\", \"NOx\", \"PM10\", \"PM2.5\", \"SO2\"])\n baseline = baseline.append(year_1(self.plant.emissions()))\n baseline = baseline.append(year_1(self.plant.fuel_reseller().emissions()))\n baseline = baseline.append(year_1(self.farmer.emissions_exante))\n baseline.loc[\"Total\"] = baseline.sum()\n baseline.loc[\"Total_plant\"] = baseline.iloc[0]\n baseline.loc[\"Total_transport\"] = baseline.iloc[1]\n baseline.loc[\"Total_field\"] = baseline.iloc[2]\n return baseline", "def annual_fee(self, working_months, year, with_bpjs=True):\n monthly_bpjs = []\n\n total_salary = self.base_salary\n if self.is_salary_allowances is True:\n fixed_allowances = self.summarize( self.fixed_allowances )\n non_fixed_allowances = self.summarize( self.non_fixed_allowances )\n total_salary = total_salary + non_fixed_allowances + fixed_allowances\n #end if\n\n # initialize variable for storing the annual bpjs\n annual_c_old_age_insurance = 0\n annual_i_old_age_insurance = 0\n annual_c_pension_insurance = 0\n annual_i_pension_insurance = 0\n annual_c_health_insurance = 0\n annual_i_health_insurance = 0\n annual_death_insurance = 0\n annual_accident_insurance = 0\n\n if with_bpjs is True:\n # only calculate bpjs if is enabled and automatically set everthing to zero when is false\n start_month = 1\n for month in range(start_month, working_months+1):\n\n company_old_age_insurance = 0\n individual_old_age_insurance = 0\n if self.old_age_insurance_status is True:\n company_old_age_insurance = \\\n 
self._company_old_age_insurance(total_salary)\n\n individual_old_age_insurance = \\\n self._individual_old_age_insurance(total_salary)\n #end if\n\n company_pension_insurance = 0\n individual_pension_insurance = 0\n if self.pension_insurance_status is True:\n company_pension_insurance = \\\n self._company_pension_insurance(total_salary, month, year)\n\n individual_pension_insurance = \\\n self._individual_pension_insurance(total_salary, month, year)\n #end if\n\n company_health_insurance = 0\n individual_health_insurance = 0\n if self.health_insurance_status is True:\n company_health_insurance = \\\n self._company_health_insurance(total_salary)\n\n individual_health_insurance = \\\n self._individual_health_insurance(total_salary)\n #end if\n\n death_insurance = 0\n if self.death_insurance_status is True:\n death_insurance = self._death_insurance(total_salary)\n #end if\n\n accident_insurance = 0\n if self.accident_insurance_status is True:\n accident_insurance = \\\n self._accident_insurance(total_salary, \\\n self.industry_risk_rate)\n #end if\n\n monthly = {\n \"old_age_insurance\" : {\n \"company\" : company_old_age_insurance,\n \"individual\" : individual_old_age_insurance,\n },\n \"pension_insurance\" : {\n \"company\" : company_pension_insurance,\n \"individual\" : individual_pension_insurance,\n },\n \"health_insurance\" : {\n \"company\" : company_health_insurance,\n \"individual\" : individual_health_insurance,\n },\n \"death_insurance\" : death_insurance,\n \"accident_insurance\" : accident_insurance\n }\n\n monthly_bpjs.append(monthly)\n\n annual_c_old_age_insurance = annual_c_old_age_insurance \\\n + company_old_age_insurance\n\n annual_i_old_age_insurance = annual_i_old_age_insurance \\\n + individual_old_age_insurance\n\n annual_c_pension_insurance = annual_c_pension_insurance \\\n + company_pension_insurance\n\n annual_i_pension_insurance = annual_i_pension_insurance \\\n + individual_pension_insurance\n\n annual_c_health_insurance = annual_c_health_insurance \\\n + company_health_insurance\n\n annual_i_health_insurance = annual_i_health_insurance \\\n + individual_health_insurance\n\n annual_death_insurance = annual_death_insurance\\\n + death_insurance\n\n annual_accident_insurance = annual_accident_insurance\\\n + accident_insurance\n #end for\n\n annual_bpjs = {\n \"old_age_insurance\" : {\n \"company\" : annual_c_old_age_insurance,\n \"individual\" : annual_i_old_age_insurance,\n },\n \"pension_insurance\" : {\n \"company\" : annual_c_pension_insurance,\n \"individual\" : annual_i_pension_insurance,\n },\n \"health_insurance\" : {\n \"company\" : annual_c_health_insurance,\n \"individual\" : annual_i_health_insurance,\n },\n \"death_insurance\" : annual_death_insurance,\n \"accident_insurance\" : annual_accident_insurance\n }\n return annual_bpjs", "def generate_organisation_addition(self):\n\t\treserved_columns = list()\n\t\ttotal_attandance = list()\n\t\tn = list()\n\t\tfor column in self.days[0].data.columns:\n\t\t\tif column.startswith('reserved_'):\n\t\t\t\treserved_columns.append(column)\n\t\t\t\ttotal_attandance.append(0)\n\t\t\t\tn.append(0)\n\n\t\tfor day in self.days:\n\t\t\tfor index, row in day.data.iterrows():\n\t\t\t\tfor i, column in enumerate(reserved_columns):\n\t\t\t\t\tif int(row[column]) > 0:\n\t\t\t\t\t\tweekend = True\n\t\t\t\t\t\tif int(row['day_of_week']) < 5:\n\t\t\t\t\t\t\tweekend = False\n\t\t\t\t\t\ttotal_attandance[i] += row['pool'] - self.get_average_for_month_at_time(int(row['month'])-1, int(row['hour']), int(row['minute']), 
weekend)\n\t\t\t\t\t\tn[i] += 1\n\n\t\tself.org_addition = dict()\n\t\tfor i, column in enumerate(reserved_columns):\n\t\t\tif n[i] > 0:\n\t\t\t\tself.org_addition[column] = total_attandance[i]/n[i]\n\t\t\telse:\n\t\t\t\tself.org_addition[column] = 0", "def annualized_volatility(self):\n return self.daily_std() * math.sqrt(252)", "def SetupYearRecForIncomeTax(\n self, earnings=0, oas=0, gis=0, cpp=0, ei=0,\n rrsp=0, bridging=0,nonreg=0, gains=0, eoy_gains=0,\n unapplied_losses=0, rrsp_contributions=0,\n age=30, retired=False, cpi=1):\n j_canuck = person.Person(strategy=self.default_strategy)\n j_canuck.capital_loss_carry_forward = unapplied_losses\n j_canuck.age += age - world.START_AGE\n j_canuck.year += age - world.START_AGE\n j_canuck.retired = retired\n\n year_rec = utils.YearRecord()\n year_rec.is_retired = j_canuck.retired\n year_rec.year = j_canuck.year\n year_rec.incomes.append(incomes.IncomeReceipt(earnings, incomes.INCOME_TYPE_EARNINGS))\n year_rec.incomes.append(incomes.IncomeReceipt(oas, incomes.INCOME_TYPE_OAS))\n year_rec.incomes.append(incomes.IncomeReceipt(gis, incomes.INCOME_TYPE_GIS))\n year_rec.incomes.append(incomes.IncomeReceipt(cpp, incomes.INCOME_TYPE_CPP))\n year_rec.incomes.append(incomes.IncomeReceipt(ei, incomes.INCOME_TYPE_EI))\n year_rec.withdrawals.append(funds.WithdrawReceipt(nonreg, gains, funds.FUND_TYPE_NONREG))\n year_rec.withdrawals.append(funds.WithdrawReceipt(rrsp, 0, funds.FUND_TYPE_RRSP))\n year_rec.withdrawals.append(funds.WithdrawReceipt(bridging, 0, funds.FUND_TYPE_BRIDGING))\n year_rec.tax_receipts.append(funds.TaxReceipt(eoy_gains, funds.FUND_TYPE_NONREG))\n year_rec.deposits.append(funds.DepositReceipt(rrsp_contributions, funds.FUND_TYPE_RRSP))\n year_rec.cpi = cpi\n\n year_rec = j_canuck.CalcPayrollDeductions(year_rec)\n\n return (j_canuck, year_rec)", "def calc_average_load (self):\n #~ self.generation = self.forecast.generation_by_type['generation diesel']\\\n #~ [self.start_year]\n self.average_load = \\\n self.forecast.yearly_average_diesel_load.ix[self.start_year]", "def calculate_profit(self):", "def annual_update(self, state, weather, time):\n soil = state.soil\n crop_type = state.crop.current_crop\n animal_management = state.animal_management\n feed = state.feed\n\n soil.annual_mass_balance()\n\n for variable in self.annual_variables:\n self.annual_variables[variable][2] = \\\n eval(self.annual_variables[variable][0], globals(), locals())", "def averaged_risk(self):\n return self._averaged_risk", "def averaged_risk(self):\n return self._averaged_risk", "def eta_details(self):\n\t\t# Experimentation gives you 72pts to a random science every production\n\t\t# Stupid brute force implementation for now\n\t\trequired = self.required\n\t\trate = self.player.science\n\t\tdef combine(base, add, add_time, chance):\n\t\t\t# add given add into base with +add_time tick and modified by chance\n\t\t\tfor time, p in add.items():\n\t\t\t\ttime += add_time\n\t\t\t\tp *= chance\n\t\t\t\tbase[time] = base.get(time, 0) + p\n\t\tdef _eta_details(value, time_to_prod=self.galaxy.production_rate):\n\t\t\tnaive_eta = max(0, int(math.ceil((required - value)/rate)))\n\t\t\tif naive_eta <= time_to_prod: return {naive_eta: 1}\n\t\t\tbase = {}\n\t\t\twithout_extra = _eta_details(value + rate*time_to_prod)\n\t\t\twith_extra = _eta_details(value + rate*time_to_prod + 72)\n\t\t\tcombine(base, without_extra, time_to_prod, 6/7.)\n\t\t\tcombine(base, with_extra, time_to_prod, 1/7.)\n\t\t\treturn base\n\t\treturn _eta_details(self.current, self.galaxy.production_rate - 
self.galaxy.production_counter)", "def shareholder_equity_to_total_assets(self):\n balance_sheet = self.stock.balance_sheet_dict\n\n # Check for Null values first\n # TODO: make the note more specific\n if 'Total Assets' not in balance_sheet or 'Total Liabilities' not in balance_sheet:\n self.stock.append_calc_result('At least 50% equity to assets ratio?', 'N/A', 'N/A', 'Not enough data found')\n return\n\n value = (balance_sheet['Total Assets'] - balance_sheet['Total Liabilities']) / balance_sheet['Total Assets']\n criteria_passed = ''\n if value >= 0.5:\n criteria_passed = 'Yes'\n elif value < 0.5:\n criteria_passed = 'No'\n\n self.stock.append_calc_result('At least 50% equity to assets ratio?', round(value, 2), criteria_passed, '')", "def annual_summary(self):\n \n #Initialize dict with info about all of year's storms\n hurdat_year = {'id':[],'operational_id':[],'name':[],'max_wspd':[],'min_mslp':[],'category':[],'ace':[]}\n \n #Search for corresponding entry in keys\n count_ss_pure = 0\n count_ss_partial = 0\n iterate_id = 1\n for key in self.dict.keys():\n\n #Retrieve info about storm\n temp_name = self.dict[key]['name']\n temp_vmax = np.array(self.dict[key]['vmax'])\n temp_mslp = np.array(self.dict[key]['mslp'])\n temp_type = np.array(self.dict[key]['type'])\n temp_time = np.array(self.dict[key]['date'])\n temp_ace = self.dict[key]['ace']\n\n #Get indices of all tropical/subtropical time steps\n idx = np.where((temp_type == 'SS') | (temp_type == 'SD') | (temp_type == 'TD') | (temp_type == 'TS') | (temp_type == 'HU'))\n\n #Get times during existence of trop/subtrop storms\n if len(idx[0]) == 0: continue\n trop_time = temp_time[idx]\n if 'season_start' not in hurdat_year.keys():\n hurdat_year['season_start'] = trop_time[0]\n hurdat_year['season_end'] = trop_time[-1]\n\n #Get max/min values and check for nan's\n np_wnd = np.array(temp_vmax[idx])\n np_slp = np.array(temp_mslp[idx])\n if len(np_wnd[~np.isnan(np_wnd)]) == 0:\n max_wnd = np.nan\n max_cat = -1\n else:\n max_wnd = int(np.nanmax(temp_vmax[idx]))\n max_cat = convert_category(np.nanmax(temp_vmax[idx]))\n if len(np_slp[~np.isnan(np_slp)]) == 0:\n min_slp = np.nan\n else:\n min_slp = int(np.nanmin(temp_mslp[idx]))\n\n #Append to dict\n hurdat_year['id'].append(key)\n hurdat_year['name'].append(temp_name)\n hurdat_year['max_wspd'].append(max_wnd)\n hurdat_year['min_mslp'].append(min_slp)\n hurdat_year['category'].append(max_cat)\n hurdat_year['ace'].append(temp_ace)\n hurdat_year['operational_id'].append(self.dict[key]['operational_id'])\n \n #Handle operational vs. 
non-operational storms\n\n #Check for purely subtropical storms\n if 'SS' in temp_type and True not in np.isin(temp_type,['TD','TS','HU']):\n count_ss_pure += 1\n\n #Check for partially subtropical storms\n if 'SS' in temp_type:\n count_ss_partial += 1\n\n #Add generic season info\n hurdat_year['season_storms'] = len(hurdat_year['name'])\n narray = np.array(hurdat_year['max_wspd'])\n narray = narray[~np.isnan(narray)]\n hurdat_year['season_named'] = len(narray[narray>=34])\n hurdat_year['season_hurricane'] = len(narray[narray>=65])\n hurdat_year['season_major'] = len(narray[narray>=100])\n hurdat_year['season_ace'] = np.sum(hurdat_year['ace'])\n hurdat_year['season_subtrop_pure'] = count_ss_pure\n hurdat_year['season_subtrop_partial'] = count_ss_partial\n \n #Return object\n return hurdat_year", "def average( names):\n\n rs = radioastronomy.Spectrum() # create input and average structures\n asum = radioastronomy.Spectrum()\n nsum = 0\n\n # now average coldest data for calibration\n for filename in names:\n\n rs.read_spec_ast(filename)\n rs.azel2radec() # compute ra,dec from az,el\n\n if nsum == 0:\n asum = copy.deepcopy( rs)\n firstlon = rs.gallon\n asum.ydataA = rs.ydataA * rs.durationSec\n asum.gallat = rs.gallat * rs.durationSec\n asum.gallon = rs.gallon * rs.durationSec\n nsum = 1\n firstutc = rs.utc\n lastutc = rs.utc\n else:\n asum.ydataA = asum.ydataA + (rs.ydataA * rs.durationSec)\n asum.count = asum.count + rs.count\n asum.durationSec = asum.durationSec + rs.durationSec\n # fix wrap of longitudes\n if abs(rs.gallon - firstlon) > 180:\n crossZero = True\n if rs.gallon > firstlon:\n rs.gallon = rs.gallon - 360.\n else:\n rs.gallon = rs.gallon + 360.\n asum.gallon = asum.gallon + (rs.gallon * rs.durationSec)\n asum.gallat = asum.gallat + (rs.gallat * rs.durationSec)\n # keep track of observing time for weighted sum\n lastutc = rs.utc\n nsum = nsum + 1\n #end for all files loop\n\n if nsum < 1:\n print \"No acceptable files in average list\"\n else:\n asum.ydataA = asum.ydataA/float(asum.durationSec)\n asum.gallon = asum.gallon/float(asum.durationSec)\n asum.gallat = asum.gallat/float(asum.durationSec)\n aveutc,duration = radioastronomy.aveutcs( firstutc, lastutc)\n asum.utc = aveutc\n if (duration < 1.):\n print 'hotcold.average: very short average interval: ',duration\n return nsum, asum", "def calc_annual_investment(devs, devs_dom, param):\n\n observation_time = param[\"observation_time\"]\n interest_rate = param[\"interest_rate\"]\n q = 1 + param[\"interest_rate\"]\n\n \n\n \n # Calculate capital recovery factor\n CRF = ((q**observation_time)*interest_rate)/((q**observation_time)-1)\n\n \n # Balancing Unit devices\n \n # Calculate annuity factor for each device\n for device in devs.keys():\n \n # Get device life time\n life_time = devs[device][\"life_time\"]\n\n # Number of required replacements\n n = int(math.floor(observation_time / life_time))\n \n # Inestment for replcaments\n invest_replacements = sum((q ** (-i * life_time)) for i in range(1, n+1))\n\n # Residual value of final replacement\n res_value = ((n+1) * life_time - observation_time) / life_time * (q ** (-observation_time))\n\n # Calculate annualized investments \n if life_time > observation_time:\n devs[device][\"ann_factor\"] = (1 - res_value) * CRF \n else:\n devs[device][\"ann_factor\"] = ( 1 + invest_replacements - res_value) * CRF \n \n \n # Building devices\n \n for device in devs_dom.keys():\n \n # Get device life time\n life_time = devs_dom[device][\"life_time\"]\n\n # Number of required replacements\n n = 
int(math.floor(observation_time / life_time))\n \n # Inestment for replcaments\n invest_replacements = sum((q ** (-i * life_time)) for i in range(1, n+1))\n\n # Residual value of final replacement\n res_value = ((n+1) * life_time - observation_time) / life_time * (q ** (-observation_time))\n\n # Calculate annualized investments \n if life_time > observation_time:\n devs_dom[device][\"ann_factor\"] = (1 - res_value) * CRF\n else: \n devs_dom[device][\"ann_factor\"] = ( 1 + invest_replacements - res_value) * CRF\n \n \n # Distribution devices (pipes, pumps)\n \n for dev in [\"pipe\", \"pump\"]: #PUmpe fehlt noch\n \n # Get device life time\n life_time = param[dev + \"_lifetime\"]\n \n # Number of required replacements\n n = int(math.floor(observation_time / life_time))\n \n # Inestment for replcaments\n invest_replacements = sum((q ** (-i * life_time)) for i in range(1, n+1))\n\n # Residual value of final replacement\n res_value = ((n+1) * life_time - observation_time) / life_time * (q ** (-observation_time))\n\n # Calculate annualized investments \n if life_time > observation_time:\n param[\"ann_factor_\" + dev] = (1 - res_value) * CRF \n else:\n param[\"ann_factor_\" + dev] = ( 1 + invest_replacements - res_value) * CRF \n\n return devs", "def getETA():", "def getETA():", "def life_insurance_to_recive_total(self):\n pass", "def get_absolute_regret(self):\n values = self.stats['return_stats']['episode_totals']\n first_episode = self.get_convergence_episode()\n final_return = self.get_final_return()\n regret = np.sum(final_return - values[:first_episode])\n return regret", "def arithmetic_ret(self) -> float:\n return float(np.log(self.tsdf).diff().mean() * self.periods_in_a_year)", "def print_analysis_prices(pv, demand,retail,export, param, E,isCommunity=False,hh=None):\n RemainingSOC=E['LevelOfCharge'][-1]\n timestep = param['timestep']\n SelfConsumption = np.sum(E['inv2load']) * timestep # AC\n TotalFromGrid = np.sum(E['grid2load']) * timestep # AC\n TotalToGrid = np.sum(E['inv2grid']) * timestep # AC\n TotalLoad = demand.sum() * timestep # AC\n #TotalBattToLoad = np.sum(E['store2load']) * timestep # AC\n TotalBattToGrid = np.sum(E['store2grid']) * timestep # AC\n TotalPV = pv.sum() * timestep # DC\n TotalBatteryGeneration = np.sum(E['store2inv']) * timestep # DC\n TotalBatteryConsumption = np.sum(E['pv2store']) * timestep # DC\n if 'inv_losses' in E.keys():\n BatteryLosses=E['batt_losses'].sum()*timestep\n InverterLosses=E['inv_losses'].sum()*timestep\n else:\n BatteryLosses = TotalBatteryConsumption * (1 - param['BatteryEfficiency'])\n InverterLosses = (TotalPV - BatteryLosses-RemainingSOC) * (1 - param['InverterEfficiency'])\n SelfConsumptionRate = SelfConsumption / TotalPV * 100 # in %\n SelfSufficiencyRate = SelfConsumption / TotalLoad * 100\n Bill=((E['grid2load'] * timestep) * retail - (E['inv2grid'] * timestep ) * export).sum()\n Batt_revenue=((E['store2load']*param['InverterEfficiency']*timestep*retail-\n E['pv2store']*param['InverterEfficiency']*timestep*export)).sum()\n \n print ('Total yearly consumption: {:1g} kWh'.format(TotalLoad))\n print ('Total PV production: {:1g} kWh'.format(TotalPV))\n print ('Self Consumption: {:1g} kWh'.format(SelfConsumption))\n print ('Total fed to the grid: {:1g} kWh'.format(TotalToGrid))\n print ('Total bought from the grid: {:1g} kWh'.format(TotalFromGrid))\n print ('Self consumption rate (SCR): {:.3g}%'.format(SelfConsumptionRate))\n print ('Self sufficiency rate (SSR): {:.3g}%'.format(SelfSufficiencyRate))\n print ('Amount of energy provided 
by the battery: {:1g} kWh'.format(TotalBatteryGeneration))\n print ('Total battery losses: {:1g} kWh, i.e., {:1g}% of the total PV'.format(BatteryLosses,BatteryLosses/TotalPV*100))\n #print('Total energy from battery to the load {:1g} kWh'.format(TotalBattToLoad))\n print('Total energy from battery to the grid {:1g} kWh'.format(TotalBattToGrid))\n #print ('Total inverter losses: {:1g} kWh'.format(InverterLosses))\n #print ('Total inverter losses: {:1g} kWh'.format(InverterLosses))\n print ('Total inverter losses: {:1g} kWh, i.e., {:1g}% of the total PV'.format(InverterLosses,InverterLosses/TotalPV*100))\n \n \n TotalCurtailment=np.sum(E['inv2curt'])*timestep # DC\n print ('Total curtailment : {:1g} kWh'.format(TotalCurtailment)) \n residue = TotalPV + TotalFromGrid - TotalToGrid - BatteryLosses - InverterLosses - TotalLoad - TotalCurtailment - RemainingSOC\n print ('Residue (check): {:1g} kWh'.format(residue))\n PV_check = TotalPV - SelfConsumption - TotalToGrid - BatteryLosses - InverterLosses - TotalCurtailment - RemainingSOC\n print ('PV Residue (check): {:1g} kWh'.format(PV_check))\n \n print(bcolors.WARNING + 'Maximum power injected into the grid is {:1g} kW'.format(E['inv2grid'].max())+bcolors.ENDC)\n print(bcolors.WARNING + 'Maximum power drained from the grid is {:1g} kW'.format(E['grid2load'].max())+bcolors.ENDC)\n print (bcolors.WARNING + 'Total bill: {:1g}\\n\\n'.format(Bill)+bcolors.ENDC)\n print (bcolors.WARNING + 'Total Batt_revenue: {:1g}\\n\\n'.format(Batt_revenue)+bcolors.ENDC)\n \n if isCommunity==False:\n AverageDepth = TotalBatteryGeneration / (365 * param['BatteryCapacity'])\n Nfullcycles = 365 * AverageDepth \n print ('Number of equivalent full cycles per year: {:1g} '.format(Nfullcycles))\n print ('Average Charging/Discharging depth: {:1g}\\n\\n'.format(AverageDepth))\n \n out = { 'SCR': SelfConsumptionRate, # \n 'SSR':SelfSufficiencyRate, # \n 'EFC': Nfullcycles, # \n 'Demand_peak': E['grid2load'].max(), # \n 'Inj_peak': E['inv2grid'].max(), #\n 'avg_dod': AverageDepth, #\n 'bill': Bill,\n 'Batt_revenue':Batt_revenue,\n 'Batt_penetration':param['batt_penetration'],\n 'PV_penetration':param['pv_penetration'],\n 'seed':param['seed'],\n 'hh':hh\n }\n else:\n out = { 'SCR': SelfConsumptionRate, # \n 'SSR':SelfSufficiencyRate, # \n 'EFC': None, # \n 'Demand_peak': E['grid2load'].max(), # \n 'Inj_peak': E['inv2grid'].max(), #\n 'avg_dod': None, #\n 'bill': Bill,\n 'Batt_revenue':Batt_revenue,\n 'Batt_penetration':param['batt_penetration'],\n 'PV_penetration':param['pv_penetration'],\n 'seed':param['seed'],\n 'hh':hh\n }\n return out", "def calculate_agrigate(self):\n self.total = 0.0\n for rec in self.data:\n self.total = self.total + rec[\"value\"]\n\n self.agrigate_data = {\n \"site\": self.site,\n \"utc\": self.timestamp_utc,\n \"local\": self.timestamp_local,\n \"tag\": \"TOTAL\",\n \"value\": round(self.total, 3)}\n self.data.append(self.agrigate_data)", "def get_annual_energy_demand(cfg):\n houses_dict = cfg['houses']\n houses_list = sorted(houses_dict.keys())\n\n # Calculate annual energy demand of houses\n # and store the result in the dict containing the house info\n for house_name in houses_list:\n house_type = houses_dict[house_name]['house_type']\n N_Pers = houses_dict[house_name].get('N_Pers', None)\n N_WE = houses_dict[house_name].get('N_WE', None)\n\n # Assign defaults if values are not defined\n if house_type == 'EFH' and pd.isna(N_Pers):\n N_Pers = 3\n houses_dict[house_name]['N_Pers'] = N_Pers\n logger.warning('N_Pers not defined for ' + 
str(house_name)\n + '. Using default ' + str(N_Pers))\n if house_type == 'MFH' and pd.isna(N_WE):\n N_WE = 2\n houses_dict[house_name]['N_WE'] = N_WE\n logger.warning('N_WE not defined for ' + str(house_name)\n + '. Using default ' + str(N_WE))\n\n # Implement the restrictions defined on page 3:\n if house_type == 'EFH' and N_Pers > 12:\n logger.warning('VDI 4655 is only defined for N_Pers <= 12. '\n + str(house_name) + ' uses N_Pers = ' + str(N_Pers)\n + '. Proceeding with your input...')\n if house_type == 'MFH' and N_WE > 40:\n logger.warning('VDI 4655 is only defined for N_WE <= 40. '\n + str(house_name) + ' uses N_WE = ' + str(N_WE)\n + '. Proceeding with your input...')\n\n # Calculate annual energy demand estimates\n if house_type == 'EFH':\n # (6.2.2) Calculate annual electrical energy demand of houses:\n if N_Pers < 3:\n W_a = N_Pers * 2000 # kWh\n elif N_Pers <= 6:\n W_a = N_Pers * 1750 # kWh\n else:\n W_a = N_Pers * 1500 # kWh\n\n # (6.2.3) Calculate annual DHW energy demand of houses:\n Q_TWW_a = N_Pers * 500 # kWh\n\n elif house_type == 'MFH':\n # (6.2.2) Calculate annual electrical energy demand of houses:\n W_a = N_WE * 3000 # kWh\n\n # (6.2.3) Calculate annual DHW energy demand of houses:\n Q_TWW_a = N_WE * 1000 # kWh\n\n else:\n # No house category given. Just use annual demand of 1 kWh\n W_a = 1\n Q_TWW_a = 1\n\n # If W_a and/or Q_TWW_a were already defined by the user in the yaml\n # file, we use those values instead of the calculated ones:\n W_a = houses_dict[house_name].get('W_a', W_a)\n Q_TWW_a = houses_dict[house_name].get('Q_TWW_a', Q_TWW_a)\n\n # Store the results in the dict\n houses_dict[house_name]['W_a'] = W_a\n houses_dict[house_name]['Q_TWW_a'] = Q_TWW_a\n\n # Assign defaults if values are not defined\n if houses_dict[house_name].get('Q_Heiz_a', None) is None:\n Q_Heiz_a = 1 # kWh\n houses_dict[house_name]['Q_Heiz_a'] = Q_Heiz_a\n logger.warning('Q_Heiz_a not defined for ' + house_name\n + '. 
Using default ' + str(Q_Heiz_a) + ' kWh')\n\n # Apply the adjustment factors\n houses_dict[house_name]['Q_Heiz_a'] *= \\\n cfg.get('adjustment_factors', dict()).get('f_Q_Heiz', 1)\n\n houses_dict[house_name]['W_a'] *= \\\n cfg.get('adjustment_factors', dict()).get('f_W', 1)\n\n houses_dict[house_name]['Q_TWW_a'] *= \\\n cfg.get('adjustment_factors', dict()).get('f_Q_TWW', 1)\n\n return houses_dict", "def compute(self):\r\n #obtain and validate the inputs\r\n startBalance = self.amount.getNumber()\r\n rate = self.rate.getNumber() / 100\r\n years = self.period.getNumber()\r\n if startBalance == 0 or rate == 0 or years == 0:\r\n return\r\n \r\n #Set the header for the table\r\n result = \"%4s%18s%10s%16s\\n\" % (\"Year\", \"Starting Balance\", \"Interest\", \"Ending Balance\")\r\n\r\n #Compute and apend the results for each year\r\n totalInterest = 0.0\r\n for year in range(1, years + 1):\r\n interest = startBalance * rate\r\n endBalance = startBalance + interest\r\n result += \"%4d%18.2f%10.2f%16.2f\\n\" % (year, startBalance, interest, endBalance)\r\n startBalance = endBalance\r\n totalInterest += interest\r\n\r\n #Append the totals for the period\r\n result += \"Ending balance: $%0.2f\\n\" % endBalance\r\n result += \"Total interest earned: $%0.2f\\n\" % totalInterest\r\n\r\n #Output the results while preserving read-only status\r\n self.outputArea[\"state\"] = \"normal\"\r\n self.outputArea.setText(result)\r\n self.outputArea[\"state\"] = \"disabled\"", "def years_to_pay(self) -> float:\n return round(self.term / self.term_multiplier * self.n_periods / 12, 1)", "def calc_performance(self):\n for symbol in self.portfolio.assets.keys():\n\n # Total the Performance of all the trades\n start = self.portfolio.trades[symbol].index[0]\n end = self.portfolio.trades[symbol].index[-1]\n trades = len(self.record[symbol])\n profit = self.record[symbol]['profit'].sum()\n loss = self.record[symbol]['loss'].sum()\n # Total or average the trade info for all the trades\n try:\n wins = len(self.record[symbol].groupby('win/loose').groups['w'])\n except (ValueError, KeyError):\n wins = 0\n try:\n losses = len(self.record[symbol].groupby('win/loose').groups['l'])\n except (ValueError, KeyError):\n losses = 0\n try:\n washes = len(self.record[symbol].groupby('win/loose').groups['-'])\n except (ValueError, KeyError):\n washes = 0\n max_drawdown = self.record[symbol]['drawdown'].max()\n average_drawdown = self.record[symbol]['drawdown'].mean()\n max_drawdown_time = self.record[symbol]['drawdown days'].max()\n average_drawdown_time = self.record[symbol]['drawdown days'].mean()\n # Average the risk and market comparisons for all trades\n vol_risk = self.record[symbol]['volatility'].mean()\n beta = self.record[symbol]['beta'].mean()\n lpm_risk = self.record[symbol]['lpm'].mean()\n e_r = self.record[symbol]['expected_return'].mean()\n # Calculate Risk measures\n treynor_ratio = (e_r - self.risk_free_return) / beta\n sharpe_ratio = (e_r - self.risk_free_return) / vol_risk\n # Package up the data for each symbol\n self.performance[symbol] = {\n 'start': start,\n 'end': end,\n 'trades': trades,\n 'wins': wins,\n 'losses': losses,\n 'washes': washes,\n 'profit': profit,\n 'loss': loss,\n 'net_profit': profit - loss,\n 'profit_factor': profit / loss if loss != 0 else 1.0,\n 'percent_profitable': wins / trades if trades != 0 else 0.0,\n 'average_trade_net_profit' : (profit - loss) / trades if trades != 0 else 0.0,\n 'max_drawdown' : max_drawdown,\n 'average_drawdown' : average_drawdown,\n 'max_drawdown_days' : 
max_drawdown_time,\n 'average_drawdown_days' : average_drawdown_time,\n 'volatility_risk' : vol_risk,\n 'beta' : beta,\n 'lower_partial_moment_risk' : lpm_risk,\n 't_r' : treynor_ratio,\n 's_r' : sharpe_ratio\n }\n\n return self", "def yearlyDepreciation():\n return .10", "def wwhr_savings(dwelling):\n # TODO: Variables were defined but not used\n # savings = 0\n # Nshower_with_bath = 1\n # Nshower_without_bath = 0\n Nshower_and_bath = dwelling.wwhr_total_rooms_with_shower_or_bath\n\n S_sum = 0\n for sys in dwelling.wwhr_systems:\n effy = sys['pcdf_sys']['effy_mixer_shower'] / 100\n util = sys['pcdf_sys']['utilisation_mixer_shower']\n S_sum += (sys['Nshowers_with_bath'] * .635 * effy *\n util + sys['Nshowers_without_bath'] * effy * util)\n\n Seff = S_sum / Nshower_and_bath\n Tcoldm = numpy.array(\n [11.1, 10.8, 11.8, 14.7, 16.1, 18.2, 21.3, 19.2, 18.8, 16.3, 13.3, 11.8])\n Awm = .33 * 25 * MONTHLY_HOT_WATER_TEMPERATURE_RISE / (41 - Tcoldm) + 26.1\n Bwm = .33 * 36 * MONTHLY_HOT_WATER_TEMPERATURE_RISE / (41 - Tcoldm)\n\n savings = (dwelling.Nocc * Awm + Bwm) * Seff * (35 - Tcoldm) * \\\n 4.18 * DAYS_PER_MONTH * MONTHLY_HOT_WATER_FACTORS / 3600.\n\n return savings", "def project_management(self):\n # todo: add relationship to site-specific interface with public infrastructure\n if self.output_dict['actual_construction_months'] < 28:\n project_management_cost = (53.333 * self.output_dict['actual_construction_months'] ** 2 -\n 3442 * self.output_dict['actual_construction_months'] +\n 209542) * (self.output_dict['actual_construction_months'] + 2)\n else:\n project_management_cost = (self.output_dict['actual_construction_months'] + 2) * 155000\n return project_management_cost", "def test_period_average():\n\n time_point = datetime(2012, 12, 31)\n period = 25\n spy = DEFAULT_ASSET_FACTORY.make_asset(\"SPY\")\n\n weatherman = weathermen.period_average(CALENDAR)\n forecast = weatherman(DEFAULT_ASSET_FACTORY, time_point, period)\n\n assert is_close(forecast.cagr(spy), .152)", "def _ebitda(self):\n try:\n return self.net_income + self.tax_expense + self.interest_expense + self.depreciation_amortization\n except TypeError:\n logger.exception(\n 'net_income: {}, tax_expense: {}, interest_expense: {}, depreciation_amortization: {}'\n .format(self.net_income, self.tax_expense,\n self.interest_expense,\n self.depreciation_amortization))", "def __init__(self, financial_params, start_year, end_year):\n super().__init__(financial_params, start_year, end_year)\n self.horizon_mode = financial_params['analysis_horizon_mode']\n self.location = financial_params['location']\n self.ownership = financial_params['ownership']\n self.state_tax_rate = financial_params['state_tax_rate']/100\n self.federal_tax_rate = financial_params['federal_tax_rate']/100\n self.property_tax_rate = financial_params['property_tax_rate']/100\n self.ecc_mode = financial_params['ecc_mode']\n self.ecc_df = pd.DataFrame()\n self.equipment_lifetime_report = pd.DataFrame()\n self.tax_calculations = None\n\n self.Scenario = financial_params['CBA']['Scenario']\n self.Finance = financial_params['CBA']['Finance']\n self.valuestream_values = financial_params['CBA']['valuestream_values']\n self.ders_values = financial_params['CBA']['ders_values']\n if 'Battery' in self.ders_values.keys():\n self.ders_values['Battery'] = self.ders_values.pop('Battery')\n if 'CAES' in self.ders_values.keys():\n self.ders_values['CAES'] = self.ders_values.pop('CAES')\n\n self.value_streams = {}\n self.ders = []\n\n self.macrs_depreciation = {\n 3: [33.33, 44.45, 14.81, 
7.41],\n 5: [20, 32, 19.2, 11.52, 11.52, 5.76],\n 7: [14.29, 24.49, 17.49, 12.49, 8.93, 8.92, 8.93, 4.46],\n 10: [10, 18, 14.4, 11.52, 9.22, 7.37, 6.55, 6.55, 6.56, 6.55,\n 3.28],\n 15: [5, 9.5, 8.55, 7.7, 6.83, 6.23, 5.9, 5.9, 5.91, 5.9,\n 5.91, 5.9, 5.91, 5.9, 5.91, 2.95],\n 20: [3.75, 7.219, 6.677, 6.177, 5.713, 5.285, 4.888, 4.522, 4.462, 4.461,\n 4.462, 4.461, 4.462, 4.461, 4.462, 4.461, 4.462, 4.461, 4.462, 4.461,\n 2.231]\n }", "def calculate(self):\n\n rating = 0\n\n props = ['aroma', 'appearance', 'taste', 'palate', 'bottle_style']\n for item in props:\n rating += getattr(self, item, 0)\n\n self.overall = (rating / self.total) / .2", "def scenario_average_price_rule(_m, y, s):\r\n\r\n return m.SCENARIO_REVENUE[y, s] / m.SCENARIO_DEMAND[y, s]", "def average_emission(data: List[EmissionPerCapita], current_year: int) -> float:\r\n\r\n index = current_year - data[0].start_year # get the index for current year\r\n\r\n # Get all emissions from that year.\r\n current_year_emissions = []\r\n for countries in data:\r\n current_year_emissions.append(countries.epc_year[index])\r\n\r\n average = sum(current_year_emissions) / len(data)\r\n return average", "def avg(year):\r\n df = ouvrir_fichier()\r\n df = df.loc[df[\"year\"].isin([year])]\r\n df = df[(\r\n df[\r\n \"emissions\"\r\n ] == 'Emissions (thousand metric tons of carbon dioxide)'\r\n )]\r\n print(df)\r\n mean_value = df.mean()['value']\r\n resultat = {}\r\n resultat[\"year\"] = year\r\n resultat['total'] = float(mean_value)\r\n print(mean_value)\r\n return resultat", "def price_to_3_year_earnings_less_than_15(self):\n\n note = ''\n # check if 'EPS' exists\n if 'EPS' not in self.stock.main_df.columns:\n note = note + 'Could not find EPS on MacroTrends. '\n\n # check if Current price is not 0\n if self.stock.stats_dict['Current Price'] == 0:\n note = note + 'Could not find current price on MacroTrends. 
'\n\n if note != '':\n self.stock.append_calc_result('3 year P/E ratio < 15 ?', 'N/A', 'N/A', note)\n return\n\n curr_price = self.stock.stats_dict['Current Price']\n df = self.stock.main_df\n\n average = 0\n # i want to use 2020 if not empty and 2019 if 2020 is empty\n if not np.isnan(df.iloc[0]['EPS']):\n # current year is there\n past_3_years_df = df.iloc[0: 3]['EPS']\n average = past_3_years_df.mean()\n elif np.isnan(df.iloc[0]['EPS']):\n # current year is not there\n past_3_years_df = df.iloc[1: 4]['EPS']\n average = past_3_years_df.mean()\n if np.isnan(df.iloc[1]['EPS']):\n # past year is not there either\n past_7_years_df = df.iloc[2: 5]['EPS']\n average = past_7_years_df.mean()\n if np.isnan(df.iloc[2]['EPS']):\n self.stock.append_calc_result('7 year P/E ratio < 25 ?', 'N/A', 'N/A',\n 'Must not have filed their annual report for {}'.format(\n self.current_year - 2))\n return\n\n if average == 0:\n self.stock.append_calc_result('3 year P/E ratio < 15 ?', 'N/A', 'N/A',\n 'No average found')\n return\n elif (curr_price / average) <= 15:\n criteria_passed = 'Yes'\n else:\n criteria_passed = 'No'\n\n self.stock.append_calc_result('3 year P/E ratio < 15 ?', round((curr_price / average), 2),\n criteria_passed, '3 Year Average EPS = {}'.format(round(average, 2)))", "def getAvgMarketCosts(self):\n try:\n avgAL = self.frame.mode.game.marketStats[str(self.frame.mode.game.currentRound-1)]['avgSoldAL']\n avgEC = self.frame.mode.game.marketStats[str(self.frame.mode.game.currentRound-1)]['avgSoldEC']\n avgIA = self.frame.mode.game.marketStats[str(self.frame.mode.game.currentRound-1)]['avgSoldIA']\n except:\n avgAL = 0.0\n avgEC = 0.0\n avgIA = 0.0\n \n return (avgAL, avgEC, avgIA)", "def return_on_total_assets():\r\n x = float(input(\"Please Enter Net Income Value: \"))\r\n y = float(input(\"Please Enter Interest Expense Value: \"))\r\n z = float(input(\"Please Enter Beginning Total Assets Value: \"))\r\n w = float(input(\"Please Enter Ending Total Assets Value: \"))\r\n d = ((float(x)+float(y)) / ((float(z)+float(w)) / float(2))) * float(100)\r\n print \">> Your Rate of Return on Total Assets is\",round(d,1),\"%\"", "def incumbant_firm(self, wage):\n \n \n \n # a. demand for capital (capital policy function)\n pol_k = (self.alpha /(self.ret *(1+self.tau_capital)))**((1-self.gamma)/(1-self.gamma-self.alpha)) \\\n * (self.gamma /(wage * (1+self.tau_labor)))**(self.gamma/(1-self.gamma-self.alpha)) \\\n * (self.grid_s_matrix*(1-self.tau_output))**(1/(1-self.alpha-self.gamma))\n \n # b. demand of labor (labor policy function)\n pol_n = (1+self.tau_capital) * self.ret * self.gamma / ((1+self.tau_labor) * wage * self.alpha) * pol_k\n #pol_n = ((smatrix*(1-self.tau_output) * gamma) / wage)**(1/(1-gamma)) * pol_k**(alpha/(1-gamma))\n \n # c. incumbant profit\n pi=(1-self.tau_output) * self.grid_s_matrix * pol_k**self.alpha * pol_n**self.gamma \\\n - (1+self.tau_labor)* wage * pol_n - (1+self.tau_capital) * self.ret * pol_k - self.cf\n \n # d. 
discounted present value of an incumbent establishment, W(s,pol_k(s,theta))\n W = pi / (1-self.rho)\n \n return pol_k, pol_n, pi, W", "def year_cost_rule(_m, y):\r\n\r\n return sum(m.RHO[y, s] * m.SCEN[y, s] for s in m.S)", "def genMarketStat(self):\n myMarketStat = marketstat.MarketStat({'id':str(self.currentRound)})\n self.marketStats[str(self.currentRound)] = myMarketStat\n # set avg price to last rounds market avg price\n if self.currentRound > 1:\n lastMarketStat = self.marketStats[str(self.currentRound-1)]\n myMarketStat.avgSoldAL = lastMarketStat.avgSoldAL\n myMarketStat.avgSoldEC = lastMarketStat.avgSoldEC\n myMarketStat.avgSoldIA = lastMarketStat.avgSoldIA", "def annualize_rets(r, periods_per_year):\n compounded_growth = (1+r).prod()\n n_periods = r.shape[0]\n return compounded_growth**(periods_per_year/n_periods)-1", "def ventilation_rate(self):\n # TODO: calculate based on MERV ratings/efficiency/power/etc.\n return (\n sum(v.calculate_ach(self.volume) for v in self.air_quality_measures)\n + self.outdoor_air_ventilation\n )", "def main():\n arrs = Arrivals(27, 37, 1)\n s = 0.0\n for i in range(50):\n n = arrs.get_arrivals()\n s += n\n print 'Arrival {}: {}'.format(i, n)\n print 'Average arrivals: {}'.format(s / 50.0)", "def calculate(self):", "def runRandomEntryStrat(self):\n start, end = self.randomDays()\n \n gain = (self.df.adj_close[end] - getInfl(self.df.adj_close[start], start.year, end.year)) / \\\n getInfl(self.df.adj_close[start], start.year, end.year)\n #if gain > 6:\n # print \"Windfall: \", start, end, gain\n return gain", "def monthly_fee(self):\n total_salary = self.base_salary\n if self.is_salary_allowances is True:\n fixed_allowances = self.summarize( self.fixed_allowances )\n non_fixed_allowances = self.summarize( self.non_fixed_allowances )\n total_salary = total_salary + non_fixed_allowances + fixed_allowances\n #end if\n\n company_old_age_insurance = 0\n individual_old_age_insurance = 0\n if self.old_age_insurance_status is True:\n company_old_age_insurance = \\\n self._company_old_age_insurance(total_salary)\n\n individual_old_age_insurance = \\\n self._individual_old_age_insurance(total_salary)\n #end if\n\n company_pension_insurance = 0\n individual_pension_insurance = 0\n if self.pension_insurance_status is True:\n company_pension_insurance = \\\n self._company_pension_insurance(total_salary)\n\n individual_pension_insurance = \\\n self._individual_pension_insurance(total_salary)\n #end if\n\n company_health_insurance = 0\n individual_health_insurance = 0\n if self.health_insurance_status is True:\n company_health_insurance = \\\n self._company_health_insurance(total_salary)\n\n individual_health_insurance = \\\n self._individual_health_insurance(total_salary)\n #end if\n\n death_insurance = 0\n if self.death_insurance_status is True:\n death_insurance = self._death_insurance(total_salary)\n #end if\n\n accident_insurance = 0\n if self.accident_insurance_status is True:\n accident_insurance = \\\n self._accident_insurance(total_salary, \\\n self.industry_risk_rate)\n #end if\n\n monthly = {\n \"old_age_insurance\" : {\n \"company\" : company_old_age_insurance,\n \"individual\" : individual_old_age_insurance,\n },\n \"pension_insurance\" : {\n \"company\" : company_pension_insurance,\n \"individual\" : individual_pension_insurance,\n },\n \"health_insurance\" : {\n \"company\" : company_health_insurance,\n \"individual\" : individual_health_insurance,\n },\n \"death_insurance\" : death_insurance,\n \"accident_insurance\" : accident_insurance\n }\n return 
monthly", "def avg_annual_returns(end_of_year_returns, mstat):\n\n # imports mean stats\n from scipy.stats import mstats\n\n # converts returns dict to an array (in decimal fmt)\n returns_arr = np.array(list(end_of_year_returns.values()))/100\n\n if mstat == 'geometric':\n\n # calculates the geometric mean\n gmean_returns = (mstats.gmean(1 + returns_arr) - 1)*100\n\n return round(gmean_returns, 2)\n\n if mstat == 'arithmetic':\n\n # calculates the arithmetic mean\n mean_returns = np.mean(returns_arr)\n\n return round(mean_returns, 2)", "def find_end_year(self, der_list):\n project_start_year = self.start_year\n user_given_end_year = self.end_year\n # (1) User-defined (this should continue to be default)\n if self.horizon_mode == 1:\n self.end_year = user_given_end_year\n # (2) Auto-calculate based on shortest equipment lifetime. (No size optimization)\n if self.horizon_mode == 2:\n shortest_lifetime = 1000 # no technology should last 1000 years -- so this is safe to hardcode\n for der_instance in der_list:\n shortest_lifetime = min(der_instance.expected_lifetime, shortest_lifetime)\n if der_instance.being_sized():\n TellUser.error(\"Analysis horizon mode == 'Auto-calculate based on shortest equipment lifetime', DER-VET will not size any DERs \" +\n f\"when this horizon mode is selected. {der_instance.name} is being sized. Please resolve and rerun.\")\n self.end_year = pd.Period(year=0, freq='y') # cannot preform size optimization with mode==2\n self.end_year = project_start_year + shortest_lifetime-1\n # (3) Auto-calculate based on longest equipment lifetime. (No size optimization)\n if self.horizon_mode == 3:\n longest_lifetime = 0\n for der_instance in der_list:\n if der_instance.technology_type != 'Load':\n longest_lifetime = max(der_instance.expected_lifetime, longest_lifetime)\n if der_instance.being_sized():\n TellUser.error(\"Analysis horizon mode == 'Auto-calculate based on longest equipment lifetime', DER-VET will not size any DERs \" +\n f\"when this horizon mode is selected. {der_instance.name} is being sized. 
Please resolve and rerun.\")\n self.end_year = pd.Period(year=0, freq='y') # cannot preform size optimization with mode==3\n self.end_year = project_start_year + longest_lifetime-1\n return self.end_year", "def run(self, year):\r\n cache_directory = self.config['cache_directory']\r\n simulation_state = SimulationState()\r\n simulation_state.set_cache_directory(cache_directory)\r\n simulation_state.set_current_time(year)\r\n attribute_cache = AttributeCache()\r\n sc = SessionConfiguration(new_instance=True,\r\n package_order=self.config['dataset_pool_configuration'].package_order,\r\n in_storage=attribute_cache)\r\n dataset_pool = sc.get_dataset_pool()\r\n\r\n hh_set = dataset_pool.get_dataset('household')\r\n zone_set = dataset_pool.get_dataset('zone')\r\n job_set = dataset_pool.get_dataset('job')\r\n locations_to_disaggregate = self.config['travel_model_configuration']['locations_to_disaggregate']\r\n len_locations_to_disaggregate = len(locations_to_disaggregate)\r\n if len_locations_to_disaggregate > 0:\r\n primary_location = locations_to_disaggregate[0]\r\n if len_locations_to_disaggregate > 1:\r\n intermediates_string = \", intermediates=[\"\r\n for i in range(1, len_locations_to_disaggregate):\r\n intermediates_string = \"%s%s, \" % (intermediates_string, locations_to_disaggregate[i])\r\n intermediates_string = \"%s]\" % intermediates_string\r\n else:\r\n intermediates_string = \"\"\r\n hh_set.compute_variables(['%s = household.disaggregate(%s.%s %s)' % (zone_set.get_id_name()[0],\r\n primary_location, zone_set.get_id_name()[0],\r\n intermediates_string)], \r\n dataset_pool=dataset_pool)\r\n job_set.compute_variables(['%s = job.disaggregate(%s.%s %s)' % (zone_set.get_id_name()[0],\r\n primary_location, zone_set.get_id_name()[0],\r\n intermediates_string)], \r\n dataset_pool=dataset_pool)\r\n \r\n return self._call_input_file_writer(year, dataset_pool)", "def output_results(self):\n for ba in self.regions:\n if (ba in self.import_regions) or (ba in self.generation_regions):\n continue\n if ba in BA_930_INCONSISTENCY[self.year]:\n logger.warning(f\"Using D instead of (G-TI) for consumed calc in {ba}\")\n self.results[ba][\"net_consumed_mwh\"] = self.eia930.df[\n KEYS[\"E\"][\"D\"] % ba\n ][self.generation.index]\n else:\n self.results[ba][\"net_consumed_mwh\"] = (\n self.generation[ba] - self.eia930.df[KEYS[\"E\"][\"TI\"] % ba]\n )[self.generation.index]\n for pol in POLLUTANTS:\n for adj in ADJUSTMENTS:\n self.results[ba][get_column(pol, adjustment=adj)] = (\n self.results[ba][\n get_rate_column(pol, adjustment=adj, generated=False)\n ]\n * self.results[ba][\"net_consumed_mwh\"]\n )\n\n # Although we directly calculate rates, to calculate annual average rates\n # we sum emissions and generation then divide.\n for time_resolution in TIME_RESOLUTIONS:\n time_dat = self.results[ba].copy(deep=True)\n\n # Get local timezone\n assert not pd.isnull(self.ba_ref.loc[ba, \"timezone_local\"])\n time_dat[\"datetime_local\"] = time_dat.index.tz_convert(\n self.ba_ref.loc[ba, \"timezone_local\"]\n )\n time_dat = time_dat.reset_index() # move datetime_utc to column\n time_dat = time_dat[\n time_dat.datetime_local.dt.year == self.year\n ] # keep year of local data\n\n if time_resolution == \"hourly\":\n # No resampling needed; keep timestamp cols in output\n time_cols = [\"datetime_utc\", \"datetime_local\"]\n missing_hours = time_dat[time_dat.isna().any(axis=1)]\n if len(missing_hours) > 0:\n logger.warning(\n f\"{len(missing_hours)} hours are missing in {ba} consumed data\"\n )\n elif 
time_resolution == \"monthly\":\n time_dat[\"month\"] = time_dat.datetime_local.dt.month\n # Aggregate to appropriate resolution\n time_dat = (\n time_dat.groupby(\"month\")[EMISSION_COLS + [\"net_consumed_mwh\"]]\n .sum()\n .reset_index() # move \"month\" to column\n )\n time_cols = [\"month\"]\n elif time_resolution == \"annual\":\n time_dat[\"year\"] = time_dat.datetime_local.dt.year\n # Aggregate to appropriate resolution\n time_dat = (\n time_dat.groupby(\"year\")[EMISSION_COLS + [\"net_consumed_mwh\"]]\n .sum()\n .reset_index() # move \"year\" to column\n )\n time_cols = [\"year\"]\n\n # Calculate rates from summed emissions, consumption\n for pol in POLLUTANTS:\n for adj in ADJUSTMENTS:\n rate_col = get_rate_column(pol, adj, generated=False)\n emission_col = get_column(pol, adj)\n time_dat[rate_col] = (\n time_dat[emission_col] / time_dat[\"net_consumed_mwh\"]\n )\n\n # Output\n output_to_results(\n time_dat[time_cols + CONSUMED_EMISSION_RATE_COLS],\n ba,\n f\"/carbon_accounting/{time_resolution}/\",\n self.prefix,\n skip_outputs=self.skip_outputs,\n )\n return", "def E_Dynamic_MavkoEtAl2009(rhob,DTS,PR):\n E = (2*(rhob*1000)*((304800/DTS)**2)*(1+PR))/1000000\n return E", "def test_PerfectModelEnsemble_smooth_carries_lead_attrs(\r\n perfectModelEnsemble_initialized_control_1d_ym_cftime,\r\n):\r\n pm = perfectModelEnsemble_initialized_control_1d_ym_cftime\r\n pm_smooth = pm.smooth({\"lead\": 4}, how=\"mean\")\r\n assert (\r\n pm_smooth.verify(metric=\"rmse\", comparison=\"m2e\", dim=\"init\").lead.attrs[\r\n \"units\"\r\n ]\r\n == \"years\"\r\n )", "def getMyValue(self):\n valueBV = 0.0\n valueCR = 0.0\n valueAL = 0.0\n valueEC = 0.0\n valueIA = 0.0\n factorAL = globals.cityCRGen/globals.cityALGen\n factorEC = globals.cityCRGen/globals.cityECGen\n factorIA = globals.cityCRGen/globals.cityIAGen\n ratio = self.strength/100.0\n valueCR += self.myDesign.costCR*ratio\n valueAL += self.myDesign.costAL*ratio\n valueEC += self.myDesign.costEC*ratio\n valueIA += self.myDesign.costIA*ratio\n valueBV += (valueCR +\n valueAL*factorAL +\n valueEC*factorEC +\n valueIA*factorIA) / 1000.0\n return (valueBV, valueCR, valueAL, valueEC, valueIA)", "def simulate_trading(self):\n self._generate_trading_instances()\n self._run_backtest()\n self.portfolio.output_equity()\n res=self.portfolio.get_statistics()\n self.plot.plot_equity()\n return res", "def year_demand_rule(_m, y):\r\n\r\n return sum(m.SCENARIO_DEMAND[y, s] for s in m.S)", "def calculate_activities(self):\n # Sleep\n sleep = self.sleep_hours * 0.95\n\n # Work\n if self.work_intensity == self.INTENSITY_LOW:\n work_factor = 1.5\n elif self.work_intensity == self.INTENSITY_MEDIUM:\n work_factor = 1.8\n else:\n work_factor = 2.2\n work = self.work_hours * work_factor\n\n # Sport (entered in hours/week, so we must divide)\n if self.sport_intensity == self.INTENSITY_LOW:\n sport_factor = 4\n elif self.sport_intensity == self.INTENSITY_MEDIUM:\n sport_factor = 6\n else:\n sport_factor = 10\n sport = (self.sport_hours / 7.0) * sport_factor\n\n # Free time\n if self.freetime_intensity == self.INTENSITY_LOW:\n freetime_factor = 1.3\n elif self.freetime_intensity == self.INTENSITY_MEDIUM:\n freetime_factor = 1.9\n else:\n freetime_factor = 2.4\n freetime = self.freetime_hours * freetime_factor\n\n # Total\n total = (sleep + work + sport + freetime) / 24.0\n return decimal.Decimal(str(total)).quantize(TWOPLACES)", "def take_one_averaged(self):\n self.na.set_center_frequency(6.160574e9)\n self.na.set_span(10e6)\n self.na.set_power(-5, 1)\n 
self.na.set_ifbw(1e3)\n\n self.na.set_query_timeout(40e3)\n set_format = self.na.set_format('polar')\n print \"set_format returned: \", set_format\n self.na.set_trigger_source(\"manual\")\n self.na.set_averages(10)\n self.na.set_trigger_average_mode()\n\n self.na.clear_averages(channel=1)\n self.na.trigger_single(channel=1)\n fpts, xs, ys = self.na.read_data()\n #\n plt.figure()\n plt.plot(fpts, xs)\n plt.plot(fpts, ys)\n plt.show()", "def annual_series(events):\n annually_series = pd.Series(data=events[COL.MAX_OVERLAPPING_SUM].values,\n index=events[COL.START].values,\n name=COL.MAX_OVERLAPPING_SUM).resample('AS').max()\n annually_series = annually_series.sort_values(ascending=False).reset_index(drop=True)\n\n mean_sample_rainfall = annually_series.mean()\n sample_size = annually_series.count()\n\n x = -np.log(np.log((sample_size + 0.2) / (sample_size - (annually_series.index.values + 1.0) + 0.6)))\n x_mean = x.mean()\n\n w = ((x * annually_series).sum() - sample_size * mean_sample_rainfall * x_mean) / \\\n ((x ** 2).sum() - sample_size * x_mean ** 2)\n u = mean_sample_rainfall - w * x_mean\n\n return {'u': u, 'w': w}", "def calc_savings_needed():\r\n annual_salary_real = float(input(\"Starting Salary: \"))\r\n #portion_saved = float(input(\"Enter the percent of your salary to save, as a decimal: \"))\r\n total_cost = 1000000#float(input(\"Enter the cost of your dream home: \"))\r\n semi_annual_raise = .07#float(input(\"Enter the semi annual raise as decimal: \"))\r\n portion_down_payment = .25\r\n current_savings = 0\r\n r = .04;\r\n monthly_salary = annual_salary_real / 12 \r\n \r\n months = 36\r\n epsilon = 100\r\n low = 0.0\r\n high = 1.0\r\n guess = (low + high) / 2.0\r\n num_guesses = 0\r\n breaked = False\r\n \r\n while(abs(current_savings - total_cost*portion_down_payment) > epsilon):\r\n if(guess == 1.0):\r\n breaked = True\r\n break\r\n #print(\"new guess: \",guess)\r\n annual_salary = annual_salary_real\r\n monthly_salary = annual_salary/12\r\n num_guesses += 1 \r\n \r\n month = 0\r\n current_savings = 0\r\n while( month < months):\r\n \r\n current_savings += (current_savings * r /12) + (guess * monthly_salary)\r\n month += 1\r\n if ((month % 6) == 0):\r\n annual_salary += annual_salary * semi_annual_raise\r\n monthly_salary = annual_salary / 12\r\n \r\n if(abs(current_savings - (total_cost*portion_down_payment)) > epsilon):\r\n #print(\"current_savings: \",current_savings)\r\n if(current_savings < (total_cost*portion_down_payment)):\r\n low = guess\r\n else:\r\n high = guess\r\n guess = (low + high) / 2.0\r\n \r\n if(breaked):\r\n print(\"It is not possible to pay down payment in 3 years.\")\r\n else: \r\n print(\"Best savings rate: \", guess)\r\n print(\"Steps in bisection search: \", num_guesses)", "def evaluate(self):\n # define start index test set\n start_test_set = int(len(self.X) * 2 / 3)\n\n # Different methods for cummulativa vs day-ahead forecasting\n if self.forecast_horizon == 1:\n # In sample\n lin_residuals_in_sample = self.y - (self.betas[0] + np.dot(self.X, self.betas[1]))\n self.rmse_in_sample = np.mean(lin_residuals_in_sample ** 2) ** 0.5\n self.var_in_sample = np.var(self.y)\n\n # Out of sample\n # Calculate MSE of wls-ev prediction\n self.mse_wlsev = np.mean((self.y[start_test_set:] - self.ols_predict()) ** 2)\n # Calculate MSE of benchmark prediction\n self.mse_benchmark = np.mean((self.y[start_test_set:] - self.benchmark_predict()) ** 2)\n else:\n # In Sample with betas estimated on full time series\n lin_residuals_in_sample = rolling_sum(self.y, 
self.forecast_horizon) - (\n self.betas[0] + np.dot(self.X[:-(self.forecast_horizon-1)], self.betas[1]))\n self.rmse_in_sample = np.mean(lin_residuals_in_sample ** 2) ** 0.5\n self.var_in_sample = np.var(rolling_sum(self.y, self.forecast_horizon))\n\n # Out of sample\n # calculate realized cummulative returns over forecast horizon sequences\n cum_rets_realized = rolling_sum(self.y[start_test_set:], self.forecast_horizon)\n # Calculate MSE of wls-ev prediction, only where realized values are available\n self.mse_wlsev = np.mean((cum_rets_realized - self.ols_predict()[:-(self.forecast_horizon-1)]) ** 2)\n # Calculate MSE of benchmark prediction, only where realized values are available\n self.mse_benchmark = np.mean(\n (cum_rets_realized - self.benchmark_predict()[:-(self.forecast_horizon-1)]) ** 2)\n\n # Calculate out of sample r-squared\n self.oos_r_squared = 1 - (self.mse_wlsev / self.mse_benchmark)\n # Calculate in sample r-squared\n self.in_sample_r_squared = 1.0 - (self.rmse_in_sample ** 2) / self.var_in_sample", "def annual_dividend(self) -> float:\n return self._annual_dividend", "def calc_year_based_saving_capacities(values, group, group_people_ratio):\n column = 'income_{}{}'.format(group, group_people_ratio)\n for index, obj in enumerate(values):\n if index < len(values) - 1:\n next_obj = values[index + 1]\n # between below years calculation\n years = (obj['year'], next_obj['year'])\n\n # find year specific income distribution for the one person who\n # belong the regarding group\n current_per_people_ratio = obj[column] / group_people_ratio\n next_per_people_ratio = next_obj[column] / group_people_ratio\n\n diff = next_per_people_ratio - current_per_people_ratio\n saving_capacity = diff / current_per_people_ratio\n yield {'year': years, 'savingcapacity': saving_capacity}", "def get_attendance(self):\n\n if len(self.attendance_list):\n attendance_sum = 0\n for attendance in self.attendance_list:\n attendance_sum += attendance.attendance_state\n return attendance_sum/len(self.attendance_list) * 100\n\n else:\n return 100.0", "def calculate_economics(\n irradiance: pd.DataFrame, temperature: pd.DataFrame, wind_speed: pd.DataFrame,\n CECMod: pd.DataFrame, configuration: float = 1\n ):\n p_out = calculate_dc_output(irradiance, temperature, wind_speed, CECMod=CECMod)\n\n # convert dc to AC - considering a flat loss of 14%\n # we have to improve this in the future\n p_out = [v * 0.86 for v in p_out]\n\n day_count = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n monthly_electricity = []\n\n for month in range(12):\n st_index = sum(day_count[:month + 1]) * 24\n end_index = sum(day_count[:month + 2]) * 24\n data = p_out[st_index: end_index]\n # Note: division by 50 is to match the values - remove it later!\n monthly_electricity.append(sum(data) / len(data) / 50)\n\n total_ac_energy = sum(p_out)\n monthly_ac_energy = pd.DataFrame(\n zip(calendar.month_abbr[1:], monthly_electricity),\n columns=['month', 'Thousand kWh']\n )\n\n # Based on the example here: https://nrel-pysam.readthedocs.io/en/master/Import.html\n\n grid = Grid.default(\"PVWattsCommercial\")\n ur = UtilityRate.from_existing(grid, \"PVWattsCommercial\")\n cl = Cashloan.from_existing(grid,\"PVWattsCommercial\")\n\n sam_data = read_sam_data(configuration)\n for module, data in zip([grid, ur, cl], sam_data[:-1]):\n for k, v in data.items():\n if k == 'number_inputs':\n continue\n try:\n module.value(k, v)\n except AttributeError:\n print(module, k, v)\n\n\n grid.SystemOutput.gen = p_out\n\n grid.execute()\n ur.execute()\n 
cl.execute()\n\n # list possible outputs here\n adjusted_installed_cost = cl.Outputs.adjusted_installed_cost\n payback_cash_flow = [-1 * x for x in cl.Outputs.cf_discounted_payback]\n\n return total_ac_energy, monthly_ac_energy, adjusted_installed_cost, payback_cash_flow", "def annualisedReturn(totalReturn, years):\n\t# (1 + totalReturn) ^ (1 / Time(years)) -1\n\tif totalReturn < 0:\n\t\treturn 0.0 - annualisedReturn(abs(totalReturn), years)\n\telse:\n\t\ttotalReturn = totalReturn / 100\n\t\tarr = (1.0 + totalReturn) ** (1.0 / years) - 1\n\t\treturn arr * 100", "def calculate(self):\r\n if self.__calculation_type == self.__DIFFERENTIATED_PAY:\r\n for month in range(1, self.__principal_term+1):\r\n self.__differentiated_pay.append(\r\n ceil(\r\n (self.__credit_principal/self.__principal_term)\r\n + self.__credit_interest*(self.__credit_principal\r\n - (self.__credit_principal\r\n * (month-1))\r\n / self.__principal_term)\r\n )\r\n )\r\n self.__overpayment = sum(self.__differentiated_pay) - self.__credit_principal\r\n\r\n for i, dp in enumerate(self.__differentiated_pay, 1):\r\n print(f'Month {i}: paid out {dp}')\r\n print()\r\n print(f'Overpayment = {self.__overpayment}')\r\n\r\n elif self.__calculation_type == self.__ANNUITY:\r\n if self.__user_choice == self.__SEEK_ANNUITY_MONTHLY:\r\n self.__annuity_monthly = ceil(\r\n self.__credit_principal * ((self.__credit_interest\r\n * pow(1+self.__credit_interest\r\n , self.__principal_term)\r\n )\r\n / (pow(1+self.__credit_interest\r\n , self.__principal_term)\r\n - 1)\r\n )\r\n )\r\n self.__overpayment = (self.__annuity_monthly * self.__principal_term\r\n - self.__credit_principal\r\n )\r\n print(f'Your annuity payment = {self.__annuity_monthly}!')\r\n\r\n elif self.__user_choice == self.__SEEK_TERM:\r\n self.__principal_term = ceil(\r\n log(self.__annuity_monthly / (self.__annuity_monthly\r\n - (self.__credit_interest\r\n * self.__credit_principal))\r\n , 1+self.__credit_interest)\r\n )\r\n self.__overpayment = ceil(self.__annuity_monthly\r\n * self.__principal_term\r\n - self.__credit_principal\r\n )\r\n years = self.__principal_term // 12\r\n months = self.__principal_term % 12\r\n\r\n print(f'You need {years} year{\"s\" if self.__principal_term > 1 else \"\"}'\r\n f'{\" and \" + str(months) + \" months\" if months > 0 else \"\"}'\r\n f' to repay this credit!')\r\n\r\n elif self.__user_choice == self.__SEEK_CREDIT_PRINCIPAL:\r\n self.__credit_principal = ceil(\r\n self.__annuity_monthly\r\n / ((self.__credit_interest\r\n * pow(1+self.__credit_interest, self.__principal_term)\r\n )\r\n / (pow(1+self.__credit_interest, self.__principal_term)\r\n - 1)\r\n )\r\n )\r\n self.__overpayment = ceil(self.__annuity_monthly\r\n * self.__principal_term\r\n - self.__credit_principal)\r\n\r\n print(f'Your credit principal = {self.__credit_principal}!')\r\n print(f'Overpayment = {self.__overpayment}')\r\n\r\n else:\r\n print('Incorrect parameters')\r\n self.usage()", "def portfolio_analytics(port_returns, market_returns):\n\n # add the intercept to the model\n x2 = sm.add_constant(market_returns)\n\n # train the model\n estimator = sm.OLS(port_returns, x2)\n model = estimator.fit()\n\n # get portfolio analytics\n alpha, beta = model.params\n r_squared = model.rsquared\n regression = model.predict()\n\n return alpha, beta, r_squared, regression", "def portfolio_analytics(port_returns, market_returns):\n\n # add the intercept to the model\n x2 = sm.add_constant(market_returns)\n\n # train the model\n estimator = sm.OLS(port_returns, x2)\n model = 
estimator.fit()\n\n # get portfolio analytics\n alpha, beta = model.params\n r_squared = model.rsquared\n regression = model.predict()\n\n return alpha, beta, r_squared, regression", "def predict(self):\n # Input validation\n if 'halflife' not in self.cfg or 'min_periods' not in self.cfg:\n raise ValueError('SingleStockEWM: Model config requires both min_periods (periods backwards) and a '\n 'halflife (decay of historical values, periods to half original value) to run.')\n\n # ## Load up model configs\n halflife = self.cfg['halflife']\n min_periods = self.cfg['min_periods']\n\n # ## Estimates\n # Returns\n realized_returns = self.get('returns', data_type='realized', sampling_freq=self.cfg['returns']['sampling_freq'])\n realized_volumes = self.get('volumes', data_type='realized', sampling_freq=self.cfg['returns']['sampling_freq'])\n realized_sigmas = self.get('sigmas', data_type='realized', sampling_freq=self.cfg['returns']['sampling_freq'])\n logging.info(\"Typical variance of returns: %g\" % realized_returns.var().mean())\n\n self.set('returns', realized_returns.ewm(halflife=halflife, min_periods=min_periods).mean().shift(1).dropna(),\n 'predicted')\n\n # Volumes & sigmas\n self.set('volumes', realized_volumes.ewm(halflife=halflife, min_periods=min_periods).mean().shift(1).dropna(),\n 'predicted')\n self.set('sigmas', realized_sigmas.shift(1).dropna(), 'predicted')\n\n # Covariance\n if 'covariance' not in self.cfg:\n raise NotImplemented('Covariance section needs to be defined under SS EWM model config.')\n elif self.cfg['covariance']['method'] == 'SS':\n self.set('covariance', realized_returns.ewm(halflife=halflife, min_periods=min_periods).cov().\n shift(realized_returns.shape[1]).dropna(), 'predicted', self.cfg['covariance']['sampling_freq'])\n elif self.cfg['covariance']['method'] == 'FF5':\n # Fetch data\n ff_returns = self.get('ff_returns', 'realized', SamplingFrequency.DAY)\n realized_returns = self.get('returns', data_type='realized', sampling_freq=SamplingFrequency.DAY)\n\n update = self.cfg['covariance']['update'] if 'update' in self.cfg['covariance'] else 'monthly'\n if update == 'quarterly':\n update_freq = '3M'\n elif update == 'monthly':\n update_freq = 'M'\n elif update == 'biweekly':\n update_freq = '2W'\n elif update == 'weekly':\n update_freq = 'W'\n else:\n raise NotImplemented('Update freq under covariance only supports: month, biweekly, weekly.')\n\n # Generate computation frequency\n first_days = pd.date_range(start=realized_returns.index[max(self.cfg['min_periods'] + 1, 90)],\n end=realized_returns.index[-1],\n freq=update_freq)\n days_back = self.cfg['covariance']['train_days'] if 'train_days' in self.cfg['covariance'] else 90\n\n # Use ML regression to obtain factor loadings. 
Then factor covariance and stock idiosyncratic variances\n exposures, factor_sigma, idyos = {}, {}, {}\n\n # Every first day in each biweekly period\n cov_rscore = []\n for day in first_days:\n logging.info('Running for {}'.format(day.strftime('%Y %b %d')))\n\n # Grab asset returns for preceding train_days (90 by default)\n used_returns = realized_returns.loc[(realized_returns.index < day) &\n (realized_returns.index >= day - pd.Timedelta(str(days_back) + \" days\"))]\n used_ff_returns = ff_returns.loc[ff_returns.index.isin(used_returns.index)].iloc[:, :-1]\n\n # Multi linear regression to extract factor loadings\n mlr = linear_model.LinearRegression()\n mlr.fit(used_ff_returns, used_returns)\n mlr.predict(used_ff_returns)\n\n # Track performance of FF fit\n # rscore = metrics.r2_score(used_ff_returns, used_returns)\n cov_rscore.append(0)\n #p rint('predict_cov_FF5: mlr score = {s}'.format(s=rscore))\n\n # Factor covariance - on FF returns\n factor_sigma[day] = used_ff_returns.cov().fillna(0)\n # Exposures - factor loadings obtained from multi linear regression coefficients of stock on FF factors\n exposures[day] = pd.DataFrame(data=mlr.coef_, index=realized_returns.columns).fillna(0)\n # Stock idiosyncratic variances - stock var minus FF var, ensure >=0\n idyos[day] = pd.Series(np.diag(used_returns.cov().values -\n exposures[day].values @ factor_sigma[day].values @ exposures[\n day].values.T),\n index=realized_returns.columns).fillna(method='ffill')\n idyos[day][idyos[day] < 0] = 0\n\n self.set('factor_sigma', pd.concat(factor_sigma.values(), axis=0, keys=factor_sigma.keys()), 'predicted')\n self.set('exposures', pd.concat(exposures.values(), axis=0, keys=exposures.keys()), 'predicted')\n self.set('idyos', pd.DataFrame(idyos).T, 'predicted')\n self.set('cov_rscore', pd.DataFrame.from_dict({'date': first_days,\n 'rscore': cov_rscore,\n 'train_days': days_back}), 'predicted')\n\n else:\n raise NotImplemented('Covariance section needs to be defined under SSEWM moodel config and needs definition'\n ' of method: SS (single stock returns) or FF5 (Fama French 5 factor returns).')", "def effecticeInterestRate():\n rate = float(input(\"What is your interest rate:\\n\"))\n compound = int(input(\"How many times in a year you give interest:\\n\"))\n\n EIR = (1 + ((rate/100)/compound))**compound - 1\n eir = EIR*100\n return \"Your effective interest rate is: %.3f\" % eir", "def year_emissions_rule(_m, y):\r\n\r\n return sum(m.SCENARIO_EMISSIONS[y, s] for s in m.S)", "def eta_scan_averages(self):\n return self._eta_scan_averages", "def rta():\r\n x = float(input(\"Please Enter Net Income Value: \"))\r\n y = float(input(\"Please Enter Interest Expense Value: \"))\r\n z = float(input(\"Please Enter Beginning Total Assets Value: \"))\r\n w = float(input(\"Please Enter Ending Total Assets Value: \"))\r\n d = ((float(x)+float(y)) / ((float(z)+float(w)) / float(2))) * float(100)\r\n print \">> Your Rate of Return on Total Assets is\",round(d,1),\"%\"", "def dynamic(self):\n # FrostIndexChangeRate=-(1-Afrost)*FrostIndex - Tavg*exp(-0.04*Kfrost*SnowCover/SnowWaterEquivalent);\n\n FrostIndexChangeRate = -(1 - self.var.Afrost) * self.var.FrostIndex - self.var.Tavg * \\\n np.exp(-0.04 * self.var.Kfrost * self.var.SnowCover / self.var.SnowWaterEquivalent)\n # FrostIndexChangeRate=self.var.AfrostIndex - self.var.Tavg* pcraster.exp(self.var.Kfrost*self.var.SnowCover*self.var.InvSnowWaterEquivalent)\n # Rate of change of frost index (expressed as rate, [degree days/day])\n # CHANGED 9 September 2004:\n # - first term 
should be negative\n # - second term should be subtracted, not added!!\n\n self.var.FrostIndex = np.maximum(self.var.FrostIndex + FrostIndexChangeRate * self.var.DtDay, 0)\n # frost index in soil [degree days]\n # based on Molnau and Bissel (1983, A Continuous Frozen Ground Index for Flood\n # Forecasting. In: Maidment, Handbook of Hydrology, p. 7.28, 7.55)\n # if Tavg is above zero, FrostIndex will stay 0\n # if Tavg is negative, FrostIndex will increase with 1 per degree C per day\n # Exponent of 0.04 (instead of 0.4 in HoH): conversion [cm] to [mm]!\n # Division by SnowDensity because SnowDepth is expressed as equivalent water\n # depth(always less than depth of snow pack)\n # SnowWaterEquivalent taken as 0.100 (based on density of 100 kg/m3) (Handbook of Hydrology, p. 7.5)\n # Afrost, (daily decay coefficient) is taken as 0.97 (Handbook of Hydrology,\n # p. 7.28)\n # Kfrost, (snow depth reduction coefficient) is taken as 0.57 [1/cm],\n # (HH, p. 7.28)" ]
[ "0.7408717", "0.6447516", "0.61507213", "0.56581444", "0.56386214", "0.56319666", "0.56268173", "0.5622094", "0.55830747", "0.55610895", "0.55515134", "0.5541858", "0.5536161", "0.5518753", "0.54845977", "0.5477718", "0.54517406", "0.54387516", "0.54357356", "0.54199076", "0.53974795", "0.5387261", "0.5361069", "0.53575367", "0.53433263", "0.5341639", "0.5339729", "0.5320629", "0.5319645", "0.5316315", "0.53153235", "0.5295949", "0.52666396", "0.52666396", "0.5266586", "0.5239393", "0.5233764", "0.5228951", "0.52226526", "0.5217482", "0.5217482", "0.5217169", "0.5216569", "0.52122873", "0.52082634", "0.5202722", "0.5200642", "0.51988065", "0.5198485", "0.5187398", "0.5179382", "0.5176764", "0.5167867", "0.5161652", "0.51588774", "0.51483923", "0.5145924", "0.51449007", "0.51393056", "0.51257706", "0.51246154", "0.5121864", "0.5119257", "0.5108225", "0.5107774", "0.509725", "0.50881255", "0.5067038", "0.5064662", "0.506426", "0.5062684", "0.5062404", "0.50463057", "0.5044376", "0.5043239", "0.50304496", "0.5028294", "0.502356", "0.50152874", "0.50113976", "0.500755", "0.4999358", "0.49966875", "0.49964982", "0.49891138", "0.49870747", "0.4985805", "0.4985604", "0.49834976", "0.49802232", "0.4977214", "0.49753782", "0.4959081", "0.4959081", "0.4958628", "0.49513942", "0.4951003", "0.49431416", "0.49420258", "0.49399495" ]
0.70783615
1
Calculate annual heating savings created by the project. Attributes
def calc_annual_heating_savings (self): price = self.diesel_prices + self.cd['heating fuel premium'] maintenance = self.comp_specs['heat recovery o&m'] self.annual_heating_savings = -1 * \ (maintenance + (self.lost_heat_recovery * price))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calc_annual_heating_savings (self):\n price = (self.diesel_prices + self.cd['heating fuel premium'])\n\n #~ self.base_heating_cost =\n\n #~ self.proposed_heating_cost =\n\n\n\n\n self.annual_heating_savings = self.reduction_diesel_used * price\n #~ print 'self.annual_heating_savings',self.annual_heating_savings", "def calc_annual_electric_savings (self):\n price = self.diesel_prices\n #TODO add rural v non rural\n self.base_generation_cost = self.electric_diesel_reduction * price\n\n\n self.proposed_generation_cost = self.maintenance_cost\n\n self.annual_electric_savings = self.base_generation_cost - \\\n self.proposed_generation_cost\n #~ print 'self.annual_electric_savings',self.annual_electric_savings", "def calc_annual_electric_savings (self):\n costs = self.comp_specs['diesel generator o&m']\n\n for kW in costs.keys():\n try:\n if self.average_load < int(kW):\n maintenance = self.comp_specs['diesel generator o&m'][kW]\n break\n except ValueError:\n maintenance = self.comp_specs['diesel generator o&m'][kW]\n\n self.baseline_generation_cost = maintenance + \\\n (self.pre_intertie_generation_fuel_used * self.diesel_prices)\n\n maintenance = self.capital_costs * \\\n (self.comp_specs['percent o&m'] / 100.0)\n self.proposed_generation_cost = maintenance + \\\n self.intertie_offset_generation_fuel_used * \\\n self.intertie_diesel_prices\n self.annual_electric_savings = self.baseline_generation_cost -\\\n self.proposed_generation_cost\n #~ print len(self.annual_electric_savings)\n #~ print 'self.annual_electric_savings',self.annual_electric_savings", "def annualized_perf(self):\n mean_return = round(self.data.log_returns.mean() * 252, 4)\n risk = round(self.data.log_returns.std() * np.sqrt(252), 4)\n print(\"Return: {} | Risk: {}\".format(mean_return, risk))", "def annualized_gains(self, day='today'):\n assert day == 'today' or isinstance(day, date), 'Error! 
You have to pass a datetime.date istance to the day parameter.'\n if day == 'today':\n day = self.data.index[-1]\n if self.data.index[-1] >= day >= self.data.index[0]:\n day = self._first_good_date(day)\n initialValue = self.invested_amount(day)\n finalValue = self.value(day)\n numberOfDays = (day - self.data.index[0]).days\n return round(((finalValue / initialValue)**(365/numberOfDays) - 1) * 100, 2) \n else:\n return 0", "def annual_fee(self, working_months, year, with_bpjs=True):\n monthly_bpjs = []\n\n total_salary = self.base_salary\n if self.is_salary_allowances is True:\n fixed_allowances = self.summarize( self.fixed_allowances )\n non_fixed_allowances = self.summarize( self.non_fixed_allowances )\n total_salary = total_salary + non_fixed_allowances + fixed_allowances\n #end if\n\n # initialize variable for storing the annual bpjs\n annual_c_old_age_insurance = 0\n annual_i_old_age_insurance = 0\n annual_c_pension_insurance = 0\n annual_i_pension_insurance = 0\n annual_c_health_insurance = 0\n annual_i_health_insurance = 0\n annual_death_insurance = 0\n annual_accident_insurance = 0\n\n if with_bpjs is True:\n # only calculate bpjs if is enabled and automatically set everthing to zero when is false\n start_month = 1\n for month in range(start_month, working_months+1):\n\n company_old_age_insurance = 0\n individual_old_age_insurance = 0\n if self.old_age_insurance_status is True:\n company_old_age_insurance = \\\n self._company_old_age_insurance(total_salary)\n\n individual_old_age_insurance = \\\n self._individual_old_age_insurance(total_salary)\n #end if\n\n company_pension_insurance = 0\n individual_pension_insurance = 0\n if self.pension_insurance_status is True:\n company_pension_insurance = \\\n self._company_pension_insurance(total_salary, month, year)\n\n individual_pension_insurance = \\\n self._individual_pension_insurance(total_salary, month, year)\n #end if\n\n company_health_insurance = 0\n individual_health_insurance = 0\n if self.health_insurance_status is True:\n company_health_insurance = \\\n self._company_health_insurance(total_salary)\n\n individual_health_insurance = \\\n self._individual_health_insurance(total_salary)\n #end if\n\n death_insurance = 0\n if self.death_insurance_status is True:\n death_insurance = self._death_insurance(total_salary)\n #end if\n\n accident_insurance = 0\n if self.accident_insurance_status is True:\n accident_insurance = \\\n self._accident_insurance(total_salary, \\\n self.industry_risk_rate)\n #end if\n\n monthly = {\n \"old_age_insurance\" : {\n \"company\" : company_old_age_insurance,\n \"individual\" : individual_old_age_insurance,\n },\n \"pension_insurance\" : {\n \"company\" : company_pension_insurance,\n \"individual\" : individual_pension_insurance,\n },\n \"health_insurance\" : {\n \"company\" : company_health_insurance,\n \"individual\" : individual_health_insurance,\n },\n \"death_insurance\" : death_insurance,\n \"accident_insurance\" : accident_insurance\n }\n\n monthly_bpjs.append(monthly)\n\n annual_c_old_age_insurance = annual_c_old_age_insurance \\\n + company_old_age_insurance\n\n annual_i_old_age_insurance = annual_i_old_age_insurance \\\n + individual_old_age_insurance\n\n annual_c_pension_insurance = annual_c_pension_insurance \\\n + company_pension_insurance\n\n annual_i_pension_insurance = annual_i_pension_insurance \\\n + individual_pension_insurance\n\n annual_c_health_insurance = annual_c_health_insurance \\\n + company_health_insurance\n\n annual_i_health_insurance = annual_i_health_insurance \\\n + 
individual_health_insurance\n\n annual_death_insurance = annual_death_insurance\\\n + death_insurance\n\n annual_accident_insurance = annual_accident_insurance\\\n + accident_insurance\n #end for\n\n annual_bpjs = {\n \"old_age_insurance\" : {\n \"company\" : annual_c_old_age_insurance,\n \"individual\" : annual_i_old_age_insurance,\n },\n \"pension_insurance\" : {\n \"company\" : annual_c_pension_insurance,\n \"individual\" : annual_i_pension_insurance,\n },\n \"health_insurance\" : {\n \"company\" : annual_c_health_insurance,\n \"individual\" : annual_i_health_insurance,\n },\n \"death_insurance\" : annual_death_insurance,\n \"accident_insurance\" : annual_accident_insurance\n }\n return annual_bpjs", "def calculate(self):\n self._emi_months = self._period * 12\n self._total_interest = math.ceil(self._loan_amount * self._period * self._rate / 100)\n self._total_amount_pi = float(self._loan_amount + self._total_interest)\n self._emi_amount = math.ceil(self._total_amount_pi / self._emi_months)\n return self", "def annual_update(self, state, weather, time):\n soil = state.soil\n crop_type = state.crop.current_crop\n animal_management = state.animal_management\n feed = state.feed\n\n soil.annual_mass_balance()\n\n for variable in self.annual_variables:\n self.annual_variables[variable][2] = \\\n eval(self.annual_variables[variable][0], globals(), locals())", "def __init__(self, total_cost, ann_rate, ann_salary, portion_saved):\r\n\t\tself.total_cost = total_cost\r\n\t\tself.portion_down_payment = total_cost*0.25\r\n\t\tself.ann_rate = ann_rate\r\n\t\tself.monthly_salary = ann_salary/12\r\n\t\tself.portion_saved = portion_saved\r\n\t\tself.current_savings = [0.0,]\r\n\t\tself.months = 0\r\n\t\tself.new_saving = 0", "def annual_energy(self):\n return self['annual_energy']", "def annualized_return_risk(vals):\n P = 252\n v = np.array(vals)\n vt1 = v[1:]\n vt = v[:-1]\n rets = (vt1-vt)/vt\n \n ann_return = np.mean(rets)*P\n ann_risk = np.std(rets)*np.sqrt(P)\n \n return ann_return, ann_risk", "def _ebit(self):\n return self.net_income + self.tax_expense + self.interest_expense", "def monthly_fee(self):\n total_salary = self.base_salary\n if self.is_salary_allowances is True:\n fixed_allowances = self.summarize( self.fixed_allowances )\n non_fixed_allowances = self.summarize( self.non_fixed_allowances )\n total_salary = total_salary + non_fixed_allowances + fixed_allowances\n #end if\n\n company_old_age_insurance = 0\n individual_old_age_insurance = 0\n if self.old_age_insurance_status is True:\n company_old_age_insurance = \\\n self._company_old_age_insurance(total_salary)\n\n individual_old_age_insurance = \\\n self._individual_old_age_insurance(total_salary)\n #end if\n\n company_pension_insurance = 0\n individual_pension_insurance = 0\n if self.pension_insurance_status is True:\n company_pension_insurance = \\\n self._company_pension_insurance(total_salary)\n\n individual_pension_insurance = \\\n self._individual_pension_insurance(total_salary)\n #end if\n\n company_health_insurance = 0\n individual_health_insurance = 0\n if self.health_insurance_status is True:\n company_health_insurance = \\\n self._company_health_insurance(total_salary)\n\n individual_health_insurance = \\\n self._individual_health_insurance(total_salary)\n #end if\n\n death_insurance = 0\n if self.death_insurance_status is True:\n death_insurance = self._death_insurance(total_salary)\n #end if\n\n accident_insurance = 0\n if self.accident_insurance_status is True:\n accident_insurance = \\\n 
self._accident_insurance(total_salary, \\\n self.industry_risk_rate)\n #end if\n\n monthly = {\n \"old_age_insurance\" : {\n \"company\" : company_old_age_insurance,\n \"individual\" : individual_old_age_insurance,\n },\n \"pension_insurance\" : {\n \"company\" : company_pension_insurance,\n \"individual\" : individual_pension_insurance,\n },\n \"health_insurance\" : {\n \"company\" : company_health_insurance,\n \"individual\" : individual_health_insurance,\n },\n \"death_insurance\" : death_insurance,\n \"accident_insurance\" : accident_insurance\n }\n return monthly", "def genMarketStat(self):\n myMarketStat = marketstat.MarketStat({'id':str(self.currentRound)})\n self.marketStats[str(self.currentRound)] = myMarketStat\n # set avg price to last rounds market avg price\n if self.currentRound > 1:\n lastMarketStat = self.marketStats[str(self.currentRound-1)]\n myMarketStat.avgSoldAL = lastMarketStat.avgSoldAL\n myMarketStat.avgSoldEC = lastMarketStat.avgSoldEC\n myMarketStat.avgSoldIA = lastMarketStat.avgSoldIA", "def compute (self):\r\n #obtain and validate the inputs\r\n startBalance = self.amount.getNumber()\r\n rate = self.rate.getNumber()/100\r\n years = self.period.getNumber()\r\n if startBalance == 0 or rate == 0 or years == 0:\r\n return\r\n #set the header for the table\r\n result = \"%4s%18s%10s%16s\\n\" % (\"Year\", \"Starting Balance\", \"Interest\", \"Ending Balance\")\r\n #Compute and append the results for each year\r\n totalInterest = 0.0\r\n for year in range (1, years + 1):\r\n interest = startBalance * rate\r\n endBalance = startBalance + interest\r\n result += \"%4d%18.2f%10.2f%16.2f\\n\" % (year, startBalance, interest, endBalance)\r\n #the ending balance for year 1 wil lbe the starting balance for year 2 and so on\r\n startBalance = endBalance\r\n totalInterest += interest\r\n #Append the totals for the entire period - final output for the whole thing\r\n result += \"Ending Balance: $%0.2f\\n\" % endBalance\r\n result += \"Total interest earned: $%0.2f\\n\" % totalInterest\r\n #Output the result while preserving read-only status\r\n self.outputArea[\"state\"] = \"normal\"\r\n self.outputArea.setText(result)\r\n self.outputArea[\"state\"] = \"disabled\"", "def years_to_pay(self) -> float:\n return round(self.term / self.term_multiplier * self.n_periods / 12, 1)", "def average(self, returns):\r\n return returns.mean() * self.day", "def annualized_volatility(self):\n return self.daily_std() * math.sqrt(252)", "def annual_summary(self):\n \n #Initialize dict with info about all of year's storms\n hurdat_year = {'id':[],'operational_id':[],'name':[],'max_wspd':[],'min_mslp':[],'category':[],'ace':[]}\n \n #Search for corresponding entry in keys\n count_ss_pure = 0\n count_ss_partial = 0\n iterate_id = 1\n for key in self.dict.keys():\n\n #Retrieve info about storm\n temp_name = self.dict[key]['name']\n temp_vmax = np.array(self.dict[key]['vmax'])\n temp_mslp = np.array(self.dict[key]['mslp'])\n temp_type = np.array(self.dict[key]['type'])\n temp_time = np.array(self.dict[key]['date'])\n temp_ace = self.dict[key]['ace']\n\n #Get indices of all tropical/subtropical time steps\n idx = np.where((temp_type == 'SS') | (temp_type == 'SD') | (temp_type == 'TD') | (temp_type == 'TS') | (temp_type == 'HU'))\n\n #Get times during existence of trop/subtrop storms\n if len(idx[0]) == 0: continue\n trop_time = temp_time[idx]\n if 'season_start' not in hurdat_year.keys():\n hurdat_year['season_start'] = trop_time[0]\n hurdat_year['season_end'] = trop_time[-1]\n\n #Get max/min values and 
check for nan's\n np_wnd = np.array(temp_vmax[idx])\n np_slp = np.array(temp_mslp[idx])\n if len(np_wnd[~np.isnan(np_wnd)]) == 0:\n max_wnd = np.nan\n max_cat = -1\n else:\n max_wnd = int(np.nanmax(temp_vmax[idx]))\n max_cat = convert_category(np.nanmax(temp_vmax[idx]))\n if len(np_slp[~np.isnan(np_slp)]) == 0:\n min_slp = np.nan\n else:\n min_slp = int(np.nanmin(temp_mslp[idx]))\n\n #Append to dict\n hurdat_year['id'].append(key)\n hurdat_year['name'].append(temp_name)\n hurdat_year['max_wspd'].append(max_wnd)\n hurdat_year['min_mslp'].append(min_slp)\n hurdat_year['category'].append(max_cat)\n hurdat_year['ace'].append(temp_ace)\n hurdat_year['operational_id'].append(self.dict[key]['operational_id'])\n \n #Handle operational vs. non-operational storms\n\n #Check for purely subtropical storms\n if 'SS' in temp_type and True not in np.isin(temp_type,['TD','TS','HU']):\n count_ss_pure += 1\n\n #Check for partially subtropical storms\n if 'SS' in temp_type:\n count_ss_partial += 1\n\n #Add generic season info\n hurdat_year['season_storms'] = len(hurdat_year['name'])\n narray = np.array(hurdat_year['max_wspd'])\n narray = narray[~np.isnan(narray)]\n hurdat_year['season_named'] = len(narray[narray>=34])\n hurdat_year['season_hurricane'] = len(narray[narray>=65])\n hurdat_year['season_major'] = len(narray[narray>=100])\n hurdat_year['season_ace'] = np.sum(hurdat_year['ace'])\n hurdat_year['season_subtrop_pure'] = count_ss_pure\n hurdat_year['season_subtrop_partial'] = count_ss_partial\n \n #Return object\n return hurdat_year", "def calculate_profit(self):", "def __init__(self):\n self.annual_interest_rate = 10.0 / 100.0\n self.initial_loan_date = date(2014, 12, 1)\n self.currency = 'HKD'\n self.total_loan_amount = Money('100000.00', 'HKD')\n self.final_payment_date = self.initial_loan_date + \\\n relativedelta(years=1)", "def calcAnnualWeightedAveInsolation(latitude, slope, azimuth):\n\tdf = calcTotalInsolation(latitude, slope, azimuth)\n\treturn np.dot(\n\t\tnp.array([31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]),\n\t\tdf['insolation_tilted']\n\t\t) / 365.0", "def batting_average(df,start_year,end_year,bat_met,player_name):\n\n base_fields = ['H','AB']\n emp_list = check_base_fields(df,base_fields)\n\n if not emp_list:\n return round(df['H'].sum(axis = 0) / df['AB'].sum(axis = 0),3)\n\n else:\n df = original_dataframe(start_year,end_year,bat_met+emp_list,player_name)\n return round(df['H'].sum(axis = 0) / df['AB'].sum(axis = 0),3)", "def yearlyDepreciation():\n return .10", "def averageTime(self):\n \n pass", "def year_average_price_rule(_m, y):\r\n\r\n # Total revenue\r\n return sum(m.SCENARIO_REVENUE[y, s] for s in m.S) / sum(m.SCENARIO_DEMAND[y, s] for s in m.S)", "def avg_annual_returns(end_of_year_returns, mstat):\n\n # imports mean stats\n from scipy.stats import mstats\n\n # converts returns dict to an array (in decimal fmt)\n returns_arr = np.array(list(end_of_year_returns.values()))/100\n\n if mstat == 'geometric':\n\n # calculates the geometric mean\n gmean_returns = (mstats.gmean(1 + returns_arr) - 1)*100\n\n return round(gmean_returns, 2)\n\n if mstat == 'arithmetic':\n\n # calculates the arithmetic mean\n mean_returns = np.mean(returns_arr)\n\n return round(mean_returns, 2)", "def annual_series(events):\n annually_series = pd.Series(data=events[COL.MAX_OVERLAPPING_SUM].values,\n index=events[COL.START].values,\n name=COL.MAX_OVERLAPPING_SUM).resample('AS').max()\n annually_series = annually_series.sort_values(ascending=False).reset_index(drop=True)\n\n mean_sample_rainfall = 
annually_series.mean()\n sample_size = annually_series.count()\n\n x = -np.log(np.log((sample_size + 0.2) / (sample_size - (annually_series.index.values + 1.0) + 0.6)))\n x_mean = x.mean()\n\n w = ((x * annually_series).sum() - sample_size * mean_sample_rainfall * x_mean) / \\\n ((x ** 2).sum() - sample_size * x_mean ** 2)\n u = mean_sample_rainfall - w * x_mean\n\n return {'u': u, 'w': w}", "def calc_average_load (self):\n #~ self.generation = self.forecast.generation_by_type['generation diesel']\\\n #~ [self.start_year]\n self.average_load = \\\n self.forecast.yearly_average_diesel_load.ix[self.start_year]", "def runRandomEntryStrat(self):\n start, end = self.randomDays()\n \n gain = (self.df.adj_close[end] - getInfl(self.df.adj_close[start], start.year, end.year)) / \\\n getInfl(self.df.adj_close[start], start.year, end.year)\n #if gain > 6:\n # print \"Windfall: \", start, end, gain\n return gain", "def arithmetic_ret(self) -> float:\n return float(np.log(self.tsdf).diff().mean() * self.periods_in_a_year)", "def annual_dividend(self) -> float:\n return self._annual_dividend", "def generate_organisation_addition(self):\n\t\treserved_columns = list()\n\t\ttotal_attandance = list()\n\t\tn = list()\n\t\tfor column in self.days[0].data.columns:\n\t\t\tif column.startswith('reserved_'):\n\t\t\t\treserved_columns.append(column)\n\t\t\t\ttotal_attandance.append(0)\n\t\t\t\tn.append(0)\n\n\t\tfor day in self.days:\n\t\t\tfor index, row in day.data.iterrows():\n\t\t\t\tfor i, column in enumerate(reserved_columns):\n\t\t\t\t\tif int(row[column]) > 0:\n\t\t\t\t\t\tweekend = True\n\t\t\t\t\t\tif int(row['day_of_week']) < 5:\n\t\t\t\t\t\t\tweekend = False\n\t\t\t\t\t\ttotal_attandance[i] += row['pool'] - self.get_average_for_month_at_time(int(row['month'])-1, int(row['hour']), int(row['minute']), weekend)\n\t\t\t\t\t\tn[i] += 1\n\n\t\tself.org_addition = dict()\n\t\tfor i, column in enumerate(reserved_columns):\n\t\t\tif n[i] > 0:\n\t\t\t\tself.org_addition[column] = total_attandance[i]/n[i]\n\t\t\telse:\n\t\t\t\tself.org_addition[column] = 0", "def calc_av_daily_return(self):\n av_return = 0.0\n total_ret = sum(self._returns)\n num_averages = len(self._returns)\n \n if num_averages > 0:\n av_return = total_ret/float(num_averages)\n \n self._av_daily_return = av_return\n return av_return", "def annualisedReturn(totalReturn, years):\n\t# (1 + totalReturn) ^ (1 / Time(years)) -1\n\tif totalReturn < 0:\n\t\treturn 0.0 - annualisedReturn(abs(totalReturn), years)\n\telse:\n\t\ttotalReturn = totalReturn / 100\n\t\tarr = (1.0 + totalReturn) ** (1.0 / years) - 1\n\t\treturn arr * 100", "def get_attendance(self):\n\n if len(self.attendance_list):\n attendance_sum = 0\n for attendance in self.attendance_list:\n attendance_sum += attendance.attendance_state\n return attendance_sum/len(self.attendance_list) * 100\n\n else:\n return 100.0", "def incumbant_firm(self, wage):\n \n \n \n # a. demand for capital (capital policy function)\n pol_k = (self.alpha /(self.ret *(1+self.tau_capital)))**((1-self.gamma)/(1-self.gamma-self.alpha)) \\\n * (self.gamma /(wage * (1+self.tau_labor)))**(self.gamma/(1-self.gamma-self.alpha)) \\\n * (self.grid_s_matrix*(1-self.tau_output))**(1/(1-self.alpha-self.gamma))\n \n # b. demand of labor (labor policy function)\n pol_n = (1+self.tau_capital) * self.ret * self.gamma / ((1+self.tau_labor) * wage * self.alpha) * pol_k\n #pol_n = ((smatrix*(1-self.tau_output) * gamma) / wage)**(1/(1-gamma)) * pol_k**(alpha/(1-gamma))\n \n # c. 
incumbant profit\n pi=(1-self.tau_output) * self.grid_s_matrix * pol_k**self.alpha * pol_n**self.gamma \\\n - (1+self.tau_labor)* wage * pol_n - (1+self.tau_capital) * self.ret * pol_k - self.cf\n \n # d. discounted present value of an incumbent establishment, W(s,pol_k(s,theta))\n W = pi / (1-self.rho)\n \n return pol_k, pol_n, pi, W", "def compute(self):\r\n #obtain and validate the inputs\r\n startBalance = self.amount.getNumber()\r\n rate = self.rate.getNumber() / 100\r\n years = self.period.getNumber()\r\n if startBalance == 0 or rate == 0 or years == 0:\r\n return\r\n \r\n #Set the header for the table\r\n result = \"%4s%18s%10s%16s\\n\" % (\"Year\", \"Starting Balance\", \"Interest\", \"Ending Balance\")\r\n\r\n #Compute and apend the results for each year\r\n totalInterest = 0.0\r\n for year in range(1, years + 1):\r\n interest = startBalance * rate\r\n endBalance = startBalance + interest\r\n result += \"%4d%18.2f%10.2f%16.2f\\n\" % (year, startBalance, interest, endBalance)\r\n startBalance = endBalance\r\n totalInterest += interest\r\n\r\n #Append the totals for the period\r\n result += \"Ending balance: $%0.2f\\n\" % endBalance\r\n result += \"Total interest earned: $%0.2f\\n\" % totalInterest\r\n\r\n #Output the results while preserving read-only status\r\n self.outputArea[\"state\"] = \"normal\"\r\n self.outputArea.setText(result)\r\n self.outputArea[\"state\"] = \"disabled\"", "def calc_average_load (self):\n if self.comp_specs['proposed capacity'] != UNKNOWN:\n self.average_load = None\n self.generation = self.forecast.generation['generation diesel']\\\n [self.start_year]\n self.average_load = \\\n self.forecast.yearly_average_diesel_load.ix[self.start_year]\n #~ print 'self.average_load',self.average_load", "def test_period_average():\n\n time_point = datetime(2012, 12, 31)\n period = 25\n spy = DEFAULT_ASSET_FACTORY.make_asset(\"SPY\")\n\n weatherman = weathermen.period_average(CALENDAR)\n forecast = weatherman(DEFAULT_ASSET_FACTORY, time_point, period)\n\n assert is_close(forecast.cagr(spy), .152)", "def calc_annual_investment(devs, param):\n\n observation_time = param[\"observation_time\"]\n interest_rate = param[\"interest_rate\"]\n q = 1 + param[\"interest_rate\"]\n\n \n # Calculate capital recovery factor\n CRF = ((q**observation_time)*interest_rate)/((q**observation_time)-1)\n \n # Calculate annuity factor for each device\n for device in devs.keys():\n \n # Get device life time\n life_time = devs[device][\"life_time\"]\n\n # Number of required replacements\n n = int(math.floor(observation_time / life_time))\n \n # Inestment for replcaments\n invest_replacements = sum((q ** (-i * life_time)) for i in range(1, n+1))\n\n # Residual value of final replacement\n res_value = ((n+1) * life_time - observation_time) / life_time * (q ** (-observation_time))\n\n # Calculate annualized investments \n if life_time > observation_time:\n devs[device][\"ann_factor\"] = (1 - res_value) * CRF \n else:\n devs[device][\"ann_factor\"] = ( 1 + invest_replacements - res_value) * CRF \n \n\n return devs", "def year_cost_rule(_m, y):\r\n\r\n return sum(m.RHO[y, s] * m.SCEN[y, s] for s in m.S)", "def calculate(self):\n\n rating = 0\n\n props = ['aroma', 'appearance', 'taste', 'palate', 'bottle_style']\n for item in props:\n rating += getattr(self, item, 0)\n\n self.overall = (rating / self.total) / .2", "def getLastAverage(self):\n lastAve=dict()\n lastAve['identifier']=self.lastWaveIdentifier\n lastAve['averageCalculated']=self.lastAverageCalculated \n 
lastAve['lastAverageArray']=self.lastAverageArray\n return lastAve", "def calc_year_based_saving_capacities(values, group, group_people_ratio):\n column = 'income_{}{}'.format(group, group_people_ratio)\n for index, obj in enumerate(values):\n if index < len(values) - 1:\n next_obj = values[index + 1]\n # between below years calculation\n years = (obj['year'], next_obj['year'])\n\n # find year specific income distribution for the one person who\n # belong the regarding group\n current_per_people_ratio = obj[column] / group_people_ratio\n next_per_people_ratio = next_obj[column] / group_people_ratio\n\n diff = next_per_people_ratio - current_per_people_ratio\n saving_capacity = diff / current_per_people_ratio\n yield {'year': years, 'savingcapacity': saving_capacity}", "def avg_after_harry():\n copy = movies.copy()\n copy = copy.sort_values(['Year']).reset_index(drop = True) #years early to present\n harry_years = copy[copy['#1 Movie'].str.contains('Harry')].Year #years where harry potter was #1\n next_years = harry_years + 1\n check = list(next_years.values)\n next_years_df = copy[copy['Year'].isin(check)]\n avg = next_years_df['Number of Movies'].mean()\n if avg is np.nan:\n raise\n return ('avg_after_harry', avg)", "def averaged_risk(self):\n return self._averaged_risk", "def averaged_risk(self):\n return self._averaged_risk", "def wwhr_savings(dwelling):\n # TODO: Variables were defined but not used\n # savings = 0\n # Nshower_with_bath = 1\n # Nshower_without_bath = 0\n Nshower_and_bath = dwelling.wwhr_total_rooms_with_shower_or_bath\n\n S_sum = 0\n for sys in dwelling.wwhr_systems:\n effy = sys['pcdf_sys']['effy_mixer_shower'] / 100\n util = sys['pcdf_sys']['utilisation_mixer_shower']\n S_sum += (sys['Nshowers_with_bath'] * .635 * effy *\n util + sys['Nshowers_without_bath'] * effy * util)\n\n Seff = S_sum / Nshower_and_bath\n Tcoldm = numpy.array(\n [11.1, 10.8, 11.8, 14.7, 16.1, 18.2, 21.3, 19.2, 18.8, 16.3, 13.3, 11.8])\n Awm = .33 * 25 * MONTHLY_HOT_WATER_TEMPERATURE_RISE / (41 - Tcoldm) + 26.1\n Bwm = .33 * 36 * MONTHLY_HOT_WATER_TEMPERATURE_RISE / (41 - Tcoldm)\n\n savings = (dwelling.Nocc * Awm + Bwm) * Seff * (35 - Tcoldm) * \\\n 4.18 * DAYS_PER_MONTH * MONTHLY_HOT_WATER_FACTORS / 3600.\n\n return savings", "def avg(year):\r\n df = ouvrir_fichier()\r\n df = df.loc[df[\"year\"].isin([year])]\r\n df = df[(\r\n df[\r\n \"emissions\"\r\n ] == 'Emissions (thousand metric tons of carbon dioxide)'\r\n )]\r\n print(df)\r\n mean_value = df.mean()['value']\r\n resultat = {}\r\n resultat[\"year\"] = year\r\n resultat['total'] = float(mean_value)\r\n print(mean_value)\r\n return resultat", "def take_attendance():\n\t\tcount = 0\n\t\tfor person in Simulation.community:\n\t\t\tif Simulation.community[person].went_to_bar():\n\t\t\t\tcount += 1\n\t\tprint(count)\n\t\tStrategy.evalScore(count)\n\t\tSimulation.eval_randoms(count)\n\t\tSimulation.add_to_memory(count)", "def _cost_wage(self):\n avg_drive_time = np.random.normal(self.driving_time, self.driving_time_std)\n hourly_wage = np.random.normal(self.hourly_wage, self.hourly_wage_std)\n total = avg_drive_time * hourly_wage\n return total", "def calculateMarriedTax(husbandIncome, wifeIncome):\n pass", "def current_annual_data(self):\n return self._current_annual_data", "def calculate_agrigate(self):\n self.total = 0.0\n for rec in self.data:\n self.total = self.total + rec[\"value\"]\n\n self.agrigate_data = {\n \"site\": self.site,\n \"utc\": self.timestamp_utc,\n \"local\": self.timestamp_local,\n \"tag\": \"TOTAL\",\n \"value\": 
round(self.total, 3)}\n self.data.append(self.agrigate_data)", "def Calculate(WA_HOME_folder, Basin, P_Product, ET_Product, LAI_Product, NDM_Product, NDVI_Product, dict_crops, dict_non_crops, Startdate, Enddate, Simulation): \n ######################### Import WA modules ###################################\n \n from wa.General import raster_conversions as RC\n from wa.General import data_conversions as DC\n import wa.Functions.Three as Three\n import wa.Functions.Two as Two\n import wa.Functions.Start as Start\n import wa.Generator.Sheet3 as Generate\n import wa.Functions.Start.Get_Dictionaries as GD\n \n ######################### Set General Parameters ##############################\n\n # Check if there is a full year selected between Startdate and Enddate, otherwise Sheet 3 cannot be produced \n try:\n years_end = pd.date_range(Startdate,Enddate,freq=\"A\").year\n years_start = pd.date_range(Startdate,Enddate,freq=\"AS\").year\n if (len(years_start) == 0 or len(years_end) == 0):\n print \"Calculation period is less than a year, which is not possible for sheet 3\"\n quit\n years = np.unique(np.append(years_end,years_start))\n except:\n print \"Calculation period is less than a year, which is not possible for sheet 3\"\n quit\n\n # Get environmental variable for the Home folder\n if WA_HOME_folder == '':\n WA_env_paths = os.environ[\"WA_HOME\"].split(';')\n Dir_Home = WA_env_paths[0]\n else:\n Dir_Home = WA_HOME_folder\n \t\n # Create the Basin folder\n Dir_Basin = os.path.join(Dir_Home, Basin)\n if not os.path.exists(Dir_Basin):\n os.makedirs(Dir_Basin)\t\n\n # Get the boundaries of the basin based on the shapefile of the watershed\n # Boundaries, Shape_file_name_shp = Start.Boundaries.Determine(Basin)\n Boundaries, Example_dataset = Start.Boundaries.Determine_LU_Based(Basin, Dir_Home)\n \n ############################# Download Data ###################################\n\n # Set the NPP and GPP data for the whole year\n StartYear = Startdate[:4]\n EndYear = Enddate[:4]\n StartdateNDM = '%d-01-01' %int(StartYear)\n EnddateNDM = '%d-12-31' %int(EndYear)\n\n #Set Startdate and Enddate for moving average\n ET_Blue_Green_Classes_dict, Moving_Window_Per_Class_dict = GD.get_bluegreen_classes(version = '1.0') \n Additional_Months_tail = np.max(Moving_Window_Per_Class_dict.values())\n Startdate_Moving_Average = pd.Timestamp(Startdate) - pd.DateOffset(months = Additional_Months_tail)\n Enddate_Moving_Average = pd.Timestamp(Enddate) + pd.DateOffset(months = 0)\n Startdate_Moving_Average_String = '%d-%02d-%02d' %(Startdate_Moving_Average.year, Startdate_Moving_Average.month, Startdate_Moving_Average.day)\n Enddate_Moving_Average_String = '%d-%02d-%02d' %(Enddate_Moving_Average.year, Enddate_Moving_Average.month, Enddate_Moving_Average.day)\n\n # Download data\n Data_Path_P = Start.Download_Data.Precipitation(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate_Moving_Average_String, Enddate_Moving_Average_String, P_Product, Daily = 'n') \n Data_Path_ET = Start.Download_Data.Evapotranspiration(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate, Enddate, ET_Product)\n Data_Path_ETref = Start.Download_Data.ETreference(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate_Moving_Average_String, Enddate_Moving_Average_String)\n Data_Path_NDVI = Start.Download_Data.NDVI(Dir_Basin, 
[Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate, Enddate)\n \n if NDM_Product == 'MOD17':\n Data_Path_NPP = Start.Download_Data.NPP(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], StartdateNDM, EnddateNDM, NDM_Product) \n Data_Path_GPP = Start.Download_Data.GPP(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], StartdateNDM, EnddateNDM, NDM_Product) \n\n Data_Path_P_Monthly = os.path.join(Data_Path_P, 'Monthly')\n \n ########################### Create input data #################################\n\n # Create NDM based on MOD17\n if NDM_Product == 'MOD17':\n\n # Create monthly GPP\n Dir_path_GPP = os.path.join(Dir_Basin, Data_Path_GPP)\n Start.Eightdaily_to_monthly_state.Nearest_Interpolate(Dir_path_GPP, StartdateNDM, EnddateNDM)\n Data_Path_NDM = Two.Calc_NDM.NPP_GPP_Based(Dir_Basin, Data_Path_GPP, Data_Path_NPP, Startdate, Enddate)\n\n # Create monthly NDVI based on MOD13\n if NDVI_Product == 'MOD13':\n Dir_path_NDVI = os.path.join(Dir_Basin, Data_Path_NDVI)\n Start.Sixteendaily_to_monthly_state.Nearest_Interpolate(Dir_path_NDVI, Startdate, Enddate)\n\n ###################### Save Data as netCDF files ##############################\n \n #___________________________________Land Use_______________________________\n\n # Get the data of LU and save as nc, This dataset is also used as reference for others\n LUdest = gdal.Open(Example_dataset) \n DataCube_LU = LUdest.GetRasterBand(1).ReadAsArray()\n DataCube_LU[DataCube_LU<0] = np.nan\n\n Name_NC_LU = DC.Create_NC_name('LU', Simulation, Dir_Basin, 3)\n if not os.path.exists(Name_NC_LU):\n DC.Save_as_NC(Name_NC_LU, DataCube_LU, 'LU', Example_dataset)\n\n LUdest = None\n del DataCube_LU\n #_______________________________Evaporation________________________________\n\n # Define info for the nc files\n info = ['monthly','mm', ''.join([Startdate[5:7], Startdate[0:4]]) , ''.join([Enddate[5:7], Enddate[0:4]])]\n\n # Evapotranspiration data\n Name_NC_ET = DC.Create_NC_name('ET', Simulation, Dir_Basin, 3, info)\n if not os.path.exists(Name_NC_ET):\n\n # Get the data of Evaporation and save as nc\n DataCube_ET = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_ET, Startdate, Enddate, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_ET, DataCube_ET, 'ET', Example_dataset, Startdate, Enddate, 'monthly', 0.01)\n del DataCube_ET\n\n #____________________________________NDVI__________________________________\n\n info = ['monthly','-', ''.join([Startdate_Moving_Average_String[5:7], Startdate_Moving_Average_String[0:4]]) , ''.join([Enddate_Moving_Average_String[5:7], Enddate_Moving_Average_String[0:4]])]\n\n\n Name_NC_NDVI = DC.Create_NC_name('NDVI', Simulation, Dir_Basin, 3, info)\n if not os.path.exists(Name_NC_NDVI):\n\n # Get the data of Evaporation and save as nc\n DataCube_NDVI = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_NDVI, Startdate, Enddate, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_NDVI, DataCube_NDVI, 'NDVI', Example_dataset, Startdate, Enddate, 'monthly', 1)\n del DataCube_NDVI\n\n #______________________________Precipitation_______________________________\n\n # Define info for the nc files\n info = ['monthly','mm', ''.join([Startdate_Moving_Average_String[5:7], Startdate_Moving_Average_String[0:4]]) , ''.join([Enddate_Moving_Average_String[5:7], Enddate_Moving_Average_String[0:4]])]\n\n # Precipitation data\n Name_NC_P = DC.Create_NC_name('Prec', Simulation, 
Dir_Basin, 3, info)\n if not os.path.exists(Name_NC_P):\n\t\n # Get the data of Precipitation and save as nc\n DataCube_Prec = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_P_Monthly, Startdate_Moving_Average_String, Enddate_Moving_Average_String, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_P, DataCube_Prec, 'Prec', Example_dataset, Startdate_Moving_Average_String, Enddate_Moving_Average_String, 'monthly', 0.01)\n del DataCube_Prec\n\n #________________________Reference Evaporation______________________________\n\n # Reference Evapotranspiration data\n Name_NC_ETref = DC.Create_NC_name('ETref', Simulation, Dir_Basin, 3, info)\n if not os.path.exists(Name_NC_ETref):\n\n # Get the data of Evaporation and save as nc\n DataCube_ETref = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_ETref, Startdate_Moving_Average_String, Enddate_Moving_Average_String, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_ETref, DataCube_ETref, 'ETref', Example_dataset, Startdate_Moving_Average_String, Enddate_Moving_Average_String, 'monthly', 0.01)\n del DataCube_ETref\n\n #___________________________Normalized Dry Matter__________________________\n\n # Define info for the nc files\n info = ['monthly','kg_ha-1', ''.join([Startdate[5:7], Startdate[0:4]]) , ''.join([Enddate[5:7], Enddate[0:4]])]\n\n Name_NC_NDM = DC.Create_NC_name('NDM', Simulation, Dir_Basin, 3, info)\n if not os.path.exists(Name_NC_NDM):\n\n # Get the data of Evaporation and save as nc\n DataCube_NDM = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_NDM, Startdate, Enddate, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_NDM, DataCube_NDM, 'NDM', Example_dataset, Startdate, Enddate, 'monthly', 100)\n del DataCube_NDM\n\n ############################# Calculate Sheet 3 ###########################\n\n # Define info for the nc files\n info = ['monthly','mm', ''.join([Startdate_Moving_Average_String[5:7], Startdate_Moving_Average_String[0:4]]) , ''.join([Enddate_Moving_Average_String[5:7], Enddate_Moving_Average_String[0:4]])]\n\n #____________ Evapotranspiration data split in ETblue and ETgreen ____________\n\n Name_NC_ETgreen = DC.Create_NC_name('ETgreen', Simulation, Dir_Basin, 3, info)\n Name_NC_ETblue = DC.Create_NC_name('ETblue', Simulation, Dir_Basin, 3, info)\n \n if not (os.path.exists(Name_NC_ETgreen) or os.path.exists(Name_NC_ETblue)):\n\n # Calculate Blue and Green ET\n DataCube_ETblue, DataCube_ETgreen = Three.SplitET.Blue_Green(Startdate, Enddate, Name_NC_LU, Name_NC_ETref, Name_NC_ET, Name_NC_P)\n\n # Save the ETblue and ETgreen data as NetCDF files\n DC.Save_as_NC(Name_NC_ETblue, DataCube_ETblue, 'ETblue', Example_dataset, Startdate, Enddate, 'monthly', 0.01)\n DC.Save_as_NC(Name_NC_ETgreen, DataCube_ETgreen, 'ETgreen', Example_dataset, Startdate, Enddate, 'monthly', 0.01)\n\n del DataCube_ETblue, DataCube_ETgreen\n \n #____________________________ Create the empty dictionaries ____________________________\n \n # Create the dictionaries that are required for sheet 3 \n wp_y_irrigated_dictionary, wp_y_rainfed_dictionary, wp_y_non_crop_dictionary = GD.get_sheet3_empties()\n \n #____________________________________ Fill in the dictionaries ________________________\n\n # Fill in the crops dictionaries \n wp_y_irrigated_dictionary, wp_y_rainfed_dictionary = Three.Fill_Dicts.Crop_Dictionaries(wp_y_irrigated_dictionary, wp_y_rainfed_dictionary, dict_crops, Name_NC_LU, Name_NC_ETgreen, Name_NC_ETblue, Name_NC_NDM, Name_NC_P, Dir_Basin)\n\n # Fill in the non crops dictionaries \n 
wp_y_non_crop_dictionary = Three.Fill_Dicts.Non_Crop_Dictionaries(wp_y_non_crop_dictionary, dict_non_crops)\n\n for year in years:\n\n ############################ Create CSV 3 ################################# \n \n csv_fh_a, csv_fh_b = Generate.CSV.Create(wp_y_irrigated_dictionary, wp_y_rainfed_dictionary, wp_y_non_crop_dictionary, Basin, Simulation, year, Dir_Basin)\n\n ############################ Create Sheet 3 ############################### \n\n Generate.PDF.Create(Dir_Basin, Basin, Simulation, csv_fh_a, csv_fh_b)\n \n return()", "def emissions_baseline(self):\n baseline = DataFrame(columns=[\"CO2\", \"NOx\", \"PM10\", \"PM2.5\", \"SO2\"])\n baseline = baseline.append(year_1(self.plant.emissions()))\n baseline = baseline.append(year_1(self.plant.fuel_reseller().emissions()))\n baseline = baseline.append(year_1(self.farmer.emissions_exante))\n baseline.loc[\"Total\"] = baseline.sum()\n baseline.loc[\"Total_plant\"] = baseline.iloc[0]\n baseline.loc[\"Total_transport\"] = baseline.iloc[1]\n baseline.loc[\"Total_field\"] = baseline.iloc[2]\n return baseline", "def calculate_activities(self):\n # Sleep\n sleep = self.sleep_hours * 0.95\n\n # Work\n if self.work_intensity == self.INTENSITY_LOW:\n work_factor = 1.5\n elif self.work_intensity == self.INTENSITY_MEDIUM:\n work_factor = 1.8\n else:\n work_factor = 2.2\n work = self.work_hours * work_factor\n\n # Sport (entered in hours/week, so we must divide)\n if self.sport_intensity == self.INTENSITY_LOW:\n sport_factor = 4\n elif self.sport_intensity == self.INTENSITY_MEDIUM:\n sport_factor = 6\n else:\n sport_factor = 10\n sport = (self.sport_hours / 7.0) * sport_factor\n\n # Free time\n if self.freetime_intensity == self.INTENSITY_LOW:\n freetime_factor = 1.3\n elif self.freetime_intensity == self.INTENSITY_MEDIUM:\n freetime_factor = 1.9\n else:\n freetime_factor = 2.4\n freetime = self.freetime_hours * freetime_factor\n\n # Total\n total = (sleep + work + sport + freetime) / 24.0\n return decimal.Decimal(str(total)).quantize(TWOPLACES)", "def get_average(self):\n self.avg = math.floor((self.maths + self.phy + self.che) / 3, )\n self.assign_grade()\n return self.avg\n # End of method get_average", "def trial_atr(trial, omit_missing_frames=True):\n frames = trial.HMM_MLE\n if omit_missing_frames:\n frames = frames[frames >= 0]\n\n runs = calc_run_lengths(trial.HMM_MLE)\n return_times = []\n current_return_time = 0\n for run in runs:\n if run.object == 0:\n return_times.append(current_return_time/60)\n current_return_time = 0\n else:\n current_return_time += run.length\n return np.mean(return_times)", "def anova_analysis(df):\n time_periods = df.groupby(['week_ending','Holiday'],as_index = False)[['seats_sold']].sum()\n TG = time_periods.loc[time_periods['Holiday'] == 'ThanksGiving','seats_sold']\n WB = time_periods.loc[time_periods['Holiday'] == 'WinterBreak','seats_sold']\n SB = time_periods.loc[time_periods['Holiday'] == 'SummerBreak','seats_sold']\n NH = time_periods.loc[time_periods['Holiday'] == 'Not Holiday','seats_sold']\n f,p = stats.f_oneway(TG,WB,SB,NH)\n print('The f and p of ANOVA analysis are:')\n print(f,p)\n\n ## plot the mean of each group\n time_periods.boxplot('seats_sold', by='Holiday', figsize=(12, 8))\n fileName = 'ANOVA.png'\n plt.savefig(fileName)\n\n print(\"The mean seats sold of each time periods:\")\n print(time_periods.groupby('Holiday')['seats_sold'].mean())\n\n pairwise = MultiComparison(time_periods['seats_sold'], time_periods['Holiday'])\n result = pairwise.tukeyhsd()\n print(pairwise)\n print(result)\n 
#print(pairwise.groupsunique)", "def aveVolumeCalc(ins, date):\n cal = ins.Currency().Calendar()\n enddate = cal.AdjustBankingDays(date, 0)\n startdate = cal.AdjustBankingDays(date, AVERAGING_PERIOD)\n\n prices=[]\n histprices = acm.FPrice.Select(\"instrument = %s and market = '%s' \\\n and day > '%s' and day <='%s'\" % \n (ins.Oid(), DAILY_MARKET, startdate, enddate))\n \n for price in histprices:\n settle = price.Settle()\n if settle >= 0:\n prices.append(settle)\n \n #upgrade 2013 fix for failure during run - acm.Math().AverageOf seems buggy\n try:\n avgprice = (sum(prices)/len(prices))\n except ZeroDivisionError:\n avgprice = 0\n \n #avgprice = acm.Math().AverageOf(prices, None)\n \n #Overwrite today's price if you find it \n newPrice = acm.FPrice.Select01(\"instrument = %s and market = '%s' and day = %s\" % \n (ins.Oid(), THREE_MONTH_MARKET, enddate),\n 'NaN')\n if not newPrice:\n newPrice = acm.FPrice()\n newPrice.Instrument(ins)\n newPrice.Day(enddate)\n newPrice.Market(THREE_MONTH_MARKET)\n newPrice.Currency(ins.Currency())\n\n newPrice.Settle(avgprice)\n try:\n newPrice.Commit()\n print 'INFO: %s price for %s was created on %s' %(THREE_MONTH_MARKET, ins.Name(), date)\n except Exception, err:\n print 'ERROR: %s price for %s did not commit: %s' %(THREE_MONTH_MARKET, ins.Name(), str(err))\n \n return newPrice", "def Calculate(WA_HOME_folder, Basin, P_Product, ET_Product, LAI_Product, NDM_Product, Startdate, Enddate, Simulation): \n ######################### Import WA modules ###################################\n \n from wa.General import raster_conversions as RC\n from wa.General import data_conversions as DC\n import wa.Functions.Two as Two\n import wa.Functions.Start as Start\n import wa.Generator.Sheet2 as Generate\n \n ######################### Set General Parameters ##############################\n\n # Get environmental variable for the Home folder\n if WA_HOME_folder == '':\n WA_env_paths = os.environ[\"WA_HOME\"].split(';')\n Dir_Home = WA_env_paths[0]\n else:\n Dir_Home = WA_HOME_folder\n \n # Create the Basin folder\n Dir_Basin = os.path.join(Dir_Home, Basin)\n if not os.path.exists(Dir_Basin):\n os.makedirs(Dir_Basin)\t\n\n # Get the boundaries of the basin based on the shapefile of the watershed\n # Boundaries, Shape_file_name_shp = Start.Boundaries.Determine(Basin)\n Boundaries, Example_dataset = Start.Boundaries.Determine_LU_Based(Basin, Dir_Home)\n \n ############################# Download Data ###################################\n\n # Set the NPP and GPP data for the whole year\n StartYear = Startdate[:4]\n EndYear = Enddate[:4]\n StartdateNDM = '%d-01-01' %int(StartYear)\n EnddateNDM = '%d-12-31' %int(EndYear)\n \n # Download data\n Data_Path_P = Start.Download_Data.Precipitation(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate, Enddate, P_Product, Daily = 'y') \n Data_Path_ET = Start.Download_Data.Evapotranspiration(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate, Enddate, ET_Product)\n Data_Path_LAI = Start.Download_Data.LAI(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate, Enddate, LAI_Product) \n \n if NDM_Product == 'MOD17':\n Data_Path_NPP = Start.Download_Data.NPP(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], StartdateNDM, EnddateNDM, NDM_Product) \n Data_Path_GPP = Start.Download_Data.GPP(Dir_Basin, 
[Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], StartdateNDM, EnddateNDM, NDM_Product) \n\n Data_Path_P_Daily = os.path.join(Data_Path_P, 'Daily')\n Data_Path_P_Monthly = os.path.join(Data_Path_P, 'Monthly')\n \n ########################### Create input data #################################\n\n # Create Rainy Days based on daily CHIRPS\n Data_Path_RD = Two.Rainy_Days.Calc_Rainy_Days(Dir_Basin, Data_Path_P_Daily, Startdate, Enddate)\n\n # Create monthly LAI\n Dir_path_LAI = os.path.join(Dir_Basin, Data_Path_LAI)\n Start.Eightdaily_to_monthly_state.Nearest_Interpolate(Dir_path_LAI, Startdate, Enddate)\n\n # Create NDM based on MOD17\n if NDM_Product == 'MOD17':\n \n # Create monthly GPP \n Dir_path_GPP = os.path.join(Dir_Basin, Data_Path_GPP)\n Start.Eightdaily_to_monthly_state.Nearest_Interpolate(Dir_path_GPP, StartdateNDM, EnddateNDM)\n Data_Path_NDM = Two.Calc_NDM.NPP_GPP_Based(Dir_Basin, Data_Path_GPP, Data_Path_NPP, Startdate, Enddate)\n\n ###################### Save Data as netCDF files ##############################\n \n #___________________________________Land Use_______________________________\n\n # Get the data of LU and save as nc, This dataset is also used as reference for others\n LUdest = gdal.Open(Example_dataset) \n DataCube_LU = LUdest.GetRasterBand(1).ReadAsArray()\n\n Name_NC_LU = DC.Create_NC_name('LU', Simulation, Dir_Basin, 2)\n if not os.path.exists(Name_NC_LU):\n DC.Save_as_NC(Name_NC_LU, DataCube_LU, 'LU', Example_dataset)\n\n LUdest = None\n del DataCube_LU\n\n #______________________________Precipitation_______________________________\n\n # Define info for the nc files\n info = ['monthly','mm', ''.join([Startdate[5:7], Startdate[0:4]]) , ''.join([Enddate[5:7], Enddate[0:4]])]\n\n # Precipitation data\n Name_NC_P = DC.Create_NC_name('Prec', Simulation, Dir_Basin, 2, info)\n if not os.path.exists(Name_NC_P):\n\t\n # Get the data of Precipitation and save as nc\n DataCube_Prec = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_P_Monthly, Startdate, Enddate, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_P, DataCube_Prec, 'Prec', Example_dataset, Startdate, Enddate, 'monthly', 0.01)\n del DataCube_Prec\n\n #_______________________________Evaporation________________________________\n\n # Evapotranspiration data\n Name_NC_ET = DC.Create_NC_name('ET', Simulation, Dir_Basin, 2, info)\n if not os.path.exists(Name_NC_ET):\n\n # Get the data of Evaporation and save as nc\n DataCube_ET = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_ET, Startdate, Enddate, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_ET, DataCube_ET, 'ET', Example_dataset, Startdate, Enddate, 'monthly', 0.01)\n del DataCube_ET\n\n #___________________________Normalized Dry Matter__________________________\n\n # Define info for the nc files\n info = ['monthly','kg_ha-1', ''.join([Startdate[5:7], Startdate[0:4]]) , ''.join([Enddate[5:7], Enddate[0:4]])]\n\n Name_NC_NDM = DC.Create_NC_name('NDM', Simulation, Dir_Basin, 2, info)\n if not os.path.exists(Name_NC_NDM):\n\n # Get the data of Evaporation and save as nc\n DataCube_NDM = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_NDM, Startdate, Enddate, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_NDM, DataCube_NDM, 'NDM', Example_dataset, Startdate, Enddate, 'monthly', 100)\n del DataCube_NDM\n\n #_______________________________Rainy Days_________________________________\n\n # Define info for the nc files\n info = ['monthly','days', ''.join([Startdate[5:7], 
Startdate[0:4]]) , ''.join([Enddate[5:7], Enddate[0:4]])]\n\n Name_NC_RD = DC.Create_NC_name('RD', Simulation, Dir_Basin, 2, info)\n if not os.path.exists(Name_NC_RD):\n\n # Get the data of Evaporation and save as nc\n DataCube_RD = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_RD, Startdate, Enddate, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_RD, DataCube_RD, 'RD', Example_dataset, Startdate, Enddate, 'monthly', 100)\n del DataCube_RD\n\n #_______________________________Leaf Area Index____________________________\n\n # Define info for the nc files\n info = ['monthly','m2-m-2', ''.join([Startdate[5:7], Startdate[0:4]]) , ''.join([Enddate[5:7], Enddate[0:4]])]\n\n Name_NC_LAI = DC.Create_NC_name('LAI', Simulation, Dir_Basin, 2, info)\n if not os.path.exists(Name_NC_LAI):\n\n # Get the data of Evaporation and save as nc\n DataCube_LAI = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_LAI, Startdate, Enddate, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_LAI, DataCube_LAI, 'LAI', Example_dataset, Startdate, Enddate, 'monthly', 1)\n del DataCube_LAI\n\n ####################### Calculations Sheet 2 ##############################\n \n DataCube_I, DataCube_T, DataCube_E = Two.SplitET.ITE(Dir_Basin, Name_NC_ET, Name_NC_LAI, Name_NC_P, Name_NC_RD, Name_NC_NDM, Name_NC_LU, Startdate, Enddate, Simulation)\n \n ############################ Create CSV 2 ################################# \n\n Dir_Basin_CSV = Generate.CSV.Create(Dir_Basin, Simulation, Basin, Startdate, Enddate, Name_NC_LU, DataCube_I, DataCube_T, DataCube_E, Example_dataset)\n\n ############################ Create Sheet 2 ############################### \n\n Generate.PDF.Create(Dir_Basin, Basin, Simulation, Dir_Basin_CSV)\n\n return()", "def insurance(self):\n insurance_cost = 0.0056 * self.input_dict['project_value_usd']\n return insurance_cost", "def year_emissions_intensity_rule(_m, y):\r\n\r\n return m.YEAR_EMISSIONS[y] / m.YEAR_DEMAND[y]", "def scenario_average_price_rule(_m, y, s):\r\n\r\n return m.SCENARIO_REVENUE[y, s] / m.SCENARIO_DEMAND[y, s]", "def SetupYearRecForIncomeTax(\n self, earnings=0, oas=0, gis=0, cpp=0, ei=0,\n rrsp=0, bridging=0,nonreg=0, gains=0, eoy_gains=0,\n unapplied_losses=0, rrsp_contributions=0,\n age=30, retired=False, cpi=1):\n j_canuck = person.Person(strategy=self.default_strategy)\n j_canuck.capital_loss_carry_forward = unapplied_losses\n j_canuck.age += age - world.START_AGE\n j_canuck.year += age - world.START_AGE\n j_canuck.retired = retired\n\n year_rec = utils.YearRecord()\n year_rec.is_retired = j_canuck.retired\n year_rec.year = j_canuck.year\n year_rec.incomes.append(incomes.IncomeReceipt(earnings, incomes.INCOME_TYPE_EARNINGS))\n year_rec.incomes.append(incomes.IncomeReceipt(oas, incomes.INCOME_TYPE_OAS))\n year_rec.incomes.append(incomes.IncomeReceipt(gis, incomes.INCOME_TYPE_GIS))\n year_rec.incomes.append(incomes.IncomeReceipt(cpp, incomes.INCOME_TYPE_CPP))\n year_rec.incomes.append(incomes.IncomeReceipt(ei, incomes.INCOME_TYPE_EI))\n year_rec.withdrawals.append(funds.WithdrawReceipt(nonreg, gains, funds.FUND_TYPE_NONREG))\n year_rec.withdrawals.append(funds.WithdrawReceipt(rrsp, 0, funds.FUND_TYPE_RRSP))\n year_rec.withdrawals.append(funds.WithdrawReceipt(bridging, 0, funds.FUND_TYPE_BRIDGING))\n year_rec.tax_receipts.append(funds.TaxReceipt(eoy_gains, funds.FUND_TYPE_NONREG))\n year_rec.deposits.append(funds.DepositReceipt(rrsp_contributions, funds.FUND_TYPE_RRSP))\n year_rec.cpi = cpi\n\n year_rec = j_canuck.CalcPayrollDeductions(year_rec)\n\n 
return (j_canuck, year_rec)", "def generate_extra_data(self):\n self.data[\"male_initial\"], self.data[\"female_initial\"] = \\\n self.get_initial_student_count()\n \n date_line = '<p class=\"report-title\"> %s</p>' \\\n %(self.start_date.strftime(\"%B %Y\"))\n row1 = \"\"\"\n <table>\n <tr class=\"tblRow\"><td>%s</td><td>Enrollment For Year</td>\n <td>Male:</td><td>%d</td><td>Female:</td><td>%d</td>\n <td>Total:</td><td>%d</td></tr>\n \"\"\" %(unicode(self.school), self.data[\"male_initial\"], \n self.data[\"female_initial\"], \n self.data[\"male_initial\"] + self.data[\"female_initial\"])\n row2 = \"\"\"\n <tr class=\"tblOddRow\"><td>%s</td><td>Enrollment For Month</td>\n <td>Male:</td><td>%d</td><td>Female:</td><td>%d</td>\n <td>Total:</td><td>%d</td></tr>\n \"\"\" %(unicode(self.section), self.data[\"male_current\"], \n self.data[\"female_current\"],\n self.data[\"male_current\"] + self.data[\"female_current\"])\n row3 = \"\"\"\n <tr class=\"tblRow\"><td>%s</td><td>Average Attendance</td>\n <td>Male:</td><td>%.1f</td><td>Female:</td><td>%.1f</td>\n <td>Total:</td><td>%.1f</td></tr>\n \"\"\" %(\"Secondary\", self.data[\"aa_male\"], self.data[\"aa_female\"] ,\n self.data[\"aa_combined\"])\n row4 =\"\"\"\n <tr class=\"tblOddRow\"><td>%s</td><td>Percentage of Attendance</td>\n <td>Male:</td><td>%.1f %% </td><td>Female:</td><td>%.1f %% </td>\n <td>Total:</td><td>%.1f %% </td></tr>\n \"\"\" %(unicode(self.school.municipality), self.data[\"pa_male\"], \n self.data[\"pa_female\"], self.data[\"pa_combined\"])\n row5 = \"\"\"\n <tr class=\"tblRow\"><td>School Days: %d</td><td>Percentage of Enrollment</td>\n <td>Male:</td><td>%.1f %% </td><td>Female:</td><td>%.1f %% </td>\n <td>Total:</td><td>%.1f %% </td></tr>\n </table>\n \"\"\" %(self.data[\"num_school_days\"], \n self.data[\"male_current\"] * 100.0 / self.data[\"male_initial\"],\n self.data[\"female_current\"] * 100.0 / \n self.data[\"female_initial\"],\n (self.data[\"male_current\"] + self.data[\"female_current\"]) * \n 100.0 /\n (self.data[\"male_initial\"] + self.data[\"female_initial\"]))\n self.extra_data = date_line + row1 + row2 + row3 + row4 + row5", "def annualized_std(self, series: pd.Series = None):\n\n if series is None:\n series = self.trading_history[\"total_assets\"]\n\n log_differential = np.log(series / series.shift(-1))\n hourly_std = np.std(log_differential)\n annualized_std = hourly_std * np.sqrt(365)\n return annualized_std", "def annual_return(returns, period=DAILY):\n\n if returns.size < 1:\n return np.nan\n\n try:\n ann_factor = ANNUALIZATION_FACTORS[period]\n except KeyError:\n raise ValueError(\n \"period cannot be '{}'. \"\n \"Must be '{}', '{}', or '{}'\".format(\n period, DAILY, WEEKLY, MONTHLY\n )\n )\n\n num_years = float(len(returns)) / ann_factor\n df_cum_rets = cum_returns(returns, starting_value=100)\n start_value = 100\n end_value = df_cum_rets.iloc[-1]\n\n total_return = (end_value - start_value) / start_value\n annual_return = (1. 
+ total_return) ** (1 / num_years) - 1\n\n return annual_return", "def savings(self):\n return self.SAVINGS_FACTOR * self.log_count.info / 60", "def GrowthAPRWithUncertainty(self, years=10):\n import math, datetime\n\n average_annual = self.GrowthAPR(years) / 100.\n average_daily = math.pow(1 + average_annual, 1 / 365) - 1.\n \n i = 0\n today = datetime.datetime.now()\n while i < len(self.history):\n if today - self.history[i].date < datetime.timedelta(days=365.25*years):\n break\n i += 1\n uncertainty = 0.\n filter_days = 20\n n_samples = 0\n while i < len(self.history):\n today = self.history[i].price\n previous = self.history[i - 1].price\n change = (today - previous) / previous\n uncertainty += ((1. + change) ** (1 / filter_days) - (1. + average_daily)) ** 2\n n_samples += 1\n i += filter_days\n uncertainty /= n_samples - 1.\n uncertainty = math.sqrt(uncertainty)\n uncertainty *= 365.25 * (1. + average_annual)\n\n return (100. * average_annual, 100. * uncertainty)", "def annualize_rets(r, periods_per_year):\n compounded_growth = (1+r).prod()\n n_periods = r.shape[0]\n return compounded_growth**(periods_per_year/n_periods)-1", "def eta_details(self):\n\t\t# Experimentation gives you 72pts to a random science every production\n\t\t# Stupid brute force implementation for now\n\t\trequired = self.required\n\t\trate = self.player.science\n\t\tdef combine(base, add, add_time, chance):\n\t\t\t# add given add into base with +add_time tick and modified by chance\n\t\t\tfor time, p in add.items():\n\t\t\t\ttime += add_time\n\t\t\t\tp *= chance\n\t\t\t\tbase[time] = base.get(time, 0) + p\n\t\tdef _eta_details(value, time_to_prod=self.galaxy.production_rate):\n\t\t\tnaive_eta = max(0, int(math.ceil((required - value)/rate)))\n\t\t\tif naive_eta <= time_to_prod: return {naive_eta: 1}\n\t\t\tbase = {}\n\t\t\twithout_extra = _eta_details(value + rate*time_to_prod)\n\t\t\twith_extra = _eta_details(value + rate*time_to_prod + 72)\n\t\t\tcombine(base, without_extra, time_to_prod, 6/7.)\n\t\t\tcombine(base, with_extra, time_to_prod, 1/7.)\n\t\t\treturn base\n\t\treturn _eta_details(self.current, self.galaxy.production_rate - self.galaxy.production_counter)", "def strikeout_percentage_average(df,start_year, end_year,bat_met, player_name):\n\n base_fields = ['PA']\n emp_list = check_base_fields(df,base_fields)\n\n if not emp_list:\n k_val = round((pd.to_numeric(df['K.'].str.split('%').str[0])/100)*df['PA'],0).sum()\n pa_total = df['PA'].fillna(0).sum()\n return \"{:.2%}\".format(k_val / pa_total)\n else:\n df = original_dataframe(start_year,end_year,bat_met+emp_list,player_name)\n return strikeout_percentage_average(df,start_year, end_year,bat_met, player_name)", "def calculate_iron_hemoglobin_time_lag_effective_fraction(df, years):\n final = pd.DataFrame()\n data = df.reset_index()\n for i in list(range(0, len(years))):\n current = (data.loc[data.year == years[i]]\n .set_index([c for c in data.columns if 'draw' not in c and c != 'year'])\n .drop(columns='year'))\n if i == 0:\n for draw in list(range(0, 1000)):\n current[f'draw_{draw}'] = 1\n else:\n prior = (data.loc[data.year == years[i - 1]]\n .set_index([c for c in data.columns if 'draw' not in c and c != 'year'])\n .drop(columns='year'))\n current = 1 - ((current - prior) * 0.75 / current)\n current['year'] = years[i]\n final = pd.concat([final, current])\n final = final.reset_index().set_index([c for c in data.columns if 'draw' not in c]).sort_index()\n return final", "def getSalaryStat(self, year = 2014):\r\n year_str = str(year+1)\r\n \r\n df = 
pd.read_html('http://espn.go.com/nba/salaries/_/year/' + year_str)[0]\r\n df.columns = df.iloc[0]\r\n df = df[df.NAME != 'NAME']\r\n \r\n for page in range(2,12):\r\n page_df = pd.read_html('http://espn.go.com/nba/salaries/_/year/' + \\\r\n year_str + '/page/' + str(page))[0]\r\n page_df.columns = page_df.iloc[0]\r\n page_df = page_df[page_df.NAME != 'NAME']\r\n df = pd.concat([df, page_df])\r\n \r\n df['SALARY'] = df['SALARY'].replace('[\\$,]', '', regex=True).astype(int)\r\n cut_df = df[['NAME', 'SALARY']]\r\n cut_df = cut_df.reset_index(drop=True)\r\n cut_df.columns = [['PLAYER_NAME', 'SALARY']]\r\n \r\n salary_df = self.fixSalaryName(cut_df)\r\n \r\n return salary_df", "def calc_savings_needed():\r\n annual_salary_real = float(input(\"Starting Salary: \"))\r\n #portion_saved = float(input(\"Enter the percent of your salary to save, as a decimal: \"))\r\n total_cost = 1000000#float(input(\"Enter the cost of your dream home: \"))\r\n semi_annual_raise = .07#float(input(\"Enter the semi annual raise as decimal: \"))\r\n portion_down_payment = .25\r\n current_savings = 0\r\n r = .04;\r\n monthly_salary = annual_salary_real / 12 \r\n \r\n months = 36\r\n epsilon = 100\r\n low = 0.0\r\n high = 1.0\r\n guess = (low + high) / 2.0\r\n num_guesses = 0\r\n breaked = False\r\n \r\n while(abs(current_savings - total_cost*portion_down_payment) > epsilon):\r\n if(guess == 1.0):\r\n breaked = True\r\n break\r\n #print(\"new guess: \",guess)\r\n annual_salary = annual_salary_real\r\n monthly_salary = annual_salary/12\r\n num_guesses += 1 \r\n \r\n month = 0\r\n current_savings = 0\r\n while( month < months):\r\n \r\n current_savings += (current_savings * r /12) + (guess * monthly_salary)\r\n month += 1\r\n if ((month % 6) == 0):\r\n annual_salary += annual_salary * semi_annual_raise\r\n monthly_salary = annual_salary / 12\r\n \r\n if(abs(current_savings - (total_cost*portion_down_payment)) > epsilon):\r\n #print(\"current_savings: \",current_savings)\r\n if(current_savings < (total_cost*portion_down_payment)):\r\n low = guess\r\n else:\r\n high = guess\r\n guess = (low + high) / 2.0\r\n \r\n if(breaked):\r\n print(\"It is not possible to pay down payment in 3 years.\")\r\n else: \r\n print(\"Best savings rate: \", guess)\r\n print(\"Steps in bisection search: \", num_guesses)", "def _award_accounts(self):\n\n prize_money = 0\n for i in xrange(len(self.accounts)):\n # Each savings account has a 1% chance of quadrupling their principal. 
The\n # chance is independent between accounts.\n if random.randint(1, 100) == 1:\n prize_money += 3 * self.accounts[i]\n self.accounts[i] *= 4\n return prize_money", "def get_statistic_for_user(self, attr):\n all_payments = Payment.objects.payments(user=self).exclude(project__isnull=True)\n user_impact = 0\n for payment in all_payments:\n project = payment.project\n if project:\n user_financial_contribution = payment.amount\n project_funding_total = (int)(project.funding_goal)\n project_impact = getattr(project.statistics, attr)\n user_impact_for_project = project_impact * user_financial_contribution * 1.0 / project_funding_total\n user_impact += user_impact_for_project\n return user_impact", "def Salvage(self):\n pass", "def __calculate_monthly_interest(self):\n return self.__percentage_interest / 12", "def getETA():", "def getETA():", "def average_emission(data: List[EmissionPerCapita], current_year: int) -> float:\r\n\r\n index = current_year - data[0].start_year # get the index for current year\r\n\r\n # Get all emissions from that year.\r\n current_year_emissions = []\r\n for countries in data:\r\n current_year_emissions.append(countries.epc_year[index])\r\n\r\n average = sum(current_year_emissions) / len(data)\r\n return average", "def life_insurance_to_recive_total(self):\n pass", "def average_age_nt(all_profile_nt: namedtuple) -> tuple:\n \"\"\"Param: all_profile_nt: Named tuple containing all profiles\"\"\"\n today = date.today()\n value = sum(map(lambda v: today.year - v[-1].year - ((today.month, today.day) < (\n v[-1].month, v[-1].day)), all_profile_nt))/len(all_profile_nt)\n return value", "def calc_stat_values(self):", "def americanprice(self):\n self.americanpay = np.zeros((self.steps+1,self.steps+1))\n self.optionvalue = np.zeros((self.steps+1,self.steps+1))\n self.exercisevalue = np.zeros((self.steps+1,self.steps+1))\n self.americanpay[-1,:] = np.array( list( map(lambda x:max(x-self.s,0.0),self.pricetree[-1,:]) ) )\n discount = math.exp( self.r*self.deltatime )\n for i in range(self.steps,0,-1):\n for j in range(i):\n self.optionvalue[i-1][j] = (self.americanpay[i][j]*self.upprob + self.americanpay[i][j+1]*(1-self.upprob))/discount\n self.exercisevalue[i-1][j] = max(self.pricetree[i-1][j]-self.s,0.0)\n self.americanpay[i-1][j] = max(self.optionvalue[i-1][j],self.exercisevalue[i-1][j])\n return self.americanpay[0][0]", "def compute_advantage(self, trials=1000):\n\n return self.compute_success_ratio(1, trials) - (1 - self.compute_success_ratio(0, trials))", "def calculate(self):\n avg = self.sum / self.n if self.n != 0 else 0\n self.running_avg.append(avg)\n return avg", "def _predict(self, fh, X):\n y_pred_df = pd.concat(self._predict_forecasters(fh, X), axis=1)\n # apply weights\n y_pred = y_pred_df.apply(lambda x: np.average(x, weights=self.weights_), axis=1)\n y_pred.name = self._y.name\n return y_pred", "def glycolysis_rate_cal (self) :\n x = self.mitochondria.get_atp()\n y = self.mitochondria.get_adp()\n a = self.atp\n b = self.adp\n self.adp_to_atp(self.mitochondria.atp_translocase(math.ceil((x*b - a*y)/(a+b+x+y))))\n if a<1 :\n return\n else :\n self.set_glycolysis(int(5*b/a))", "def calc_schedule_yearly(self):\n \n schedule = self.schedule\n\n # Yearly\n start_date = schedule['Date'].min()\n schedule['years_out'] = schedule['Date'].apply(lambda x: relativedelta(x, start_date).years)\n schedule['end_of_year_window'] = schedule['years_out'].diff(-1).fillna(-1).abs()\n \n schedule_yr = schedule\\\n .groupby([schedule['years_out']])\\\n .agg({'Payment': 'sum',\n 
'Interest': 'sum',\n 'Additional_Payment': 'sum',\n 'Date': 'min',\n 'Date': 'max'\n }\n )\n\n schedule_yr.reset_index(drop=False, inplace=True)\n schedule_yr['Principal'] = schedule_yr['Payment'] - schedule_yr['Interest']\n \n yearly_end_balances = schedule.loc[schedule['end_of_year_window'] == 1, \n ['years_out', 'End Balance']]\n\n schedule_yr = schedule_yr.merge(yearly_end_balances, how='inner', on=['years_out'])\n\n return schedule_yr", "def calc_performance(self):\n for symbol in self.portfolio.assets.keys():\n\n # Total the Performance of all the trades\n start = self.portfolio.trades[symbol].index[0]\n end = self.portfolio.trades[symbol].index[-1]\n trades = len(self.record[symbol])\n profit = self.record[symbol]['profit'].sum()\n loss = self.record[symbol]['loss'].sum()\n # Total or average the trade info for all the trades\n try:\n wins = len(self.record[symbol].groupby('win/loose').groups['w'])\n except (ValueError, KeyError):\n wins = 0\n try:\n losses = len(self.record[symbol].groupby('win/loose').groups['l'])\n except (ValueError, KeyError):\n losses = 0\n try:\n washes = len(self.record[symbol].groupby('win/loose').groups['-'])\n except (ValueError, KeyError):\n washes = 0\n max_drawdown = self.record[symbol]['drawdown'].max()\n average_drawdown = self.record[symbol]['drawdown'].mean()\n max_drawdown_time = self.record[symbol]['drawdown days'].max()\n average_drawdown_time = self.record[symbol]['drawdown days'].mean()\n # Average the risk and market comparisons for all trades\n vol_risk = self.record[symbol]['volatility'].mean()\n beta = self.record[symbol]['beta'].mean()\n lpm_risk = self.record[symbol]['lpm'].mean()\n e_r = self.record[symbol]['expected_return'].mean()\n # Calculate Risk measures\n treynor_ratio = (e_r - self.risk_free_return) / beta\n sharpe_ratio = (e_r - self.risk_free_return) / vol_risk\n # Package up the data for each symbol\n self.performance[symbol] = {\n 'start': start,\n 'end': end,\n 'trades': trades,\n 'wins': wins,\n 'losses': losses,\n 'washes': washes,\n 'profit': profit,\n 'loss': loss,\n 'net_profit': profit - loss,\n 'profit_factor': profit / loss if loss != 0 else 1.0,\n 'percent_profitable': wins / trades if trades != 0 else 0.0,\n 'average_trade_net_profit' : (profit - loss) / trades if trades != 0 else 0.0,\n 'max_drawdown' : max_drawdown,\n 'average_drawdown' : average_drawdown,\n 'max_drawdown_days' : max_drawdown_time,\n 'average_drawdown_days' : average_drawdown_time,\n 'volatility_risk' : vol_risk,\n 'beta' : beta,\n 'lower_partial_moment_risk' : lpm_risk,\n 't_r' : treynor_ratio,\n 's_r' : sharpe_ratio\n }\n\n return self", "def result(self):\n return dict(\n train = self.mean_makespan_train/self.step,\n baseline = self.mean_makespan_baseline/self.step\n )", "def moving_avg_COVID19(self):\r\n \r\n # Calculate moving weekly averages and range. 
Use the total range of the pandemic\r\n \r\n # First month of outbreak (that we have data for)\r\n first_month = min(self.day_mort_gov[0].month,self.day_m_google[0].month)\r\n # First day of outbreak\r\n first_day = min(self.day_mort_gov[0].day,self.day_m_google[0].day)\r\n self.outbreak_start_date = datetime.datetime(2020,first_month,first_day)\r\n # Last day of data\r\n last_data_month = max(self.day_mort_gov[-1].month,self.day_m_google[-1].month)\r\n last_data_day = max(self.day_mort_gov[-1].month,self.day_m_google[-1].month)\r\n self.outbreak_last_data_date = datetime.datetime(2020,last_data_day,last_data_day)\r\n \r\n self.num_days_outbreak = (self.outbreak_last_data_date-self.outbreak_start_date).days\r\n \r\n\r\n \r\n # Get days and data on days\r\n self.outbreak_obs_days = np.zeros(self.num_days_outbreak ,dtype=datetime.datetime)\r\n self.outbreak_obs_days[0] = self.outbreak_start_date\r\n self.t_outbreak = np.arange(0,self.num_days_outbreak,step=1)\r\n self.R_data_daily = np.nan*np.ones(self.num_days_outbreak )\r\n self.m_data_daily = np.nan*np.ones(self.num_days_outbreak )\r\n \r\n for day in range(0,self.num_days_outbreak):\r\n \r\n if day > 0:\r\n \r\n self.outbreak_obs_days[day] = self.outbreak_obs_days[day-1] + datetime.timedelta(days=1)\r\n \r\n for day2 in range(0,len(self.day_mort_gov)):\r\n \r\n if (self.outbreak_obs_days[day].day == self.day_mort_gov[day2].day and self.outbreak_obs_days[day].month == self.day_mort_gov[day2].month):\r\n\r\n self.R_data_daily[day] = self.R_per_day_gov[day2]\r\n \r\n \r\n break\r\n \r\n for day3 in range(0,len(self.day_m_google)):\r\n \r\n if (self.outbreak_obs_days[day].day == self.day_m_google[day3].day and self.outbreak_obs_days[day].month == self.day_m_google[day3].month):\r\n \r\n self.m_data_daily[day] = self.m_google[day3]\r\n \r\n \r\n break\r\n \r\n \r\n # Get weekly sets\r\n \r\n # Firstly we find weeks\r\n self.num_weeks_outbreak = 0\r\n\r\n for day in range(0,self.num_days_outbreak):\r\n\r\n if self.outbreak_obs_days[day].weekday() == 0:\r\n \r\n \r\n if day + 7 < self.num_days_outbreak-1:\r\n \r\n self.num_weeks_outbreak = self.num_weeks_outbreak + 1\r\n \r\n # Next find specific date for week\r\n self.outbreak_obs_weekly = np.zeros(self.num_weeks_outbreak,dtype=datetime.datetime)\r\n self.R_week_50 = np.nan*np.ones(self.num_weeks_outbreak)\r\n self.R_week_95 = np.nan*np.ones((2,self.num_weeks_outbreak))\r\n self.m_week_50 = np.nan*np.ones(self.num_weeks_outbreak)\r\n self.m_week_95 = np.nan*np.ones((2,self.num_weeks_outbreak))\r\n \r\n \r\n week = 0\r\n \r\n for day in range(0,self.num_days_outbreak):\r\n \r\n if self.outbreak_obs_days[day].weekday() == 0:\r\n \r\n \r\n if day + 7 < self.num_days_outbreak-1:\r\n self.outbreak_obs_weekly[week] = self.outbreak_obs_days[day] + (self.outbreak_obs_days[day+7] - self.outbreak_obs_days[day])/2\r\n self.R_week_95[0,week] = np.percentile(self.R_data_daily[day:day+8],5)\r\n self.R_week_95[1,week] = np.percentile(self.R_data_daily[day:day+8],95) \r\n \r\n self.R_week_50[week] = np.percentile(self.R_data_daily[day:day+8],50)\r\n self.R_week_95[0,week] = np.percentile(self.R_data_daily[day:day+8],5)\r\n self.R_week_95[1,week] = np.percentile(self.R_data_daily[day:day+8],95) \r\n self.m_week_95[0,week] = np.percentile(self.m_data_daily[day:day+8],5)\r\n self.m_week_95[1,week] = np.percentile(self.m_data_daily[day:day+8],95) \r\n self.m_week_50[week] = np.percentile(self.m_data_daily[day:day+8],50) \r\n \r\n week = week + 1\r\n \r\n \r\n \r\n # Get Monthly sets\r\n # Firstly we find 
weeks\r\n \r\n self.num_months_outbreak = 0\r\n \r\n current_month = -1\r\n\r\n for day in range(0,self.num_days_outbreak):\r\n\r\n if self.outbreak_obs_days[day].month > current_month:\r\n \r\n current_month = self.outbreak_obs_days[day].month\r\n num_days_in_month = (datetime.datetime(2020,current_month+1,1))-datetime.datetime(2020,current_month,1) \r\n self.num_months_outbreak = self.num_months_outbreak + 1\r\n \r\n \r\n # Next find specific date for week\r\n self.outbreak_obs_months = np.zeros(self.num_months_outbreak,dtype=datetime.datetime)\r\n \r\n self.R_month_50 = np.nan*np.ones(self.num_months_outbreak)\r\n self.R_month_95 = np.nan*np.ones((2,self.num_months_outbreak))\r\n self.m_month_50 = np.nan*np.ones(self.num_months_outbreak)\r\n self.m_month_95 = np.nan*np.ones((2,self.num_months_outbreak))\r\n \r\n \r\n current_month = -1\r\n month = 0\r\n \r\n for day in range(0,self.num_days_outbreak):\r\n \r\n if self.outbreak_obs_days[day].month > current_month: \r\n \r\n current_month = self.outbreak_obs_days[day].month\r\n dmonth = datetime.datetime(2020,current_month+1,1)-datetime.datetime(2020,current_month,1)\r\n self.outbreak_obs_months[month] = self.outbreak_obs_days[day] + (datetime.datetime(2020,current_month+1,1)-datetime.datetime(2020,current_month,1))/2\r\n num_days_in_month = min(day+dmonth.days,self.num_days_outbreak)\r\n self.R_month_95[0,month] = np.nanpercentile(self.R_data_daily[day:num_days_in_month],5)\r\n self.R_month_95[1,month] = np.nanpercentile(self.R_data_daily[day:num_days_in_month],95)\r\n self.R_month_50[month] = np.nanpercentile(self.R_data_daily[day:num_days_in_month],50)\r\n self.R_month_95[0,month] = np.nanpercentile(self.R_data_daily[day:num_days_in_month],5)\r\n self.R_month_95[1,month] = np.nanpercentile(self.R_data_daily[day:num_days_in_month],95) \r\n self.m_month_95[0,month] = np.nanpercentile(self.m_data_daily[day:num_days_in_month],5)\r\n self.m_month_95[1,month] = np.nanpercentile(self.m_data_daily[day:num_days_in_month],95) \r\n self.m_month_50[month] = np.nanpercentile(self.m_data_daily[day:num_days_in_month],50) \r\n \r\n month = month + 1\r\n \r\n return", "def get_earliest_retirement(self):\n return self.years_to_retirement", "def interest(self, from_date, to_date):\n yearfrac = findates.daycount.yearfrac(from_date,\n to_date,\n \"30/360 US\")\n months = yearfrac * 12\n return Decimal((1.0 + \\\n self.annual_interest_rate / 12.0) ** months - 1.0)", "def calc_calories(gpx_track, wt = 175, activity='Run'):" ]
[ "0.7293272", "0.65816814", "0.64126164", "0.5913036", "0.59082484", "0.55546623", "0.55409116", "0.5488795", "0.54877865", "0.5483977", "0.54786175", "0.5446557", "0.54036194", "0.5401703", "0.53986675", "0.53903943", "0.53836673", "0.538216", "0.537716", "0.5362062", "0.5353387", "0.5331724", "0.5327209", "0.53181463", "0.530514", "0.52991915", "0.5299093", "0.5298179", "0.5289651", "0.52871555", "0.52762824", "0.52582365", "0.5248775", "0.52401525", "0.5238754", "0.5237037", "0.5233566", "0.5230909", "0.5229317", "0.52254874", "0.52221024", "0.52204555", "0.52181053", "0.52159923", "0.52058816", "0.51919496", "0.5191396", "0.5191396", "0.517755", "0.517195", "0.51519287", "0.51502615", "0.5149245", "0.5148444", "0.51346236", "0.51303023", "0.5121509", "0.5115311", "0.5099186", "0.50905836", "0.50898236", "0.50896424", "0.5079917", "0.50788516", "0.50720286", "0.5065521", "0.5059938", "0.5054801", "0.50483483", "0.5048118", "0.5043117", "0.5037541", "0.5035904", "0.50334764", "0.5028182", "0.5026467", "0.5025456", "0.50245076", "0.5021933", "0.50082463", "0.5004382", "0.49974796", "0.49974707", "0.49974707", "0.4997325", "0.49916628", "0.49886957", "0.49883616", "0.4984153", "0.49817067", "0.49733135", "0.49719802", "0.49697566", "0.49648586", "0.49600774", "0.49580553", "0.49529883", "0.49488434", "0.4947047", "0.49421093" ]
0.69910264
1
Get total fuel saved. Returns float the total fuel saved in gallons
def get_fuel_total_saved (self):
    #~ print self.lost_heat_recovery
    #~ print self.intertie_offset_generation_fuel_used
    #~ print self.pre_intertie_generation_fuel_used
    #~ gen_eff = self.cd["diesel generation efficiency"]
    #~ fuel_used = self.intertie_offset_generation / gen_eff

    generation_diesel_reduction = \
        np.array(self.pre_intertie_generation_fuel_used\
            [:self.actual_project_life])
    return - np.array(self.lost_heat_recovery[:self.actual_project_life]) +\
        generation_diesel_reduction
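Read as code, this positive document nets the lost heat recovery against the diesel fuel that pre-intertie generation would otherwise have burned, year by year. The sketch below replays that arithmetic on made-up numpy arrays; the three-year project life, the numeric values, and the variable names mirroring the record are assumptions for illustration, not data from the source.

import numpy as np

# Hypothetical inputs (gallons of diesel per year); none of these values come
# from the dataset record above.
actual_project_life = 3
pre_intertie_generation_fuel_used = np.array([10_000.0, 10_500.0, 11_000.0])
lost_heat_recovery = np.array([500.0, 520.0, 540.0])

# Same expression as the record: fuel saved per year is the generation diesel
# reduction minus the heat-recovery credit that is lost.
fuel_total_saved = (
    -lost_heat_recovery[:actual_project_life]
    + pre_intertie_generation_fuel_used[:actual_project_life]
)
print(fuel_total_saved)  # -> 9500., 9980., 10460. gallons saved per year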
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_fuel_total_saved (self):\n return self.electric_diesel_reduction + self.reduction_diesel_used", "def get_total_energy(parameters):\n return orm.Float(parameters.get_attribute('energy'))", "def total_energy(self):\n return self._total_energy", "def totalValue(self):\n\n\t\tvalue = 0\n\t\tfor bottle in self.bottles:\n\t\t\tvalue += bottle.inflatedCost\n\n\t\treturn value", "def calculate_total_fuel(filename):\n return sum([calculate_fuel_from_mass(mass) for mass in read_mass_from_file(filename)])", "def get_total_haberes(self):\n return float(self.input.get_text(liquidaciones_historicas_catalog.TOTAL_HABERES).replace(\".\", \"\"))", "def get_total(self):\n\n base_price = self.get_base_price()\n\n # Christmas Melons are more x1.5 expensive than other melons\n if self.species == \"Christmas Melon\":\n base_price = base_price * 1.5\n\n total = (1 + self.tax) * self.qty * base_price\n\n return total", "def total(self) -> float:\n return self._total", "def cargo_fuel(self):\n return self._cargo_fuel", "def get_total(self):\n # method on the class DomesticMelonOrder\n base_price = 5\n\n if self.species == \"Christmas melons\":\n base_price = base_price * 1.5\n\n total = (1 + self.tax) * self.qty * base_price\n\n return total", "def get_total(self):\n\n base_price = self.get_base_price()\n if self.species == \"christmas melon\":\n base_price = base_price * 1.5\n\n total = ((1 + self.tax) * self.qty * base_price)\n\n return total", "def get_total(self):\n total = 0.00\n\n for _drink in self.drinks:\n total = total + _drink.get_price()\n\n for _food in self.food:\n total = total + _food.get_price()\n\n return total", "def get_total_energy_produced (self):\n return self.net_generation_wind", "def get_total(self) -> float:\n if self.__open:\n raise RuntimeError(\"Cash drawer must be closed to count.\")\n total: float = 0.0\n for denom in CashDenomination:\n total += self.__contents[denom] * denom.amount\n return total", "def get_total(self):\n\n self.base_price = self.get_base_price()\n\n if self.species == \"christmas melon\":\n self.base_price = self.base_price * 1.5\n\n total = (1 + self.tax) * self.qty * self.base_price\n return total", "def get_total_supply() -> int:\n return total_supply", "def get_total(self):\n\n base_price = self.get_base_price()\n\n if self.species == \"Christmas\":\n base_price = base_price * 1.5\n\n total = (1 + self.tax) * self.qty * base_price\n\n return total", "def fuel_cost(self, update=False):\n if update or self._dfs['fuel_cost'] is None:\n self._dfs['fuel_cost'] = pudl.analysis.mcoe.fuel_cost(self)\n return self._dfs['fuel_cost']", "def get_remaining_fuel(self):\n return min(self.liquid_fuel, self.oxidizer)", "def total(self):\n if self.dynamic:\n self._update_db_obj()\n return self._db_obj.total", "def _calculate_fuel(self):\n self._fuel = self._calculate_fuel_r(self._mass)", "def total(self):\n\t\treturn self._total", "def GetTotal(self):\n return(self.total)", "def bus_line_total_miles(self) -> float:\n return self.dss_obj.BUSF(11, 0)", "def get_total_elle(self):\r\n \r\n return str(round(self._total_elle, 2))", "def get_total(self):\r\n \r\n return str(round(self._total, 2))", "def totalFireBonusDamage(self):\n return int(self._baseFireBonusDamage +\n self._equipmentFireBonusDamage +\n self._statusFireBonusDamage)", "def get_total(self):\n\n # Total sum\n self.sum = 0.00\n\n # Determine which Check buttons are selected\n # and add the charges to find the total\n if self.check_1.get() == 1:\n self.sum += 30.00\n if self.check_2.get() == 1:\n self.sum 
+= 20.00\n if self.check_3.get() == 1:\n self.sum += 40.00\n if self.check_4.get() == 1:\n self.sum += 100.00\n if self.check_5.get() == 1:\n self.sum += 35.00\n if self.check_6.get() == 1:\n self.sum += 200.00\n if self.check_7.get() == 1:\n self.sum += 20.00\n\n # Convert the sum to string\n # and store in StringVar object\n # to automatically update the total_val label\n self.sum_str.set(self.sum)", "def calculate_total_fuel_recursively(filename):\n return sum([calculate_fuel_recursively(mass) for mass in read_mass_from_file(filename)])", "def calc_total_fuel(mass):\n fuel = fuel_for_mass(mass)\n\n if fuel < 0:\n return 0\n\n added_fuel = calc_total_fuel(fuel)\n return fuel + added_fuel", "def checkFuel(self):\n return self.maze.checkFuelCost(self.checkpoint,currentLap = self.laps) - self.timeDriving", "def get_energy(self):\r\n return self._energy", "def get_energy():\n\n # open the psi4 log file\n with open(\"output.dat\", \"r\") as log:\n for line in log:\n if \"Total Energy =\" in line:\n return float(line.split()[3])\n\n raise EOFError(\"Cannot find energy in output.dat file.\")", "def get_total(self):\n\n base_price = 5\n \n if self.species == \"Christmas melon\":\n base_price = base_price * 1.5 \n\n total = (1 + self.tax) * self.qty * base_price \n\n if self.order_type == \"international\" and self.qty>10:\n total += 3\n\n\n return total", "async def get_garages_total(self):\r\n async with self._db.acquire() as conn:\r\n s = await (await conn.execute(Garage.count())).scalar()\r\n return s", "def get_total_energy_produced (self):\n return self.pre_intertie_generation[:self.actual_project_life]", "def tot(self):\n return self.det + self.out + self.faint + self.late", "def calcularTotal(self):\n subtotales=[]\n for row in range(0,self.tableFactura.rowCount()):\n subtotales.append(float(self.tableFactura.item(row,2).text()))\n importeTotal=sum(subtotales)\n return importeTotal", "def som(getallenlijst):\r\n total = sum(getallenlijst)\r\n return total", "def total(self):\n return self._total_name", "def calculate_fuel(self, mass: int) -> int:\n fuel = math.floor(mass/3)-2\n return fuel", "def get_salario_total(self):\n s = 0\n for e in self.empleados:\n s += e.get_salario()\n return s", "def send_total_charge(self) -> float:\n charge = self.molecule.molecular_charge\n MDI_Send(charge, 1, MDI_DOUBLE, self.comm)\n return charge", "def GOAL_TOTAL() -> int:\n return 21", "def get_percent_oxygen(self) -> float:\n try:\n datalist = self.get_data()\n data = datalist[1]\n if data.endswith('\\x00'):\n data = data.rstrip('\\x00')\n return float(data)\n else:\n return float(data)\n except Exception as err:\n print(f'po read error: {err}')\n return -1", "def annual_energy(self):\n return self['annual_energy']", "def _compute_gasto_subtotal(self):\n beneficio = ingreso_subtotal - gasto_subtotal_comercial", "def fs_size_total(self):\n return self._fs_size_total", "def get_total_lui(self):\r\n \r\n return str(round(self._total_lui, 2))", "def total(self):\n total_price = self.get_total_amount()\n discounts = self.get_total_discount()\n\n return total_price - discounts", "def get_gof(self):\n gof = self.calculate_gof(self.data_sample, self.reference_sample)\n self.gof = gof\n return gof", "def get_gof(self):\n gof = self.calculate_gof(self.data_sample, self.reference_sample)\n self.gof = gof\n return gof", "def get_energy():\n\n # open the psi4 log file\n with open('output.dat', 'r') as log:\n lines = log.readlines()\n\n # find the total converged energy\n for line in lines:\n if 'Total Energy =' in 
line:\n energy = float(line.split()[3])\n break\n else:\n raise EOFError('Cannot find energy in output.dat file.')\n\n return energy", "def get(self) -> float:\n ...", "def fs_files_total(self):\n return self._fs_files_total", "def total_money(self) -> LocationInitialMoneyModel:\n return self._total_money", "def get_total(self):\n\n base_price=5\n if self.species == \"Christmas\":\n base_price=1.5*base_price\n \n total = (1 + self.tax) * self.qty * base_price\n\n if self.order_type==\"international\" and self.qty<10:\n total+=3\n\n return total", "def getFitness(self):\n\n if self.extraAsFitness!=None:\n return self.extra[self.extraAsFitness]\n \n fit = self.getMeanFitness('F')\n if (np.isnan(fit)):\n return 0 \n return float(int(fit*1000.0))/1000.0", "def getEnergy(self):\n if not hasattr(self,\"energy\"):\n self.energy = self.calcEnergy()\n return self.energy", "def getTotalReward(self):\n return self.lastFitness", "def get_value(\n self\n ) -> float:\n\n return self.average", "def get_total_redeem(self):\n total = 0\n for redeem in self.get_redeems():\n total += redeem.get_total()\n return total", "def calculate(self) -> float:", "def total_population(self):\n return self._total_population", "def total_population(self):\n return self._total_population", "def total(self) -> float:\n\n remained_to_be_taxed = self.income\n # taxed = list()\n self.tax_amounts = []\n start_tax_range = 0\n end_tax_range = self.bracket\n\n for i, b in enumerate(self.bracket):\n\n amount_to_tax = b.end - start_tax_range\n t = Taxed(min(amount_to_tax, remained_to_be_taxed), b.rate,\n min(amount_to_tax, remained_to_be_taxed) * b.rate)\n self.tax_amounts.append(t)\n # print(i, start_t ax_range, b.end, amount_to_tax, b.rate)\n\n remained_to_be_taxed -= amount_to_tax\n # print(remained_to_be_taxed)\n\n if b.end > self.income:\n break\n\n start_tax_range = b.end\n\n # print(taxed)\n return sum([t.tax for t in self.tax_amounts])", "def _calculate_fuel_simple(self):\n self._fuel_simple = (self.mass // 3) - 2", "def TotalEnergy(self):\n return (math.sqrt((Particle.RestEnergy(self) ** 2)\n + (np.linalg.norm(Particle.Momentum(self)) * const.speed_of_light) ** 2))", "def total(self) -> int:\n return self._total", "def fuelconsumption(self):\n self.convert_window(\"Fuel Consumption\", \"liters/100 kilometer\", [\"car(2014 US Average)\", \"gallon(UK)/100 miles\", \"gallon(US)/100 miles\", \"kilometer/liter\", \"liters/100 kilometer\", \"liters/meter\", \"miles/gallon(UK)\", \"miles/gallon(US)\"])", "def native_value(self) -> float | None:\n if self.coordinator.data is None:\n return None\n\n prices = self.coordinator.data.prices\n return prices.get((self._station_id, self._fuel_type))", "def df_tot(self):\n return self._df_tot", "def total_storage(self):\n return self._total_storage", "def getavgvel(self):\n if self.total_time:\n return (6.28)/(self.total_time)", "def ground_dps(self) -> Union[int, float]:\n if hasattr(self.type_data.proto, \"weapons\"):\n weapons = self.type_data.proto.weapons\n weapon = next(\n (weapon for weapon in weapons if weapon.type in [TARGET_TYPE.Ground.value, TARGET_TYPE.Any.value]), None\n )\n if weapon:\n return (weapon.damage * weapon.attacks) / weapon.speed\n return 0", "def get_f_score(self):\n return self.get_g_score() + self.get_h_score()", "def read_gas_resistance(self):\n #Declare global variables\n global calAmbTemp\n\n self._force_read(True)\n\n tempADC = (self._read_register_1ubyte(self.BME680_TEMP_MSB) << 12) | (self._read_register_1ubyte(self.BME680_TEMP_LSB) << 4) | 
(self._read_register_1ubyte(self.BME680_TEMP_XLSB) >> 4)\n gasResADC = (self._read_register_1ubyte(self.BME680_GAS_R_MSB) << 2) | (self._read_register_1ubyte(self.BME680_GAS_R_LSB) >> 6)\n gasRange = self._read_register_1ubyte(self.BME680_GAS_R_LSB) & 0x0F\n\n calAmbTemp = self._compensate_temperature(tempADC)\n val = self._calculate_gas_resistance(gasResADC, gasRange)\n\n return float(val)", "def fuel_prediction(self):\n\n return 0", "def ground_dps(self) -> Union[int, float]:\n if hasattr(self.type_data.proto, \"weapons\"):\n weapons = self.type_data.proto.weapons\n weapon = next(\n (weapon for weapon in weapons if weapon.type in {TARGET_TYPE.Ground.value, TARGET_TYPE.Any.value}), None\n )\n if weapon:\n return (weapon.damage * weapon.attacks) / weapon.speed\n return 0", "def total_price(self):\n return self.owner.total_price()", "def dilutionneeded(self) -> float:\n return self.stock*1.0/self.final", "def get_total_descuentos(self):\n return float(self.input.get_text(liquidaciones_historicas_catalog.TOTAL_DESCUENTOS).replace(\".\", \"\"))", "def value(self) -> float:", "def total_market_value(self):\n return self.pos_handler.total_market_value()", "def total_fuel_required(mass):\n total = 0\n while mass > 0:\n fuel = fuel_required(mass)\n total += fuel\n mass = fuel\n return total", "def get_total(self):\n\n base_price = 5\n total = (1 + int(self.tax)) * int(self.qty) * base_price\n\n return total", "def fuel_calc(mass):\n return max((mass / 3) - 2, 0)", "def get_percentage(self):\n return self.PotTax_percentage", "def totalElectricBonusDamage(self):\n return int(self._baseElectricBonusDamage +\n self._equipmentElectricBonusDamage +\n self._statusElectricBonusDamage)", "def totalUnits(self):\n\t\treturn self.units", "def gals_per_steradian(self):\n return self._gals_per_arcmin2 * steradian_to_arcmin2", "def table_total(self):\n total = 0.00\n\n for customer in self.customers:\n total = total + customer.get_total()\n\n return total", "def total_cost(self):\n return (self.food_amount + self.local_transport_amount + self.other_expenses +\n self.travel_amount + self.accomodation_amount)", "def taxes(self) -> float:\n return self.total", "def calculate_fuel(mass):\n return math.floor(mass/3) - 2", "def get_salario_total_mensual(self):\n s = 0\n for e in self.empleados:\n s += e.get_salario_mensual()\n return s", "def get_total(self):\n\n total = super().get_total()\n if self.qty < 10:\n total += 3.00\n return total", "def sub_total():\n return sum(SAVE_PRICE)", "def get_energy(self):\n return self.momentum*self.momentum/(2*self.mass)", "def getLoad(self) -> float:\n return self.load" ]
[ "0.824919", "0.67309505", "0.67212445", "0.6641931", "0.6640454", "0.65572923", "0.64542156", "0.6337531", "0.6322669", "0.6316941", "0.6301916", "0.6281241", "0.62791896", "0.6275253", "0.6265616", "0.61895245", "0.6137895", "0.61320716", "0.61031145", "0.6087333", "0.6077702", "0.60732543", "0.60173064", "0.6013059", "0.5992951", "0.5992199", "0.59789133", "0.5944898", "0.5942518", "0.5939736", "0.59139836", "0.5908313", "0.5893221", "0.5887382", "0.58824843", "0.58640736", "0.5862774", "0.5854287", "0.58529276", "0.58366144", "0.5811839", "0.58020645", "0.5791806", "0.57902914", "0.57647765", "0.5763302", "0.5750019", "0.57280123", "0.5718246", "0.5717214", "0.5717072", "0.5717072", "0.5703078", "0.5699216", "0.5698026", "0.5697724", "0.56799895", "0.5673369", "0.5657819", "0.56534314", "0.565232", "0.56459975", "0.56428796", "0.5642779", "0.5642779", "0.56348634", "0.5626423", "0.5625133", "0.56249654", "0.56075436", "0.5607501", "0.55990803", "0.55963296", "0.5587702", "0.55821943", "0.5577845", "0.557521", "0.5572738", "0.55713195", "0.55594945", "0.5559129", "0.5555578", "0.55511624", "0.5551153", "0.5545706", "0.55438703", "0.55438197", "0.5541464", "0.5540595", "0.55381805", "0.5538102", "0.5535812", "0.5535445", "0.5535401", "0.55309683", "0.5521093", "0.5514345", "0.55049103", "0.5504567", "0.5503555" ]
0.7722197
1
Get total energy produced. Returns float the total energy produced
def get_total_energy_produced (self):
    return self.pre_intertie_generation[:self.actual_project_life]
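A small usage sketch for the accessor above. The generation series and project life are invented stand-ins (the record does not include them), and summing the sliced series to a single float is only one plausible way to reduce it to a total.

import numpy as np

pre_intertie_generation = np.array([1_200.0, 1_250.0, 1_300.0, 1_350.0])  # assumed kWh per year
actual_project_life = 3  # assumed

yearly_energy = pre_intertie_generation[:actual_project_life]
total_energy = float(yearly_energy.sum())  # 3750.0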
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def total_energy(self):\n return self._total_energy", "def TotalEnergy(self):\n return (math.sqrt((Particle.RestEnergy(self) ** 2)\n + (np.linalg.norm(Particle.Momentum(self)) * const.speed_of_light) ** 2))", "def get_total_energy_produced (self):\n return self.net_generation_wind", "def get_total_energy(parameters):\n return orm.Float(parameters.get_attribute('energy'))", "def get_energy(self):\r\n return self._energy", "def energy(self):\n return self._energy", "def getEnergy(self):\n if not hasattr(self,\"energy\"):\n self.energy = self.calcEnergy()\n return self.energy", "def energy(self) -> Union[int, float]:\n return self.proto.energy", "def energy(self) -> Union[int, float]:\n return self.proto.energy", "def get_energy(self):\n return self.momentum*self.momentum/(2*self.mass)", "def energy(self):\n return self.mc.energy(self.chain)", "def energy(self):\n energy = -0.5*np.sum(self.phi)+0.5*np.sum(self.mass*np.sqrt(self.particles.momentum[:,0]**2+self.particles.momentum[:,1]**2)**2)\n return energy", "def get_energy():\n\n # open the psi4 log file\n with open(\"output.dat\", \"r\") as log:\n for line in log:\n if \"Total Energy =\" in line:\n return float(line.split()[3])\n\n raise EOFError(\"Cannot find energy in output.dat file.\")", "def compute_energy(self):\n energy = 0.5 * self.masses * np.sum(self.velocities * self.velocities, axis=1)\n avg_energy = np.mean(energy) # average kinetic energy of all particles\n return avg_energy", "def get_energy():\n\n # open the psi4 log file\n with open('output.dat', 'r') as log:\n lines = log.readlines()\n\n # find the total converged energy\n for line in lines:\n if 'Total Energy =' in line:\n energy = float(line.split()[3])\n break\n else:\n raise EOFError('Cannot find energy in output.dat file.')\n\n return energy", "def send_energy(self) -> float:\n # Ensure that the molecule currently passes validation\n if not self.molecule_validated:\n raise Exception(\"MDI attempting to compute energy on an unvalidated molecule\")\n self.run_energy()\n properties = self.compute_return.properties.dict()\n energy = properties[\"return_energy\"]\n MDI_Send(energy, 1, MDI_DOUBLE, self.comm)\n return energy", "def ComputeEnergyConsumption(self):\r\n pass", "def get_fuel_total_saved (self):\n return self.electric_diesel_reduction + self.reduction_diesel_used", "def getEnergy(self):\n energy = 0.0\n\n for i in range(0, self.nPoints):\n energy += self.tDomain[i] ** 2\n\n energy /= self.nPoints\n return energy", "def current_energy_produced(self):\n return self.df.exp.sum()", "def total_energy(sign, FS):\n time = compute_time(sign, FS)\n\n return np.sum(np.array(sign)**2)/(time[-1]-time[0])", "def KineticEnergy(self):\n return Particle.TotalEnergy(self) - Particle.RestEnergy(self)", "def energy(self):\n return self._accelerator.energy", "def energy(data):\n return sum(pow(data, 2))", "def energy_tot(P,F,H,molecule):\n return energy_el(P,F,H) + energy_n(molecule)", "def calcEnergy(self):\n speed_light = constants.physical_constants[\"speed of light in vacuum\"][0]#m/sec by default\n if self.mass is None:\n raise CoordinateVector(\"The particle mass needs to be specified to calculate the energy.\")\n return speed_light*math.sqrt(self.p*self.p + (self.mass*speed_light)**2)", "def energy(self):\n e = 0\n\n restoration = RestorationModel(self.graph_damaged)\n restoration.run(self.state)\n restoration_graphs = restoration.get_restoration_graphs()\n restoration_times = restoration.get_restoration_times()\n restoration_costs = restoration.get_restoration_costs()\n\n 
damaged = []\n damaged.append(get_delta(self.no_damage, self.initial_damage))\n\n sim_results = Parallel(n_jobs=4)(delayed(parallel_model)(\n graph, self.od_graph, self.od_matrix) for graph in restoration_graphs[:-1])\n for values in sim_results:\n damaged.append(get_delta(self.no_damage, values))\n\n for idx, values in enumerate(damaged):\n dt = restoration_times[idx] if idx == 0 else restoration_times[idx] - \\\n restoration_times[idx-1]\n e += sum(restoration_costs[idx]) + dt * (self.day_factor * values[2] * np.sum(self.mu*self.xi) +\n values[3] * np.sum(self.mu * (self.nu * self.F_w + self.rho)) + values[4] * self.upsilon)\n with open(self.fdir+'energy.csv', 'a') as f:\n f.write('\\n'+str(e))\n\n return e", "def total_KE(particles):\r\n return sum([particle.kinetic_energy() for particle in particles])", "def potentialEnergy(self):\n return 0.5*(pdist(self.positions)**2).sum()", "def energy(energy_name: str) -> float:\n pass", "def energy(ps):\n return kinetic_energy(ps) + potential_energy(ps)", "def total(self) -> float:\n return self._total", "def get_fuel_total_saved (self):\n #~ print self.lost_heat_recovery\n #~ print self.intertie_offset_generation_fuel_used\n #~ print self.pre_intertie_generation_fuel_used\n #~ gen_eff = self.cd[\"diesel generation efficiency\"]\n #~ fuel_used = self.intertie_offset_generation / gen_eff\n\n generation_diesel_reduction = \\\n np.array(self.pre_intertie_generation_fuel_used\\\n [:self.actual_project_life])\n return - np.array(self.lost_heat_recovery[:self.actual_project_life]) +\\\n generation_diesel_reduction", "def energy(self):\n sum_energy = 0.0\n for i in range(0,self.natoms-1):\n for j in range(i+1,self.natoms):\n rij = (self.atoms[i].xyz - self.atoms[j].xyz)\n rij = rij - self.pbc_correction(rij)\n mag_rij = la.norm(rij)\n sum_energy = sum_energy + self.pair_energy(self.epsilon, self.sigma, mag_rij) \n return sum_energy", "def energy_percentage(self) -> Union[int, float]:\n if not self.proto.energy_max:\n return 0\n return self.proto.energy / self.proto.energy_max", "def energy_percentage(self) -> Union[int, float]:\n if not self.proto.energy_max:\n return 0\n return self.proto.energy / self.proto.energy_max", "def estimated_energy(self):\n energy = 0j\n for pauli_string, coef in self._pauli_coef_terms:\n a = self._zeros[pauli_string]\n b = self._ones[pauli_string]\n if a + b:\n energy += coef * (a - b) / (a + b)\n energy = complex(energy)\n if energy.imag == 0:\n energy = energy.real\n energy += self._identity_offset\n return energy", "def energy(self):\n nocc, ntot, gmo, e = self.nocc, self.ntot, self.gmo, self.e\n\n Ec = 0.0\n for i in range(nocc):\n for j in range(nocc):\n for a in range(nocc, ntot):\n for b in range(nocc, ntot):\n Ec += gmo[i, a, j, b]*(2*gmo[i, a, j, b] - gmo[i, b, j, a])/\\\n (e[i] + e[j] - e[a] - e[b])\n\n self.Ec = Ec\n self.E_mp2 = Ec + self.E_scf\n\n print('@MP2 correlation energy: {:15.10f}\\n'.format(self.Ec))\n print('@Total MP2 energy: {:15.10f}\\n'.format(self.E_mp2))\n\n return self.E_mp2", "def energy(self):\n self.E = - np.sum(self.phi) + 0.5 * self.mass * np.sqrt((self.v_x ** 2 + self.v_y **2))", "def energy(self):\n nocc, gmo, e = self.nocc, self.gmo, self.e\n\n Ec = 0.0\n for i in range(nocc):\n for j in range(nocc):\n for a in range(nocc, len(e)):\n for b in range(nocc, len(e)):\n Ec += (1/4.0) * gmo[i, j, a, b]**2 / (e[i]+e[j]-e[a]-e[b])\n\n self.Ec = Ec\n self.E_mp2 = Ec + self.E_scf\n\n print('@MP2 correlation energy: {:15.10f}\\n'.format(self.Ec))\n print('@Total MP2 energy: 
{:15.10f}\\n'.format(self.E_mp2))\n\n return self.E_mp2", "def get_energy(self):\n return self.bot_client.send_command(_Command.GetEnergy)", "def energy(e: float) -> float:\n\n return (1/np.sqrt(2))*(gamma(-e/2+1/2)/(gamma(-e/2+3/4)))", "def computeEnergy(self):\n _cgco.gcoComputeEnergy(self.handle, self.energyTempArray)\n return self._convertEnergyBack(self.energyTempArray[0])", "def E(self):\n return self.generic_getter(get_energy, \"E\", \"convert_energy\")", "def get_total_redeem(self):\n total = 0\n for redeem in self.get_redeems():\n total += redeem.get_total()\n return total", "def energy_func(self):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n\n T_m = (T_mix_ph(i, T0=self.inl[0].T.val_SI) +\n T_mix_ph(o, T0=self.outl[0].T.val_SI)) / 2\n\n return (i[0] * (o[2] - i[2]) -\n self.A.val * (\n self.E.val * self.eta_opt.val -\n (T_m - self.Tamb.val_SI) * self.lkf_lin.val -\n self.lkf_quad.val * (T_m - self.Tamb.val_SI) ** 2))", "def annual_energy(self):\n return self['annual_energy']", "def energyplus_its(self):\n if self._energyplus_its is None:\n self._energyplus_its = 0\n return self._energyplus_its", "def Total_energy(angles):\n energy = 0\n \n for i in range(0,4):\n energy += Single_spin_energy(i,angles)\n return energy", "def kinetic_energy(self):\r\n return self.mass * np.dot(self.vel, self.vel) / 2", "def RestEnergy(self):\n return (self.restMass * const.speed_of_light * const.speed_of_light)", "def _get_mean(self):\n return self._get_conditional_negative_energy()", "def computeEnergy(self):\n\t\tGmo = self.Gmo\n\t\te = self.e\n\t\tself.Ec = 0.0\n\n\t\tfor i in range( self.nocc ):\n\t\t\tfor j in range( self.nocc ):\n\t\t\t\tfor a in range( self.nocc,self.norb ):\n\t\t\t\t\tfor b in range( self.nocc,self.norb ):\n\t\t\t\t\t\tself.Ec += 0.25*(Gmo[i,j,a,b]*Gmo[a,b,i,j])/(e[i]+e[j]-e[a]-e[b])\n\n\t\treturn self.E0 + self.Ec", "def delta_energy(atom,layer1,layer2):\n global r,c,h\n return float('%.2E' % Decimal(str(r*((atom**2/layer1**2)-(atom**2/layer2**2)))))", "def totalValue(self):\n\n\t\tvalue = 0\n\t\tfor bottle in self.bottles:\n\t\t\tvalue += bottle.inflatedCost\n\n\t\treturn value", "def energy_func(self):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n\n T_m = (T_mix_ph(i, T0=self.inl[0].T.val_SI) +\n T_mix_ph(o, T0=self.outl[0].T.val_SI)) / 2\n\n iam = (\n 1 - self.iam_1.val * abs(self.aoi.val) -\n self.iam_2.val * self.aoi.val ** 2)\n\n return (i[0] * (o[2] - i[2]) -\n self.A.val * (\n self.E.val * self.eta_opt.val * self.doc.val ** 1.5 * iam -\n (T_m - self.Tamb.val_SI) * self.c_1.val -\n self.c_2.val * (T_m - self.Tamb.val_SI) ** 2))", "def getEnergyAdded(self):\n return self.json_state.get(\"charging\").get(\"wh_energy\")", "def current_energy_consumed(self):\n return self.df.imp.sum()", "def get_value(\n self\n ) -> float:\n\n return self.average", "def total_kin_energy (self):\n total = 0. 
\n for planet in self.planets: #this loop takes each planet's kinetic energy and sums it with the others.\n total += planet.kenergy # the sum of the kinetic energies\n total_kin= total # system's kinetic energy\n \n return(total_kin)", "def incident_energy(self):\n return self._incident_energy", "def get_total(self) -> float:\n if self.__open:\n raise RuntimeError(\"Cash drawer must be closed to count.\")\n total: float = 0.0\n for denom in CashDenomination:\n total += self.__contents[denom] * denom.amount\n return total", "def kinetic_energy(self):\r\n position, velocity, escaped_particles,impact, wall_collision,mom = self.box_collision_info()\r\n for j in xrange(1,self.n):\r\n abs_velocity = np.sqrt(velocity[:,0]**2+velocity[:,1]**2\r\n + velocity[:,2]**2)\r\n KE = 0.5*self.m*abs_velocity**2\r\n total_KE = np.sum(KE)\r\n invid_KE = total_KE/self.Npart\r\n\r\n return total_KE, invid_KE", "def local_energy(self):\n state = self.current_state\n (matrix_elements, transitions) = \\\n self.hamiltonian.find_matrix_elements(state)\n energy_list = [self.nqs.amplitude_ratio(state, transitions[i]) * mel\n for (i, mel) in enumerate(matrix_elements)]\n return sum(energy_list)", "def energy(self):\n E = sum([1 for c in self.constraints if self._is_constraint_violated(c)])\n if E == 0:\n self._save_solution()\n print(\"exiting...\")\n exit()\n return E", "def get_total(self):\n\n base_price = self.get_base_price()\n if self.species == \"christmas melon\":\n base_price = base_price * 1.5\n\n total = ((1 + self.tax) * self.qty * base_price)\n\n return total", "def calculate_energy(self):\n temp_e = 0\n\n for i in range(0,self.neuron_count):\n for j in range(0, self.neuron_count):\n if i != j:\n temp_e += self.get_weight(i, j) * self.current_state[i] * \\\n self.current_state[j]\n return -1 * temp_e / 2", "def tot(self):\n return self.det + self.out + self.faint + self.late", "def get_total(self):\n\n base_price = self.get_base_price()\n\n # Christmas Melons are more x1.5 expensive than other melons\n if self.species == \"Christmas Melon\":\n base_price = base_price * 1.5\n\n total = (1 + self.tax) * self.qty * base_price\n\n return total", "def calculate(self) -> float:", "def get_total(self):\n\n base_price = self.get_base_price()\n\n if self.species == \"Christmas\":\n base_price = base_price * 1.5\n\n total = (1 + self.tax) * self.qty * base_price\n\n return total", "def total(self):\n\t\treturn self._total", "def energy_func(self):\n return (\n self.inl[0].m.val_SI * (\n self.outl[0].h.val_SI - self.inl[0].h.val_SI) +\n self.inl[1].m.val_SI * (\n self.outl[1].h.val_SI - self.inl[1].h.val_SI))", "def total_energy(self, KE, PE):\n TOT = KE+PE\n # Printing the amplitude to command line\n amplitude = max(TOT)-min(TOT)\n print('Amplitude of total energy during %i year(s): %g[AU²*kg/yr²]' \\\n %(self.t, amplitude))\n # Creating an axis for the time steps\n x = np.linspace(0, self.t, self.N*self.t+1)\n # Initializing the second figure\n plt.figure(figsize=(10, 10))\n # Creating the plot\n plt.plot(x, KE, x, PE, x, KE+PE)\n # Decorating the plot\n plt.suptitle('Total energy in the Earth-Sun system.', fontsize=24)\n plt.xlabel('time [yr]', fontsize=16)\n plt.ylabel('energy [AU²*kg/yr²]', fontsize=16)\n plt.legend(['KE', 'PE', 'KE+PE'], loc=2)", "def calc_energy_and_price(self) -> (float, float):\n\n cost_sum = 0\n energy_sum = 0\n for pump_id in self.pumps:\n pump_energy, pump_cost = self.pumps[pump_id].calculate_energy_and_cost()\n cost_sum += pump_cost\n energy_sum += pump_energy\n\n pump_id.append_index = 
0\n\n assert energy_sum >= 0, \"The pumping energy cant be negative!\"\n assert cost_sum >= 0, \"The pumping cost cant be negative!\"\n return energy_sum, cost_sum", "def energy(self, r):\n sigma = self.params['sigma']\n epsilon = self.params['epsilon']\n s = sigma / r\n s6 = s**6; s12 = s6 * s6\n pot = 4.0 * epsilon * (s12 - s6)\n return pot", "def energy_pfu(self):\n return self._energy_pfu", "def get_total(self):\n\n self.base_price = self.get_base_price()\n\n if self.species == \"christmas melon\":\n self.base_price = self.base_price * 1.5\n\n total = (1 + self.tax) * self.qty * self.base_price\n return total", "def getValue(self):\n result = 0.0\n for e in self.children:\n result += e.getValue()\n return result", "def _calc_energy( self, V_a, eos_d ):\n pass", "def compute_energy(self, protein): \n return utils.score_pose(protein.pose, self.scorefxn)", "def getEnergy(self, recalculate):\n\n if recalculate:\n try:\n calc = (GULP(keywords=self.params[\"gulp_keywords_calculate_energy\"][\"value\"],\n library=self.params[\"gulp_library\"][\"value\"]))\n self.structure.set_calculator(calc)\n self.energy = self.structure.get_potential_energy()\n self.numberOfSinglePointCalculations += 1\n return True, self.energy / self.numberOfAtoms\n except:\n return False, 0\n\n else:\n return True, self.energy / self.numberOfAtoms", "def get_cost(self) -> float:\n return math.e / self.fitness", "def check_overall_energy(self):\n energy = 0\n for student in self.students:\n energy += int(student.energy_level)\n for mentor in self.mentors:\n energy += int(mentor.energy_level)\n print(\"Overall energy equals \", energy)", "def get_total(self):\n # method on the class DomesticMelonOrder\n base_price = 5\n\n if self.species == \"Christmas melons\":\n base_price = base_price * 1.5\n\n total = (1 + self.tax) * self.qty * base_price\n\n return total", "def _calc_Em(self):\n return (self.parameters.E0 +\n self.x * sqrt2 * self.parameters.sigma * self.mt)", "def total_volume(self):\n v = self.cell_edges\n v = np.abs(v[-1] - v[0])\n return v", "def compute_energy(self):\n\n # radiation energy\n Qsqrd = self.omega_coords[:,:,1]*self.omega_coords[:,:,1]\n Psqrd = self.omega_coords[:,:,0]*self.omega_coords[:,:,0]\n\n e_rad = (Psqrd/self.mode_mass + (self.mode_mass*self.omega**2)*Qsqrd)*.5\n\n # space charge energy\n Dsqrd = self.dc_coords[:,:,0]*self.dc_coords[:,:,0]\n\n e_drft = Dsqrd/(2.*self.mode_mass)\n\n energy = e_rad+e_drft\n\n return energy", "def get_total(self, _property=None):\n return self._get_total(\"event\", _property)", "def get_total_haberes(self):\n return float(self.input.get_text(liquidaciones_historicas_catalog.TOTAL_HABERES).replace(\".\", \"\"))", "def energy(nx,ny):\n return 1+nx+ny", "def energy_atom(atom,layer):\n global r,c,h\n backval= r*((atom**2/layer**2))\n return float('%.2E' % Decimal(str(backval)))", "def total_mass_au(self):\n return np.sum(self.atomic_mass)", "def calc_excess_energy (self):\n #~ print sorted(self.cd.keys())\n self.excess_energy = \\\n (self.generation_wind_proposed - self.transmission_losses) * \\\n (self.cd['percent excess energy'] / 100.0)\n #~ print 'self.excess_energy',self.excess_energy", "def getavgvel(self):\n if self.total_time:\n return (6.28)/(self.total_time)", "def get_e(self):\n return self.e_min + self.e_ * self.e_range", "def total_market_value(self):\n return self.pos_handler.total_market_value()", "def calculate_total_pump_volume(self) -> float:\n volume_sum = 0\n for interval in self.flow_reads:\n # volume_sum += interval.calculate_volume() TODO: 
finish this\n pass\n\n assert volume_sum >= 0\n\n return volume_sum", "def energy(n):\n return (n * pi * hbar / (2 * a)) ** 2 / (2 * m)", "def get_energy(self, position):\n\n # update the positions of the system\n self.simulation.context.setPositions(position)\n\n # Get the energy from the new state\n state = self.simulation.context.getState(getEnergy=True)\n\n energy = state.getPotentialEnergy().value_in_unit(unit.kilocalories_per_mole)\n\n return energy" ]
[ "0.8479409", "0.79218054", "0.779159", "0.7745281", "0.7656375", "0.7550641", "0.7512186", "0.75108033", "0.75108033", "0.75029075", "0.7464623", "0.73755133", "0.7374632", "0.7320508", "0.72703177", "0.72320104", "0.7221515", "0.7218909", "0.71979094", "0.71671396", "0.712799", "0.70854396", "0.7064693", "0.69941986", "0.699368", "0.69824845", "0.6961419", "0.69538796", "0.6944146", "0.6923296", "0.6888259", "0.6861608", "0.685303", "0.68358636", "0.6834671", "0.6834671", "0.68297696", "0.6818299", "0.68130845", "0.6793389", "0.67921627", "0.6791762", "0.67892826", "0.67347777", "0.67061484", "0.6695926", "0.66876423", "0.66626346", "0.665853", "0.66519314", "0.6641111", "0.66335684", "0.6595703", "0.6589705", "0.6587993", "0.6563843", "0.65635526", "0.6542474", "0.6529233", "0.6528604", "0.65179545", "0.650968", "0.6503369", "0.64917624", "0.64884925", "0.6481715", "0.6474568", "0.6467137", "0.64572185", "0.6455722", "0.6443065", "0.64315957", "0.64208525", "0.64187056", "0.64171547", "0.6411292", "0.63942206", "0.6383955", "0.63813275", "0.636837", "0.63672894", "0.6367257", "0.63651824", "0.6362466", "0.63503706", "0.63503146", "0.63331693", "0.6329437", "0.63226277", "0.6314522", "0.63130873", "0.6308671", "0.63076144", "0.62840307", "0.62728405", "0.62707186", "0.6264431", "0.62592995", "0.62386787", "0.622994" ]
0.77342874
4
Get your current running jobs on the Sherlock cluster
def running_jobs_sherlock():
    user = os.environ['USER']
    return subprocess.check_output(['squeue', '-u', user, '-o', '%Z']).split()[1:]
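A sketch of the parsing step only, so the behaviour is visible without a Slurm cluster: squeue with the %Z format prints a header line followed by one working directory per running job, and slicing with [1:] drops that header. The byte string below is a fabricated stand-in for the real subprocess output; the paths are not real.

# Stand-in for subprocess.check_output(['squeue', '-u', user, '-o', '%Z'])
sample_output = b"WORK_DIR\n/scratch/users/alice/run1\n/scratch/users/alice/run2\n"

job_dirs = sample_output.split()[1:]
print(job_dirs)  # [b'/scratch/users/alice/run1', b'/scratch/users/alice/run2']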
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_current_jobs(ssh):\n stdin, stdout, stderr = ssh.exec_command('qstat')\n\n running_jobs = []\n for line in stdout.readlines():\n if '.awonmgr2' in line:\n jobid = line.split('.awonmgr2')[0]\n running_jobs.append(jobid)\n \n return running_jobs", "async def get_jobs(): \n return mngr.getAllJobs()", "def jobs(self):\n return self.get_jobs()", "def jobs(self):\n return self._jobs", "def _get_jobs():\n return _get_bigquery_service().jobs()", "def get_running_jobs(api_instance):\n namespace = \"default\"\n try:\n api_response = api_instance.list_namespaced_job(namespace)\n except ApiException as e:\n logger.exception(\"Exception while receiving running Jobs{}\".format(e))\n return api_response", "def list_jobs():\n\n name_to_job_details = redis_controller.get_name_to_job_details()\n return list(name_to_job_details.values())", "def jobs():\n result = []\n out = subprocess.check_output([\"/bin/launchctl\", \"list\"]).decode()\n for row in out.splitlines()[1:]:\n result.append(Job(row))\n return result", "def get_job_list(self):\n return self.job_list", "def get_job_list(self):\n return self.job_list", "def info(self):\n resp = self.server.request(\"get\", \"/jobs/%s/%s\" % (self.sessionid,\n self.name))\n return self.server.json_body(resp)", "def get(self):\n # TODO: auth\n return list(self.app.db.jobs.find())", "def get_job_list(self):\n job_list = []\n if mysql.job_list() == None:\n return job_list\n return mysql.job_list()", "def get_jobs(self):\n return list(self._jobs.values())", "def job(job_name):\n ClientID = Job.get_client_id(job_name)\n return tasks_for_client_job(ClientID, job_name)", "def list(self):\n return self.rpc.call(MsfRpcMethod.JobList)", "def getWorkers(self):\n return self.workers", "def current_job(self):\n assert(ExecutorThread.executor_object is not None)\n return self.__job", "def get_waiting_jobs(self):\n return []", "def ListJobs(self, token=None):\n return aff4.FACTORY.Open(self.CRON_JOBS_PATH, token=token).ListChildren()", "def list(self):\n self.background_scheduler.print_jobs()", "def get_jobs():\n \n rate_limit()\n command = [\"bjobs\", \"-o\", \"\\\"JOBID\", \"USER\", \"STAT\", \"QUEUE\", \"JOB_NAME\", \\\n \"delimiter=';'\\\"\"]\n command = \" \".join(command)\n jobs = subprocess.check_output(command, shell=True, stderr=open(os.devnull))\n \n # if there aren't any currently running or pending jobs, then the output\n if jobs == \"\":\n return set([])\n \n jobs = jobs.decode().strip().split(\"\\n\")\n \n current_jobs = set([])\n for line in jobs:\n if line.startswith(\"JOBID\"): # ignore the header line\n continue\n \n line = line.split(\";\")\n job_name = line[4]\n current_jobs.add(job_name)\n \n return current_jobs", "def jobs(self):\n raise NotImplementedError()", "def list_jobs(arn=None, nextToken=None):\n pass", "def running_procs(self) -> List[int]:\n return [p.model_id for p in self.primary_scheduler.queue_nodes.run_q]", "def list_running_tasks():\n inspector = current_app.control.inspect()\n\n return inspector.active()", "def get_executed_jobs(self):\n with self.__lock:\n return list(self.__executed_jobs)", "async def get_jobs(jobId: int) -> str: \n return mngr.getJob(str(jobId))", "def workers(self):\n return self.worker_list", "def _get_njobs_in_queue(self, username):", "def ListJobs(cls):\n return [key.parent().string_id() for key in cls.query().fetch(\n 100, keys_only=True)]", "def get_registered_jobs(self):\n with self.__lock:\n return list(self.__registered_jobs)", "def getNode(self):\r\n try:\r\n output,error = Popen(\"qstat | grep 
\"+self.jobId, shell=True, stdout=PIPE, stderr=PIPE).communicate()\r\n if self.jobId in output:\r\n return output.split(\"\\t\")[7]\r\n if len(error) > 0:\r\n logging.error(error)\r\n except ValueError:\r\n logging.info(\"Error: waiting for not submitted job...\")", "def get_jobs_connection(self):\n return self.m_connection.jobs", "def latest_job(self):\n return self.jobmanagers[self.current_network].latest_job", "def get_jobs(self, *, params: Optional[dict] = None) -> \"resource_types.Jobs\":\n\n return communicator.Jobs(self.__requester).fetch(parameters=params)", "def job_info(self):\n def _sortkey(x):\n return x['job_name']\n\n resp = self._cmd(uri = '/jenkins_jobs')\n jobs = resp.get('jobs', [])\n return sorted(jobs, key=_sortkey)", "def get_jobs(self, jobstore=None):\n\n return self._scheduler.get_jobs(jobstore)", "def jobs(self):\n return self.properties.get('jobs',\n EntityCollection(self.context, SynchronizationJob,\n ResourcePath(\"jobs\", self.resource_path)))", "def workers(self):\n return list(self._workers.keys())", "def get_jobs_in_queue() -> List[int]:\n output = subprocess.check_output([\"qstat\"]).decode().splitlines()\n job_ids = []\n for line in output:\n m = REGEX_QSTAT.match(line)\n if m:\n job_ids.append(int(m.group(1)))\n return job_ids", "def workloads(self):\n return self._workloads", "def get_all_jobs(self):\n all_jobs = self.job_set.all().order_by(\"-time_last_updated\", \"project__name\", \"-id\")\n # for job in all_jobs:\n # job.check_exists()\n\n # get the list of jobs listed in the database as running and update them.\n dbrunning = all_jobs.filter(state__in=['in queue', 'started'])\n for runningjob in dbrunning: runningjob.update();\n\n # get the updated list \n all_jobs = self.job_set.all().order_by(\"-time_last_updated\", \"project__name\", \"-id\")\n\n return all_jobs", "def jobs(self):\n return JobManager(self)", "def jobs(self):\n return JobManager(session=self._session)", "def give_workers_list(self):\n return self._workers", "def get_running_condor_jobs(self):\n return Utils.condor_q(selection_pairs=[[\"taskname\",self.unique_name]], extra_columns=[\"jobnum\"])", "def get_job_names(self):\n return []", "def get_waiting_jobs(self):\n open_jobs = []\n with closing(self._conn.cursor()) as cursor:\n for row in cursor.execute( \"select job_name, job_version from jobs where job_state in ('\"\n + JobState.WAITING.value + \"','\" + JobState.WAITING_PRED.value + \"','\" + JobState.RUNNING.value +\"')\"):\n open_jobs.append((row[0], row[1]))\n return open_jobs", "def active_jobs(self):\n \n active_jobs = []\n for job in self._jobs:\n if job.active:\n job.backend.status( job )\n active_jobs.append( job )\n\n self._active_jobs = active_jobs[:]\n\n return active_jobs", "def getClientJobsInformation(client):\n # getSlaveForDispatch()\n #jobs = mongo.db.jobs.find({'owner': client, 'is_active': True})\n jobs = mongo.db.jobs.find({'is_active': True})\n\n # result = i.title()\n # if any([s.get('status')=='on progress' for s in tasks]):\n # result = 'On Progress'\n # return result\n\n result = [{\n 'name': j.get('name'),\n 'datetime': j.get('datetime'),\n 'status': getJobStatus(j),\n 'priority': j.get('priority'),\n 'progress': sum([t.get('progress') for t in mongo.db.tasks.find({'job': j.get('_id')})]) /\n (mongo.db.tasks.find({'job': j.get('_id')}).count() or -1),\n 'id': str(j.get('_id')),\n 'tasks_count': mongo.db.tasks.find({'job': j.get('_id'), 'is_active': True}).count(),\n 'failed_count': mongo.db.tasks.find({'job': j.get('_id'), 'is_active': True, 'status': 
'failed'}).count(),\n 'completed_count': mongo.db.tasks.find({'job': j.get('_id'), 'is_active': True, 'status': 'completed'}).count(),\n 'active_task': 'Frame 43',\n } for j in jobs]\n return result or {}", "def get_job(self) -> Dict[Text, Text]:\n request = self._client.projects().jobs().get(name=self._job_name)\n return request.execute()", "def list_jobs(self):\n\n return dict(self._from_json(self.manage.run(override=\"list-jobs\")))", "def jobserver_running_jobs():\n\n if _MakeJobServer._singleton is None:\n return '?'\n\n try:\n buf = array.array('i', [0])\n if fcntl.ioctl(_MakeJobServer._singleton.job_pipe[0], FIONREAD, buf) == 0:\n return _MakeJobServer._singleton.num_jobs - buf[0]\n except NotImplementedError:\n pass\n except OSError:\n pass\n\n return _MakeJobServer._singleton.num_jobs", "def jobs(self) -> List[Job]:\n return self._jobs.values()", "def background_worker_pool(self):\r\n return self.run_tracker.background_worker_pool()", "def get_jobs(dumpruninfo):\n if \"jobs\" not in dumpruninfo:\n return []\n return dumpruninfo[\"jobs\"].keys()", "def get_jobs():\n jobs = [os.path.join(JOBS_DIR, job)\n for job in os.listdir(JOBS_DIR)\n if job != '.gitignore']\n return jobs", "def join(self):\n ret = []\n logger.info(\"CondorEngine.join\")\n if self.clusterId:\n cmd = [\"condor_q\", \"-json\", self.clusterId]\n while True:\n time.sleep(2)\n cmdout = subprocess.check_output(cmd)\n if cmdout:\n tasks = json.loads(cmdout.decode('utf-8'))\n if not cmdout or len(tasks) == 0:\n break\n\n status = dict()\n cmdout = subprocess.check_output(\n [\"condor_history\", '-json', self.clusterId])\n hist = json.loads(cmdout.decode('utf-8'))\n for jobinfo in hist:\n taskid = jobinfo.get('ProcId', 0)\n status[taskid] = 'C' if jobinfo.get('JobStatus', 1) == 4 else \\\n 'X' if jobinfo.get('JobStatus', 1) == 3 else 'E'\n logger.info('status %d: %s', taskid, status[taskid])\n if status[taskid] != 'X':\n self.job.setExitStatus(taskid, status[taskid])\n\n for k in sorted(status.keys()):\n ret.append(status[k])\n logger.info('finished cluster %s', self.clusterId)\n else:\n logger.warn('no condor cluster')\n\n return ret", "def get_jobs(self, age=1):\n jobs_for_reaper = []\n try: \n api_response = self.kube_v1_batch_client.list_namespaced_job(namespace=self.project, label_selector='job-origin=pman', include_uninitialized=True)\n for item in api_response.items:\n # Checking if job has finished running, either failed or succeeded\n if item.status.conditions and (item.status.failed or item.status.succeeded):\n # Using start_time because failed jobs have no completion_time\n start_time = item.status.start_time\n current_time = datetime.datetime.now(datetime.timezone.utc)\n diff = current_time-start_time\n # 86400 = number of seconds in a day. 
\"divmod\" returns quotient and remainder as tuple e.g (1, 5.74943)\n # means 1 day and 5.74943 sec have passed between current_time and start_time of the job\n diff_in_seconds = divmod(diff.total_seconds(), 86400)\n if diff_in_seconds[0] >= 1:\n jobs_for_reaper.append(item.metadata.name)\n \n except ApiException as e:\n print(\"Exception when calling BatchV1Api->list_namespaced_job: %s\\n\" % e)\n exit(1)\n return jobs_for_reaper", "def getCondorRunningJobs(user = None):\n if not user:\n user = getpass.getuser()\n\n command = ['condor_q', user]\n pipe = Popen(command, stdout = PIPE, stderr = PIPE, shell = False)\n stdout, error = pipe.communicate()\n\n output = stdout.split('\\n')[-2]\n\n nJobs = int(output.split(';')[0].split()[0])\n\n return nJobs", "def slurm(jobs, threads, tmp_dir):\n # Try to use the following tools in this \n # order to get job information!\n # [1] `dashboard_cli` is Biowulf-specific\n # [2] `sacct` should always be there\n tool_priority = ['dashboard_cli', 'sacct']\n job_tool = get_toolkit(tool_priority)\n # Get information about each job\n # must use eval() to make string\n # to callable function\n eval(job_tool)(jobs=jobs, threads=threads, tmp_dir=tmp_dir)", "def num_worker(self):\n return self.config.get(\"jobs\", 4)", "def forks(self):\r\n return forks.Forks(self)", "def get_jobs(bmc, only_unfinished):\n jobs = bmc.list_jobs(only_unfinished)\n return namedtuples_to_dicts(jobs)", "def list(self):\n\n for job_name in self.upstart.get_all_jobs():\n yield self.get_service(job_name)", "def get_jobs(self, label_selector, namespace):\n return self.batch_client.list_namespaced_job(namespace, label_selector=label_selector, watch=False)", "def get_njobs_in_queue(self, username=None):\n if username is None: username = getpass.getuser()\n njobs, process = self._get_njobs_in_queue(username=username)\n\n if process is not None and process.returncode != 0:\n # there's a problem talking to squeue server?\n err_msg = ('Error trying to get the number of jobs in the queue' +\n 'The error response reads:\\n {}'.format(process.stderr.read()))\n logger.critical(err_msg)\n\n if not isinstance(self, ShellAdapter):\n logger.info('The number of jobs currently in the queue is: {}'.format(njobs))\n\n return njobs", "def get_jobs(self, type = None):\n joblist = JobList()\n for jobs in self.sm.get_jobs(type = type):\n joblist.add_job(jobs['identifier'], jobs['phase'])\n return joblist.tostring()", "def jobs(self):\n return JobCollection(client=self)", "def job_ids(self):\n return self.get_job_ids()", "def get_jobs(self):\n return self.my_user_cron.find_comment(CRONTAB_COMMENT)", "def queue_job_ids(self):\n return list(self.queue.keys())", "def get_jobqueue_cluster(walltime='12:00', ncpus=1, cores=1, local_directory=None, memory='15GB', env_extra=None, **kwargs):\n import dask\n # this is necessary to ensure that workers get the job script from stdin\n dask.config.set({\"jobqueue.lsf.use-stdin\": True})\n from dask_jobqueue import LSFCluster\n import os\n\n if env_extra is None:\n env_extra = [\n \"export NUM_MKL_THREADS=1\",\n \"export OPENBLAS_NUM_THREADS=1\",\n \"export OPENMP_NUM_THREADS=1\",\n \"export OMP_NUM_THREADS=1\",\n ]\n\n if local_directory is None:\n local_directory = '/scratch/' + os.environ['USER'] + '/'\n\n cluster = LSFCluster(queue='normal',\n walltime=walltime,\n ncpus=ncpus,\n cores=cores,\n local_directory=local_directory,\n memory=memory,\n env_extra=env_extra,\n job_extra=[\"-o /dev/null\"],\n **kwargs)\n return cluster", "def query_jobs(repo_name, revision):\n 
return buildapi.query_jobs_schedule(repo_name, revision)", "def get_all_jobs(self) -> List[DocumentReference]:\n return self.get_all_documents(Type._JOBS)", "def job(self):\n\n if self.current_bead is None:\n return None\n\n if self.jobs is None:\n RosProxy().notify(\"Can not find jobs.\", STATE.ERROR)\n return None\n\n _job = None\n for job in self.jobs.configurations:\n if job.job_number == self.current_bead.wps_job_number:\n return job\n\n return None", "def _ls_waiting_jobs(self):\n \n jobs = [j for j in os.listdir(pjoin(self._jobsdir, \"00_waiting\")) if j.endswith(self._job_ext)]\n \n if self._job_filter:\n jobs = [j for j in jobs if self._job_filter(pjoin(self._jobsdir, \"00_waiting\", j), j)]\n \n return jobs", "def get_server_job_ids(self):\n self.server_job_ids = list()\n for server in self.servers:\n if server != 'local':\n with SSHClient(server) as ssh:\n self.server_job_ids.extend(ssh.check_running_jobs_ids())\n else:\n self.server_job_ids.extend(check_running_jobs_ids())", "def job(self):\n return self.batch[self.job_id]", "def getJobList_impl(self):\n my_infos = TestJob.objects.filter(\n (Q(job_status='Running')|Q(job_status='Submitted')|Q(job_status='Incomplete'))\n &Q(check_or_not=True)\n )\n\n if not connection.in_atomic_block:\n self._commit_transaction(src='getInfosList_impl')\n print(\"###\", my_infos)\n logger.info(my_infos)\n return my_infos", "def queue_all_instances(self):\n if not self.is_job:\n return []\n\n tasks_list = []\n for job_instance in self.instances:\n tasks_list.append(job_instance.queue())\n\n self.status = 'QUEUED'\n return tasks_list", "def get_job_information(run_id):\n cmd = [github_cli, 'run', 'view', str(run_id), '--json', 'jobs']\n with subprocess.Popen(cmd, stdout=subprocess.PIPE) as p:\n result, err = p.communicate()\n print(err)\n return json.loads(result)['jobs']", "def get_job(arn=None):\n pass", "def ls(self):\n server = jenkins_server.get_jenkins_server()\n queue = server.get_queue_info()\n print('任务ID\\t%s\\t原因' % '任务链接'.ljust(50))\n for q in queue:\n print('%d\\t%s\\t%s' % (q['id'], q['task']['url'].ljust(50), q['why']))", "async def running(self) -> list[dict[str, Any]]:\n data = await self.controller.request(\"get\", \"watering/program\")\n return cast(list[dict[str, Any]], data[\"programs\"])", "def _get_grid_jobs():\n output = _general_qstat()\n if not output:\n return None\n tree = ElementTree.fromstring(output)\n jobs = []\n for job in tree.iter('job_list'):\n job_number = job[0].text\n output = subprocess.check_output(\"qstat -j %s -xml\" % job[0].text, shell=True)\n job_tree = ElementTree.fromstring(output)[0][0] # First index is djob_info, second is element\n time_str = _get_job_tree_text(job_tree, \"JB_submission_time\")\n try:\n start_time = int(job_tree.find(\"JB_ja_tasks\")[0].find(\"JAT_start_time\").text)\n except (TypeError, AttributeError):\n # TypeError if JB_ja_tasks not in the tree (which will happen if not started)\n # AttributeError if JAT_start_time not in the subtree\n start_time = 0\n jobs.append({\n \"job_number\": int(job_number),\n \"script\": _get_job_tree_text(job_tree, \"JB_script_file\"),\n \"submission_time\": int(time_str) if time_str else 0,\n \"start_time\": start_time,\n \"cwd\": _get_job_tree_text(job_tree, \"JB_cwd\"),\n })\n return jobs", "def get_all_current_processes():\n p = subprocess.Popen(['ps', '-A'], stdout=subprocess.PIPE)\n out, err = p.communicate()\n return out", "def getContext(self):\n context = {}\n result = {}\n service = backendservices()\n # Grab references to all the user's 
StochKitJobs in the system\n all_stochkit_jobs = db.GqlQuery(\"SELECT * FROM StochKitJobWrapper WHERE user_id = :1\", self.user.email_address)\n if all_stochkit_jobs == None:\n context['no_jobs'] = 'There are no jobs in the system.'\n else:\n # We want to display the name of the job and the status of the Job.\n all_jobs = []\n status = {}\n \n for job in all_stochkit_jobs.run():\n \n # Get the job id\n stochkit_job = job.stochkit_job\n \n # Query the backend for the status of the job, but only if the current status is not Finished\n if not stochkit_job.status == \"Finished\":\n try:\n if stochkit_job.resource == 'Local':\n # First, check if the job is still running\n res = service.checkTaskStatusLocal([stochkit_job.pid])\n if res[stochkit_job.pid]:\n stochkit_job.status = \"Running\"\n else:\n # Check if the signature file is present, that will always be the case for a sucessful job.\n # for ssa and tau leaping, this is means.txt\n # for ode, this is output.txt\n\n if stochkit_job.exec_type == 'stochastic':\n file_to_check = stochkit_job.output_location+\"/result/stats/means.txt\"\n else:\n file_to_check = stochkit_job.output_location+\"/result/output.txt\"\n \n if os.path.exists(file_to_check):\n stochkit_job.status = \"Finished\"\n else:\n stochkit_job.status = \"Failed\"\n \n elif stochkit_job.resource == 'Cloud':\n # Retrive credentials from the datastore\n if not self.user_data.valid_credentials:\n return {'status':False,'msg':'Could not retrieve the status of job '+stochkit_job.name +'. Invalid credentials.'}\n credentials = self.user_data.getCredentials()\n\n # Check the status on the remote end\n taskparams = {'AWS_ACCESS_KEY_ID':credentials['EC2_ACCESS_KEY'],'AWS_SECRET_ACCESS_KEY':credentials['EC2_SECRET_KEY'],'taskids':[stochkit_job.pid]}\n task_status = service.describeTask(taskparams)\n job_status = task_status[stochkit_job.pid]\n # It frequently happens that describeTasks return None before the job is finsihed.\n if job_status == None:\n stochkit_job.status = \"Unknown\"\n else:\n\n if job_status['status'] == 'finished':\n # Update the stochkit job \n stochkit_job.status = 'Finished'\n stochkit_job.output_url = job_status['output']\n stochkit_job.uuid = job_status['uuid']\n \n elif job_status['status'] == 'Failed':\n stochkit_job.status == 'Failed'\n elif job_status['status'] == 'pending':\n stochkit_job.status = 'Pending'\n else:\n # The state gives more fine-grained results, like if the job is being re-run, but\n # we don't bother the users with this info, we just tell them that it is still running. 
\n stochkit_job.status == 'Running'\n \n except Exception,e:\n result = {'status':False,'msg':'Could not determine the status of the jobs.'+str(e)}\n \n all_jobs.append(stochkit_job)\n # Save changes to the status\n job.put()\n \n context['all_jobs']=all_jobs\n \n return dict(result,**context)", "def jobsUrl(self):\n return self.sdaUrl + \"/jobs\"", "def get_jobs(k8s_ctx: str, selector: Optional[str] = None, dry_run: bool = False) -> List[str]:\n cmd = 'kubectl --context={k8s_ctx} get jobs -o json'\n if selector is not None:\n cmd += f' -l {selector}'\n if dry_run:\n logging.info(cmd)\n return list()\n\n p = safe_exec(cmd)\n if not p.stdout:\n # a small JSON structure is always returned, even if there are no jobs\n raise RuntimeError('Unexpected lack of output for listing kubernetes jobs')\n out = json.loads(p.stdout.decode())\n return [i['metadata']['name'] for i in out['items']]", "def get_n_jobs(self):\n return self.n_jobs", "def run_jobs(**kwargs): # pylint: disable=W0613\n\n root_nodes, job_instances_map = build_graph(ctx.nodes)\n monitor = Monitor(job_instances_map, ctx.logger)\n\n # Execution of first job instances\n tasks_list = []\n for root in root_nodes:\n tasks_list += root.queue_all_instances()\n monitor.add_node(root)\n wait_tasks_to_finish(tasks_list)\n\n # Monitoring and next executions loop\n while monitor.is_something_executing() and not api.has_cancel_request():\n # Monitor the infrastructure\n monitor.update_status()\n exec_nodes_finished = []\n new_exec_nodes = []\n for node_name, exec_node in monitor.get_executions_iterator():\n if exec_node.check_status():\n if exec_node.completed:\n exec_node.clean_all_instances()\n exec_nodes_finished.append(node_name)\n new_nodes_to_execute = exec_node.get_children_ready()\n for new_node in new_nodes_to_execute:\n new_exec_nodes.append(new_node)\n else:\n # Something went wrong in the node, cancel execution\n cancel_all(monitor.get_executions_iterator())\n return\n\n # remove finished nodes\n for node_name in exec_nodes_finished:\n monitor.finish_node(node_name)\n # perform new executions\n tasks_list = []\n for new_node in new_exec_nodes:\n tasks_list += new_node.queue_all_instances()\n monitor.add_node(new_node)\n wait_tasks_to_finish(tasks_list)\n\n if monitor.is_something_executing():\n cancel_all(monitor.get_executions_iterator())\n\n ctx.logger.info(\n \"------------------Workflow Finished-----------------------\")\n return", "def jobId(self):\n returnself._ShREEKConfig.jobId()", "def threads(self):\n return self.rpc.call(MsfRpcMethod.CoreThreadList)", "def get_job_query(self):\n context = aq_inner(self.context)\n catalog = getToolByName(context, 'portal_catalog')\n mt = getToolByName(self, 'portal_membership') \n currentUser = mt.getAuthenticatedMember() \n \n if \"Site Administrators\" not in currentUser.getGroups():\n\treturn catalog.searchResults(portal_type= 'SeniorProject.PloneAddOn.job', \t\t\t\t Creator = currentUser.getUserName())\n else: \n return catalog.searchResults(portal_type= 'SeniorProject.PloneAddOn.job')", "def get_queue_list(self):\n return self.manager.get_queue_list()", "async def _fetch_data(self) -> JobInfo:\n return await self.api.get_job()", "def _get_queues(self):\n return self.__queues", "def _get_queues(self):\n return self.__queues" ]
[ "0.7408034", "0.72631824", "0.7173004", "0.6856391", "0.6788189", "0.6787631", "0.6698274", "0.6687841", "0.6678214", "0.6678214", "0.66645783", "0.6657631", "0.66119254", "0.65800935", "0.6543887", "0.65284514", "0.64695036", "0.6460383", "0.64549667", "0.64276236", "0.6394917", "0.6378155", "0.6356756", "0.63484544", "0.6314699", "0.6306425", "0.6291282", "0.6276802", "0.62740475", "0.6262553", "0.6256168", "0.62424654", "0.6241257", "0.623269", "0.6223613", "0.6187886", "0.6182987", "0.61744654", "0.6157018", "0.61519283", "0.6144777", "0.6141549", "0.61258787", "0.6121452", "0.6097435", "0.60713035", "0.6070877", "0.6067147", "0.60479647", "0.6039388", "0.60257995", "0.6023359", "0.6019427", "0.60146004", "0.601225", "0.5988627", "0.5986525", "0.5941645", "0.5923916", "0.5923396", "0.59183", "0.59165525", "0.59033364", "0.58978003", "0.5885069", "0.58557546", "0.58522326", "0.5851198", "0.58411247", "0.5838887", "0.58321214", "0.5827155", "0.58245957", "0.58109635", "0.5809856", "0.58007294", "0.5785396", "0.57836556", "0.57769835", "0.57662916", "0.5727809", "0.5725928", "0.57146615", "0.57002354", "0.56960076", "0.56856185", "0.5683132", "0.56801546", "0.5679172", "0.56644464", "0.56600296", "0.56572056", "0.56531936", "0.5635104", "0.56271964", "0.561779", "0.5617626", "0.56094426", "0.56092227", "0.56092227" ]
0.76263565
0
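Illustrative aside (not part of the dataset record above or below): the document in the record that ends here hinges on `divmod(diff.total_seconds(), 86400)` to decide whether at least one whole day has passed since a job started. A minimal, self-contained sketch of that elapsed-time check follows; the names `is_older_than_one_day` and `started` are assumptions introduced for the example, not taken from the record.

```python
from datetime import datetime, timedelta, timezone

def is_older_than_one_day(start_time, now=None):
    """Return True if at least one full day separates start_time from now."""
    now = now or datetime.now(timezone.utc)
    diff = now - start_time
    # divmod on total seconds yields (whole days, leftover seconds),
    # e.g. divmod(91805.7, 86400) == (1.0, 5405.7...) -> 1 day and ~5405 s elapsed.
    days, _leftover_seconds = divmod(diff.total_seconds(), 86400)
    return days >= 1

# A job that started 25 hours ago would qualify for the "reaper" list.
started = datetime.now(timezone.utc) - timedelta(hours=25)
print(is_older_than_one_day(started))  # True
```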
Make directories even if they already exist
def safeMkDir(pth ,verbose ) : try: os.mkdir(pth) except OSError: if verbose: print('directory %s already exists ?!'%pth)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_directories(self):\n os.makedirs(self.data_dir, exist_ok=True)\n os.makedirs(self.patches_dir, exist_ok=True)\n os.makedirs(self.raw_image_dir, exist_ok=True)\n os.makedirs(self.pro_image_dir, exist_ok=True)\n os.makedirs(self.results_dir, exist_ok=True)", "def make_folders(self):\n\t\tfor name in self.folders:\n\t\t\tos.makedirs(self.path+\"/\"+name,exist_ok=True)", "def make_dirs():\n global paths_made\n\n #Have we done this already? Then why are we trying to do it again?\n if paths_made:\n return\n\n #Make the dirs\n os.makedirs(log_dir, exist_ok=True)\n os.makedirs(datastream_dir, exist_ok=True)\n paths_made = True", "def mkdirs():\n if os.path.exists(DIST_DIR):\n shutil.rmtree(DIST_DIR)\n else:\n os.makedirs(BUILD_DIR)", "def remake_directories(*dirnames):\r\n for d in dirnames:\r\n d = path(d)\r\n if d.exists():\r\n d.rmtree()\r\n d.mkdir()\r\n return", "def initialize_directories(): # pragma: no cover\n\n for i in (CACHE_DIR, CONFIG_DIR):\n i.mkdir(parents=True, exist_ok=True)", "def build_directories(self):\n print(\"Building Directories...\")\n\n path_1 = \"./saved_models/model_1/\"\n\n if not os.path.exists(path_1):\n os.mkdir(path_1, 0755)\n print(\"Completed directories creation or if already exist - then checked\")", "def make_dirs(path):\n\tif not os.path.exists(path):\n\t\treturn os.makedirs(path)", "def make_dirs(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def make_directories():\n os.mkdir('principal_wings')\n os.mkdir('random_wings')", "def _check_dirs(self):\r\n for dir in [self.papers_dir,\r\n self.buffer_dir]:\r\n if not os.path.exists(dir):\r\n message = f'Dir not exists: {dir}. Making it.'\r\n logging.warning(message)\r\n os.mkdir(dir)", "def make_dirs(dirs):\n\n for d in dirs:\n if not os.path.exists(d):\n try:\n os.mkdir(d)\n except OSError as e:\n if e.errno != 17:\n raise", "def make_dir_if_needed(dir) :\n\tif not exists(dir) :\n\t\tos.makedirs(dir)", "def make_dir(self):\n if not os.path.exists(self.d):\n try:\n os.mkdir(self.d)\n except OSError, e:\n if e.errno != 17:\n raise\n pass", "def ensure_dirs_exists(self):\n os.makedirs(os.path.join(self.location, \"batches\"), exist_ok=True)\n os.makedirs(os.path.join(self.location, \"results\"), exist_ok=True)", "def make_dir(d):\n if not os.path.exists(d):\n os.makedirs(d)", "def make_dir(d):\n if not os.path.exists(d):\n os.makedirs(d)", "def create_directory_structure():\n\n def ensure_directory(path):\n try:\n os.makedirs(path)\n\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n\n ensure_directory('./out/textures')\n ensure_directory('./out/data')", "def create_dirs():\n run(\"mkdir -p %s\"%RUN_DIR)\n run(\"mkdir -p %s\"%LOG_DIR)", "def prepare_folders():\n folder_list = [\"./data\", \"./data/stage\", \"./data/spoken\", \"./data/stage_lemmas\", \"./data/spoken_lemmas\"]\n for folder in folder_list:\n if not os.path.exists(folder):\n os.mkdir(folder)\n print(f\"Created folder {folder}\")\n else:\n print(f\"Folder {folder} already existed\")", "def dirmaker(dirp):\n try:\n if not os.path.exists(dirp):\n os.makedirs(dirp)\n except:\n pass", "def mkdir(path):", "def make_dir_if_needed(path):\n\n if not os.path.exists(path):\n os.makedirs(path)\n return path", "def mkdirs(path):\n\tif not os.path.exists(path):\n\t\tos.makedirs(path)", "def maybe_make_dir(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def make_directories(path):\n\n os.mkdir('{}'.format(path))\n os.mkdir('{}/perturbed_cp'.format(path))\n os.mkdir('{}/perturbed_wing'.format(path))\n 
os.mkdir('{}/perturbed_wing/format_wing'.format(path))\n os.mkdir('{}/perturbed_wing/unformat_wing'.format(path))", "def create_directories():\n directories = ['train', 'test', 'validation']\n\n for directory in directories:\n try:\n os.mkdir(directory)\n except OSError:\n print (f\"Creation of the directory '{directory}' failed\")", "def create_directory():\n try:\n if os.path.isdir(\"./imagesFromTweets\") != True:\n os.makedirs(\"./imagesFromTweets\")\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise", "def create_directory():\r\n\r\n # Create directory for all lyrics\r\n try:\r\n os.mkdir(markovDir)\r\n except FileExistsError:\r\n pass", "def makeAtomDirectories(self):\n for atom in self.atoms:\n atomDir = os.getcwd() + '/' + atom\n if not os.path.isdir(atomDir):\n subprocess.call(['mkdir',atomDir])", "def make_directories(file_path):\n logger.info(\"Create all directories in the path %s\", file_path)\n if not os.path.exists(file_path):\n os.makedirs(file_path, exist_ok=True)\n else:\n logger.warning(\"Cannot create directories %s. The directory already exists\", file_path)", "def createDirs():\n\n if not os.path.isdir(SCRIPT_DIRECTORY + '../temp/'):\n os.mkdir(SCRIPT_DIRECTORY + '../temp/')\n\n if not os.path.isdir(SCRIPT_DIRECTORY + '../temp/databases/'):\n os.mkdir(SCRIPT_DIRECTORY + '../temp/databases/')\n\n if not os.path.isdir(SCRIPT_DIRECTORY + '../temp/databases/Serotyping_Database/'):\n os.mkdir(SCRIPT_DIRECTORY + '../temp/databases/Serotyping_Database/')\n\n if not os.path.isdir(SCRIPT_DIRECTORY + '../temp/databases/VF_Database/'):\n os.mkdir(SCRIPT_DIRECTORY + '../temp/databases/VF_Database/')\n\n if not os.path.isdir(SCRIPT_DIRECTORY + '../temp/Results/'):\n os.mkdir(SCRIPT_DIRECTORY + '../temp/Results/')\n\n if not os.path.isdir(SCRIPT_DIRECTORY + '../temp/xml/'):\n os.mkdir(SCRIPT_DIRECTORY + '../temp/xml/')\n\n if not os.path.isdir(SCRIPT_DIRECTORY + '../temp/Uploads/'):\n os.mkdir(SCRIPT_DIRECTORY + '../temp/Uploads/')\n\n if not os.path.isdir(SCRIPT_DIRECTORY + '../temp/Results/RGI/'):\n os.mkdir(SCRIPT_DIRECTORY + '../temp/Results/RGI/')", "def makeDirs(self, inPath):\n\n if not os.path.exists(inPath): os.mkdir(inPath)", "def createDirs(self):\n logging.info(\"Creating Directories\")\n\n if not self.img_exist:\n self.reCreateDir(self.savePathJoin(\"Images\"))\n if not self.of_exist:\n self.reCreateDir(self.savePathJoin(\"Of\"))\n if not self.back_of_exist:\n self.reCreateDir(self.savePathJoin(\"Back_Of\"))\n if not self.depth_exist:\n self.reCreateDir(self.savePathJoin(\"Depth\"))\n if not self.object_detection_dir_exist and (\n self.ui.c_object_detection.isChecked() or self.ui.c_crash_plot.isChecked()\n ):\n self.reCreateDir(self.savePathJoin(\"ObjectDetection\"))\n if self.super_pixel_method != \"\" and not os.path.exists(\n os.path.join(self.savePathJoin(\"Super_Pixel\"), self.super_pixel_method)\n ):\n os.makedirs(\n os.path.join(self.savePathJoin(\"Super_Pixel\"), self.super_pixel_method)\n )\n\n self.reCreateDir(RESULTS)\n self.reCreateDir(NP_DIR)\n self.reCreateDir(MASK_DIR)\n\n if self.ui.c_crash_plot.isChecked():\n self.reCreateDir(PLOT_CRASH_DIR)\n if self.ui.c_draw.isChecked():\n self.reCreateDir(DRAW_DIR)\n if self.ui.c_velocity.isChecked():\n self.reCreateDir(VL_DIR)\n if self.ui.c_speed_plot.isChecked():\n self.reCreateDir(PLOT_SPEED_DIR)\n if self.super_pixel_method != \"\":\n self.reCreateDir(SUPER_PIXEL_DIR)\n if self.user[\"GT\"] != \"\" and self.ui.c_error_plot.isChecked():\n self.reCreateDir(PLOT_ERROR_DIR)", "def 
makeDirs(directories):\n createList = [directory for directory in directories if not os.path.exists(directory)]\n# \tmap(os.mkdir, createList)\n for directory in createList:\n os.mkdir(directory)", "def _MakeDirs(self, dir_name):\n try:\n os.makedirs(dir_name, 0755)\n except OSError:\n pass", "def __make_dirs(path, mode=0o777):\n\n try:\n os.makedirs(path, mode=mode)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise Ai1wmError('error creating a directory: {}, error: {}'.format(path, e))\n return path", "def create_directory(self):\n dirname = self.name+\"_distillates\"\n i = 1\n while True:\n try:\n mkdir(dirname)\n return dirname\n except OSError:\n dirname = self.name+\"_distillates_{0}\".format(i)\n i += 1", "def MakeDir(path):\n if os.path.exists(path):\n return False\n else:\n os.makedirs(path)\n return True", "def make_dirs(name, dirs=[\"raw\", \"processed\"]):\n for d in dirs:\n if name not in os.listdir(f\"{project_dir}/data/{d}\"):\n os.mkdir(f\"{project_dir}/data/{d}/{name}\")", "def safeMkDirForce(pth) :\n components = pth.split('/')\n curr_dir = [components[0]]\n for c in components[1:]:\n curr_dir.append(c)\n safeMkDir('/'+os.path.join(*curr_dir),verbose=False)", "def create_directories(path):\n directories = ['images', 'pdf', 'videos', 'audio', 'spreedsheet', 'text', 'scripts', 'docs', 'other']\n for directory in directories:\n create_directory(path, directory)", "def _check_or_create_dir(directory):\n if not tf.gfile.Exists(directory):\n tf.gfile.MakeDirs(directory)", "def create_dirs():\n\tif os.path.isdir(path):\n\t\tshutil.rmtree(path, ignore_errors=True)\n\tos.makedirs(path+\"/log\",exist_ok=True)\n\tos.makedirs(path+\"/losses\",exist_ok=True) \n\tos.makedirs(path+\"/samples\",exist_ok=True)\n\tos.makedirs(path+\"/model\",exist_ok=True)\n\tos.makedirs(path+\"/datasets\",exist_ok=True)\n\tshutil.copy2(\"config.py\", path+\"/config.py\")\n\tfor i in rconfig[\"datasets\"]:\n\t\tdsconfig = get_dsconfig(i)\n\t\tos.makedirs(path+\"/datasets/\"+dsconfig[\"id\"],exist_ok=True)\n\t\tshutil.copy2(i+\"/dsconfig.py\", path+\"/datasets/\"+dsconfig[\"id\"]+\"/dsconfig.py\")\n\t\tcopytree(dsconfig[\"split\"], path+\"/datasets/\"+dsconfig[\"id\"]+\"/split\")", "def make_dir(path):\n if os.path.exists(path):\n return False\n else:\n os.makedirs(path, exist_ok=True)\n return True", "def create_directory_structure(path_main):\n\n if not path_main.exists():\n path_main.mkdir(parents=True)", "def make_dir(self, path):\n import os\n if not os.path.exists(path):\n os.makedirs(path)", "def _make_dirs(filepath, mode):\n parent = filepath.parent\n if \"w\" in mode and parent:\n os.makedirs(parent, exist_ok=True)", "def make_dir(path):\n try:\n os.mkdir(path)\n except OSError:\n pass", "def create_required_dir():\n if not os.path.exists('utils_dfn/temp'):\n os.mkdir('utils_dfn/temp')\n if not os.path.exists('utils_dfn/img'):\n os.mkdir('utils_dfn/img')\n if not os.path.exists('utils_dfn/mask'):\n os.mkdir('utils_dfn/mask')\n if not os.path.exists('utils_dfn/output'):\n os.mkdir('utils_dfn/utils_dfn/output')\n # if not os.path.exists('compare'):\n # os.mkdir('compare')", "def makedirs(*ds):\n for d in ds:\n if not os.path.isdir(d):\n cmd = ['mkdir', '-p', d]\n run_safe(cmd, silent=True)", "def MakeDirs(dirname: str):\n exist_dir = False\n if not exist_dir:\n os.makedirs(dirname, exist_ok=True)", "def _create_directories(self):\n print \"[--init] creating directory structure in %s\" % (self.target_path)\n ensure_path(self.conf_path)\n for subdir in config.PROCESSING_AREAS:\n subdir_path = 
self.data_path + os.sep + subdir\n ensure_path(subdir_path)", "def makedir(p):\n try:\n os.makedirs(p)\n except OSError:\n pass", "def make_dir(path):\n try:\n os.mkdir(path)\n except OSError:\n pass", "def make_dir(path):\n try:\n os.mkdir(path)\n except OSError:\n pass", "def make_dir(path):\n try:\n os.mkdir(path)\n except OSError:\n pass", "def make_dir(path):\n try:\n os.mkdir(path)\n except OSError:\n pass", "def make_dir(dir_path):\n if os.path.isdir(dir_path) == False:\n os.mkdir(dir_path)", "def find_and_create_dirs(dir_name):\n if os.path.exists(dir_name) is False:\n os.makedirs(dir_name)\n return dir_name", "def build_dirs(self, path):\n if not os.path.exists(path):\n os.makedirs(path)\n return path", "def create_dirs():\n os.makedirs(ORIGINAL_LOG_DIR, exist_ok=True)", "def create_directories(self, app_label):\n for folder_name in [\"views\", \"urls\", \"templates/%s\" % app_label]:\n directory_path = \"%s/%s\" % (app_label, folder_name)\n if not os.path.exists(directory_path):\n os.makedirs(directory_path)", "def create_directories(train_path, test_path):\n train_path.joinpath(\"images\").mkdir(parents=True)\n test_path.joinpath(\"images\").mkdir(parents=True)", "def make_dirs(dirpath, debug=False):\n\tif not os.path.exists(dirpath):\n\t\ttry:\n\t\t\tos.mkdir(dirpath)\n\t\texcept OSError as e:\n\t\t\tif debug:\n\t\t\t\tprint(e)\n\t\t\t(head, tail) = os.path.split(dirpath)\n\t\t\tif '/' not in head or os.path.exists(head):\n\t\t\t\treturn False\n\t\t\telse:\n\t\t\t\tif(make_dirs(head)):\n\t\t\t\t\treturn make_dirs(dirpath)\n\treturn dirpath", "def create_dir_if_necessary(path):\n try:\n os.makedirs(path)\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise", "def _makeDir(self):\n try:\n os.mkdir(self.dir)\n # log('created directory: %s\\n' % self.dir)\n except OSError, err:\n if err.errno != errno.EEXIST:\n raise", "def make_directory(self):\n if not os.path.isdir(self.directory):\n os.mkdir(self.directory)", "def mkdir_needed(d):\n dirs=[d['outdir']]\n dirs.append( get_sample_dir(d['outdir'],d['obj']) )\n for dr in dirs:\n if not os.path.exists(dr):\n os.makedirs(dr)", "def create_dir(cls, relpath):\r\n safe_mkdir(os.path.join(cls.build_root, relpath))", "def _create_paths(paths):\n for path in paths:\n _mkdir_if_not_exist(path)", "def make_dirs_or_not(dirpath: Union[PathOrStrType]):\n if not os.path.exists(dirpath):\n os.makedirs(dirpath)", "def makeDir(dir_path):\n if os.path.exists(dir_path): return\n dir_path = os.path.realpath(dir_path)\n dir_path = os.path.normpath(dir_path)\n if os.path.exists(os.path.dirname(dir_path)):\n os.mkdir(dir_path)\n else:\n makeDir(os.path.dirname(dir_path))\n os.mkdir(dir_path)", "def createDirectories(self):\n # -- LOG\n thepath = os.path.dirname(self.settings.logfile)\n distutils.dir_util.mkpath(thepath)\n\n # -- SESSION \n thepath = self.settings.sessionpath\n distutils.dir_util.mkpath(thepath)\n\n # -- DATABASE\n thepath = self.settings.dbpath\n distutils.dir_util.mkpath(thepath)", "def MakeDir(self, path: str) -> None:\n ...", "def new_dir(the_dir):\n try:\n os.makedirs(the_dir)\n except OSError as e:\n if e.errno == errno.EEXIST:\n pass #not a problem if file exists", "def create_dirs(self):\n for new_directory in [self.event_dir, self.event_dir / 'videos']:\n new_directory.mkdir(exist_ok=self.overwrite)\n logger.debug('Dir {} created', new_directory)", "def create_intermediate_files():\n\n dirs = (DIR_PAGE, DIR_SRGB, DIR_VTI, DIR_TIFF,\n DIR_BACK, DIR_TEXT, DIR_FINAL)\n \n for dir in dirs:\n try:\n 
os.mkdir(os.path.join(cwd, dir))\n except OSError, e:\n print 'directory (', dir, ') already exists'", "def create_folders(self):\n for f in self.params['folder_names']:\n if not os.path.exists(f):\n print 'Creating folder:\\t%s' % f\n os.system(\"mkdir %s\" % (f))", "def create_folders(self):\n for f in self.params['folder_names']:\n if not os.path.exists(f):\n print 'Creating folder:\\t%s' % f\n os.system(\"mkdir %s\" % (f))", "def create_folders():\n os.makedirs(GRID_DIR, exist_ok=True)", "def makeDir(path):\r\n\r\n try:\r\n os.makedirs(path)\r\n except OSError as exc:\r\n if exc.errno == errno.EEXIST and os.path.isdir(path):\r\n pass\r\n else:\r\n raise", "def _ensure_dir(dir_name):\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)", "def mkdir_if_missing(d):\n if not os.path.exists(d):\n os.makedirs(d)", "def create_dir():\n if check_dir_exist():\n return False\n else:\n os.makedirs(path_structure)\n return True", "def make_folders(directory, folder_names):\n\n for folder in folder_names:\n if directory == \"\":\n path = folder\n else:\n path = directory + \"/\" + folder\n if not os.path.exists(path):\n os.mkdir(path)\n else:\n print(\"Directory\", path, \"already exists\")", "def make_directory(name: str):\n try:\n os.mkdir(name)\n except:\n pass", "def prepare_dir(path, empty=False):\n\n def create_dir(path):\n \"\"\"\n Creates a directory\n :param path: string\n :return: nothing\n \"\"\"\n try:\n os.makedirs(path)\n except OSError as exc:\n if exc.errno != errno.EEXIST:\n raise\n\n if not os.path.exists(path):\n create_dir(path)", "def dirChecking(dir):\n if not os.path.exists(dir):\n os.mkdir(dir)", "def dlt_create_dir(path): \n shutil.rmtree(path,ignore_errors=True)\n os.makedirs(path, exist_ok = True)", "def ensure_directory(*fnames):\n for fname in fnames:\n directory = os.path.split(fname)[0]\n if not os.path.exists(directory):\n os.makedirs(directory)", "def create_folders():\n if not os.path.exists(\"data/train-npy/\"):\n os.makedirs(\"data/train-npy/\")\n if not os.path.exists(\"data/test-npy/\"):\n os.makedirs(\"data/test-npy/\")\n if not os.path.exists(\"data/valid-npy/\"):\n os.makedirs(\"data/valid-npy/\")", "def make_empty_directories_linux() -> None:\n mkdir(PICTURES_DIR / 'screenshots' / 'grim')\n mkdir(PICTURES_DIR / 'screenshots' / 'swappy')", "def make_dir (path, empty=False):\n\n\n # check if google_cloud is set\n if not path[0:5] == 'gs://':\n\n # if already exists but needs to be empty, remove it first\n if isdir(path) and empty:\n shutil.rmtree(path)\n\n # do not check if directory exists, just try to make it; changed this\n # after racing condition occurred on the ilifu Slurm cluster when\n # reducing flatfields, where different tasks need to make the same\n # directory\n os.makedirs(path, exist_ok=True)\n\n\n return", "def make_release_folders(dirname):\n require('hosts', 'project_path', provided_by=envs.ENVS)\n with cd(env.project_path):\n with cd(dirname):\n run('mkdir -p logs releases packages')\n with cd('releases'):\n run('touch none')\n run('test ! -e current && ln -s none current', quiet=True)\n run('test ! 
-e previous && ln -s none previous', quiet=True)", "def make_dir(path=None):\n\n if not os.path.exists(path):\n try:\n os.makedirs(path)\n except OSError:\n exit(\"\\nOSError: You can not use that directory!\\n\")", "def makePath(path):\n\n with withFileLock(\"creating directory %s\" % path):\n if not os.path.isdir(path):\n os.makedirs(path)", "def mkdir(self, mdir, parents=False):\n assert mdir.startswith('/'), \"%s: invalid manta path\" % mdir\n parts = mdir.split('/')\n assert len(parts) > 3, \"%s: cannot create top-level dirs\" % mdir\n if not parents:\n self.put_directory(mdir)\n else:\n # Find the first non-existant dir: binary search. Because\n # PutDirectory doesn't error on 'mkdir .../already-exists' we\n # don't have a way to detect a miss on `start`. So basically we\n # keep doing the binary search until we hit and close the `start`\n # to `end` gap.\n # Example:\n # - mdir: /trent/stor/builds/a/b/c (need to mk a/b/c)\n # parts: ['', 'trent', 'stor', 'builds', 'a', 'b', 'c']\n # start: 4\n # end: 8\n # - idx: 6\n # d: /trent/stor/builds/a/b (put_directory fails)\n # end: 6\n # - idx: 5\n # d: /trent/stor/builds/a (put_directory succeeds)\n # start: 5\n # (break out of loop)\n # - for i in range(6, 8):\n # i=6 -> d: /trent/stor/builds/a/b\n # i=7 -> d: /trent/stor/builds/a/b/c\n end = len(parts) + 1\n start = 3 # Index of the first possible dir to create.\n while start < end - 1:\n idx = (end - start) / 2 + start\n d = '/'.join(parts[:idx])\n try:\n self.put_directory(d)\n except errors.MantaAPIError:\n _, ex, _ = sys.exc_info()\n if ex.code == 'DirectoryDoesNotExist':\n end = idx\n else:\n raise\n else:\n start = idx\n\n # Now need to create from (end-1, len(parts)].\n for i in range(end, len(parts) + 1):\n d = '/'.join(parts[:i])\n self.put_directory(d)", "def mymkdir(*folders):\n for folder in folders:\n if not os.path.exists(folder):\n os.mkdir(folder)", "def create_directories(dir_names: list, base_path: str):\n\tfor dir_name in dir_names:\n\t\timage_dir = join(base_path, str(dir_name) + 'x')\n\t\tif not isdir(image_dir):\n\t\t\tos.mkdir(image_dir)", "def create_folders(self):\n\n for f in self.params['folder_names']:\n if not os.path.exists(f):\n print 'Creating folder:\\t%s' % f\n os.system(\"mkdir %s\" % (f))" ]
[ "0.76764554", "0.76725763", "0.7648681", "0.7634497", "0.7628304", "0.76242155", "0.75590366", "0.75419897", "0.75321114", "0.75267327", "0.75118554", "0.74595684", "0.7454827", "0.73759156", "0.7366004", "0.7350717", "0.7350717", "0.73391104", "0.73208994", "0.7290557", "0.7273572", "0.7266433", "0.72509295", "0.72298974", "0.72276473", "0.72193533", "0.721934", "0.7207658", "0.72042036", "0.71971387", "0.7184412", "0.71800846", "0.7177454", "0.71729034", "0.71609646", "0.7157394", "0.7155321", "0.7134216", "0.7128399", "0.71238965", "0.7122198", "0.7107741", "0.71064883", "0.7100124", "0.7094971", "0.7094176", "0.70941514", "0.7082269", "0.70753133", "0.70730454", "0.7066835", "0.7053265", "0.70406383", "0.70354354", "0.7034294", "0.7034294", "0.7034294", "0.7034294", "0.7027697", "0.7014432", "0.7011649", "0.7007315", "0.70057493", "0.70006806", "0.699982", "0.69935745", "0.6987081", "0.6985301", "0.6985266", "0.6980521", "0.6978709", "0.6976467", "0.6974437", "0.69486564", "0.69484633", "0.694727", "0.6930938", "0.6930642", "0.6919646", "0.6919646", "0.69134617", "0.6911284", "0.6905011", "0.690149", "0.6900336", "0.6898196", "0.68981135", "0.6895727", "0.689337", "0.6890913", "0.6890913", "0.68901026", "0.68894875", "0.6889157", "0.6886155", "0.6885374", "0.68839735", "0.68790364", "0.6870892", "0.68604094", "0.6857814" ]
0.0
-1
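Illustrative aside (not part of the dataset): the record above, and most of its negatives, express the same idiom of attempting to create a directory while tolerating the "already exists" case. A sketch of the two standard ways to write this in current Python is shown below; the helper names `ensure_dir` and `ensure_dir_legacy` are assumptions for the example.

```python
import errno
import os
from pathlib import Path

def ensure_dir(path):
    """Create path (and any parents) without failing if it already exists."""
    # Modern idiom (Python 3.2+): exist_ok suppresses only the "already exists" error.
    os.makedirs(path, exist_ok=True)

def ensure_dir_legacy(path):
    """Older style: catch EEXIST explicitly and re-raise anything else."""
    try:
        os.makedirs(path)
    except OSError as exc:
        if exc.errno != errno.EEXIST:
            raise

# pathlib equivalent of the modern idiom.
Path("data/output").mkdir(parents=True, exist_ok=True)
```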
Makes a nested directory, even if intermediate links do not yet exist
def safeMkDirForce(pth) : components = pth.split('/') curr_dir = [components[0]] for c in components[1:]: curr_dir.append(c) safeMkDir('/'+os.path.join(*curr_dir),verbose=False)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_directory_structure(path_main):\n\n if not path_main.exists():\n path_main.mkdir(parents=True)", "def makedirs(self, parent):\n if not parent.exists():\n logging.msg('Creating directory structure for \"%s\"' % (\n parent.path,), verbosity=2)\n parent.makedirs()", "def setup_directory_structure(\n output_root: Union[str, Path], with_production: bool = False\n) -> None:\n mkdir(output_root, exists_ok=True, parents=True)\n output_root = Path(output_root).resolve()\n for link in [paths.BEST_LINK, paths.LATEST_LINK]:\n link_path = output_root / link\n if not link_path.is_symlink() and not link_path.exists():\n mkdir(link_path)\n\n if with_production:\n production_dir = output_root / paths.PRODUCTION_RUN\n mkdir(production_dir, exists_ok=True)", "def make_dir(self,*path,**kw):\r\n default = dict(\r\n suffix=None,\r\n make=True\r\n )\r\n for k in kw.keys():\r\n default[k] = kw[k]\r\n\r\n path = os.path.join(*path)\r\n \r\n path = path + self.suffix(default['suffix'])#added\r\n # print 'path is',path\r\n\r\n def makeit(path):\r\n if not os.path.isdir(path):\r\n if default['make']:\r\n os.makedirs(path)\r\n # print 'made new directory %s'%path\r\n else:\r\n print 'folder exists: %s'%path\r\n self.currentfolder = path\r\n self.update_path()\r\n # print 'current folder set to: %s'%self.currentfolder\r\n return self.currentfolder\r\n \r\n if os.path.isabs(path):\r\n #path = path + self.suffix(default['suffix'])\r\n if os.path.isdir(path):\r\n self.currentfolder = os.path.abspath(path)\r\n # print 'current folder set to: %s'%self.currentfolder\r\n self.update_path()\r\n return path\r\n else:\r\n return makeit(path)\r\n else:\r\n print 'folder path is not absolute: making subdirectory.'\r\n path = os.path.join(self.currentfolder,path)\r\n return makeit(path)", "def test_makedirs(self, client, remote_temp_dir):\n\n dir_path = posixpath.join(remote_temp_dir, \"some\", \"nested\", \"dir\")\n\n with HdfsHook() as hook:\n hook.makedirs(dir_path, mode=0o750)\n\n assert client.exists(dir_path)\n assert client.info(dir_path)[\"permissions\"] == 0o750", "def makeDir(dir_path):\n if os.path.exists(dir_path): return\n dir_path = os.path.realpath(dir_path)\n dir_path = os.path.normpath(dir_path)\n if os.path.exists(os.path.dirname(dir_path)):\n os.mkdir(dir_path)\n else:\n makeDir(os.path.dirname(dir_path))\n os.mkdir(dir_path)", "def make_dir(new_dir, path, exist_ok=True, parents=False):\n new_path = path / Path(new_dir)\n new_path.mkdir(exist_ok=exist_ok, parents=parents)\n\n return new_path", "def makedirectory(path):\n\n exist_ok = True\n if not exist_ok and os.path.isdir(path):\n with contextlib.suppress(OSError):\n Path.mkdir(path, parents=True)", "def mkdir(self, mdir, parents=False):\n assert mdir.startswith('/'), \"%s: invalid manta path\" % mdir\n parts = mdir.split('/')\n assert len(parts) > 3, \"%s: cannot create top-level dirs\" % mdir\n if not parents:\n self.put_directory(mdir)\n else:\n # Find the first non-existant dir: binary search. Because\n # PutDirectory doesn't error on 'mkdir .../already-exists' we\n # don't have a way to detect a miss on `start`. 
So basically we\n # keep doing the binary search until we hit and close the `start`\n # to `end` gap.\n # Example:\n # - mdir: /trent/stor/builds/a/b/c (need to mk a/b/c)\n # parts: ['', 'trent', 'stor', 'builds', 'a', 'b', 'c']\n # start: 4\n # end: 8\n # - idx: 6\n # d: /trent/stor/builds/a/b (put_directory fails)\n # end: 6\n # - idx: 5\n # d: /trent/stor/builds/a (put_directory succeeds)\n # start: 5\n # (break out of loop)\n # - for i in range(6, 8):\n # i=6 -> d: /trent/stor/builds/a/b\n # i=7 -> d: /trent/stor/builds/a/b/c\n end = len(parts) + 1\n start = 3 # Index of the first possible dir to create.\n while start < end - 1:\n idx = (end - start) / 2 + start\n d = '/'.join(parts[:idx])\n try:\n self.put_directory(d)\n except errors.MantaAPIError:\n _, ex, _ = sys.exc_info()\n if ex.code == 'DirectoryDoesNotExist':\n end = idx\n else:\n raise\n else:\n start = idx\n\n # Now need to create from (end-1, len(parts)].\n for i in range(end, len(parts) + 1):\n d = '/'.join(parts[:i])\n self.put_directory(d)", "def create_directory_structure():\n\n def ensure_directory(path):\n try:\n os.makedirs(path)\n\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n\n ensure_directory('./out/textures')\n ensure_directory('./out/data')", "def create_dir(link_dir):\n if not os.path.exists(link_dir):\n os.makedirs(link_dir)", "def create_dir(cls, relpath):\r\n safe_mkdir(os.path.join(cls.build_root, relpath))", "def _make_dirs(filepath, mode):\n parent = filepath.parent\n if \"w\" in mode and parent:\n os.makedirs(parent, exist_ok=True)", "def _make_directory_in_files_share(share_client, directory_path, existing_dirs=None):\n from azure.common import AzureHttpError\n from azure.core.exceptions import ResourceExistsError\n\n if not directory_path:\n return\n\n parents = [directory_path]\n p = os.path.dirname(directory_path)\n while p:\n parents.append(p)\n p = os.path.dirname(p)\n\n for dir_name in reversed(parents):\n if existing_dirs and (dir_name in existing_dirs):\n continue\n\n try:\n share_client.get_directory_client(directory_path=dir_name).create_directory()\n except ResourceExistsError:\n pass\n except AzureHttpError:\n from knack.util import CLIError\n raise CLIError('Failed to create directory {}'.format(dir_name))\n\n if existing_dirs:\n existing_dirs.add(directory_path)", "def create_tree(file, rep):\n try:\n if file is not None:\n rep = rep + '/' + file[0:4] + '/' + file[4:6] + '/' + file[6:8]\n if not exists(rep):\n makedirs(rep)\n move(file, rep)\n else:\n if not exists(rep + '/' + file):\n move(file, rep)\n else:\n print('Already exists!')\n except OSError:\n print('Argh! 
I could not create the directory!')", "def safeCreateDir(relPath):\n if not os.path.isdir(relPath):\n os.mkdir(relPath)", "def makenewdir(newdir):\n \n try:\n os.makedirs(newdir)\n except OSError as e:\n if e.errno == 17:\n pass", "def makenewdir(newdir):\n \n try:\n os.makedirs(newdir)\n except OSError as e:\n if e.errno == 17:\n pass", "def new_dir(the_dir):\n try:\n os.makedirs(the_dir)\n except OSError as e:\n if e.errno == errno.EEXIST:\n pass #not a problem if file exists", "def _create_dir(filename):\n head = os.path.dirname(filename)\n if head != '' and not os.path.isdir(head):\n os.makedirs(head)", "def mkdir(path):", "def make_dir(path, is_dir=False):\n target = path if is_dir else os.path.dirname(path)\n try:\n os.makedirs(target)\n except OSError as exception:\n if exception.errno == errno.EEXIST and os.path.isdir(target):\n pass\n else:\n raise", "def makePath(path):\n\n with withFileLock(\"creating directory %s\" % path):\n if not os.path.isdir(path):\n os.makedirs(path)", "def MakeDir(self, path: str) -> None:\n ...", "def dirmaker(dirp):\n try:\n if not os.path.exists(dirp):\n os.makedirs(dirp)\n except:\n pass", "def make_dir_if_needed(dir) :\n\tif not exists(dir) :\n\t\tos.makedirs(dir)", "def make_dir(d):\n if not os.path.exists(d):\n os.makedirs(d)", "def make_dir(d):\n if not os.path.exists(d):\n os.makedirs(d)", "def make_dir(dir_path):\n if os.path.isdir(dir_path) == False:\n os.mkdir(dir_path)", "def make_new_dir(path):\n\n if(not(os.path.isdir(path))):\n os.makedirs(path)\n\n return path", "def make_dir_if_needed(path):\n\n if not os.path.exists(path):\n os.makedirs(path)\n return path", "def create_path(self, path):\n path_list = path.split(\"/\")\n done_path = self.parent_folder + \"/\"\n\n for directory in path_list:\n try:\n os.mkdir(done_path + directory + \"/\")\n except FileExistsError:\n done_path += directory + \"/\"", "def makedir(p):\n try:\n os.makedirs(p)\n except OSError:\n pass", "def test_make_new_dir_2(self):\n new_dir = Path(\"test_dir\")\n Path(self.base_dir, new_dir).mkdir()\n output_path = basic.make_new_dir(self.base_dir, new_dir)\n self.assertIsNone(output_path)", "def remake_directories(*dirnames):\r\n for d in dirnames:\r\n d = path(d)\r\n if d.exists():\r\n d.rmtree()\r\n d.mkdir()\r\n return", "def test_makedirs_exists(self, client, remote_temp_dir):\n\n dir_path = posixpath.join(remote_temp_dir, \"some\", \"nested\", \"dir\")\n\n with HdfsHook() as hook:\n hook.makedirs(dir_path, exist_ok=False)\n\n with pytest.raises(IOError):\n hook.makedirs(dir_path, exist_ok=False)\n\n hook.makedirs(dir_path, exist_ok=True)", "def make_dir(self):\n if not os.path.exists(self.d):\n try:\n os.mkdir(self.d)\n except OSError, e:\n if e.errno != 17:\n raise\n pass", "def create_directory():\n try:\n if os.path.isdir(\"./imagesFromTweets\") != True:\n os.makedirs(\"./imagesFromTweets\")\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise", "def make_dir(name='results'):\n if os.path.isabs(name):\n output_path = name\n else:\n output_path = os.path.join(os.getcwd(), name)\n\n if ('.' 
not in output_path):\n directory = os.path.dirname(os.path.join(output_path, 'toto')) # doesn't work w/o 'toto'\n else :\n directory = os.path.dirname(output_path);\n\n try:\n os.makedirs(directory)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n return output_path", "def create_directory(parent_path, new_folder):\n newdir = os.path.join(parent_path, new_folder)\n if os.path.isdir(newdir):\n return False\n else:\n os.mkdir(newdir)\n return True", "def exist_ok_makedirs (path, mode=0777):\n if not os.path.isdir (path):\n head, tail = os.path.split (path)\n if not tail:\n head, tail = os.path.split (head)\n if head and tail:\n exist_ok_makedirs (head, mode)\n exist_ok_mkdir (path, mode)", "def make_dir(url):\n parts = url.strip('/').split('/')\n done = []\n for part in parts:\n path = os.path.join(STORAGE_PATH, '/'.join(done), part)\n if not os.path.exists(path):\n os.mkdir(path)\n done.append(part)", "def test_make_new_dir_3(self):\n new_dir = Path(\"test_dir\")\n Path(self.base_dir, new_dir).mkdir()\n output_path = basic.make_new_dir(self.base_dir, new_dir, attempt=2)\n exp_dir = Path(\"test_dir_1\")\n exp_path = Path(self.base_dir, exp_dir)\n with self.subTest():\n self.assertTrue(exp_path.is_dir())\n with self.subTest():\n self.assertEqual(exp_dir.stem, output_path.stem)", "def makedirs(*ds):\n for d in ds:\n if not os.path.isdir(d):\n cmd = ['mkdir', '-p', d]\n run_safe(cmd, silent=True)", "def mkdir(directory, parents=True):\n path_dir = Path(directory)\n if not path_dir.exists():\n path_dir.mkdir(parents=parents)", "def make_directory(self):\n if not os.path.isdir(self.directory):\n os.mkdir(self.directory)", "def ensure_directory(explorer, parent_id, dirname):\n cache_key = (parent_id, dirname)\n if cache_key in DIR_CACHE:\n return DIR_CACHE[cache_key]\n\n for folder in explorer.list_folder(parent_id):\n if folder['name'] == dirname:\n folder_id = folder['id']\n break\n else:\n print(\"Creating folder {!r} in parent {}\".format(dirname, parent_id))\n folder_id = explorer.create_folder(dirname, parent_id)\n DIR_CACHE[cache_key] = folder_id\n return folder_id", "def create_new_dir(path):\n logger.debug('Function Successful: % s',\n 'create_new_dir: create_new_dir successfully called from save_single_file_locally', extra=d)\n\n if not os.path.exists(path):\n logger.debug('Calling Function: % s',\n 'create_new_dir: create_new_dir calling makedirs', extra=d)\n os.makedirs(path)\n logger.debug('Function Successful: % s',\n 'create_new_dir: create_new_dir successfully called makedirs', extra=d)", "def makedirs(name, exist_ok=False):\n if not os.path.exists(name) or not exist_ok:\n os.makedirs(name)", "def makeDir(path):\r\n\r\n try:\r\n os.makedirs(path)\r\n except OSError as exc:\r\n if exc.errno == errno.EEXIST and os.path.isdir(path):\r\n pass\r\n else:\r\n raise", "def ensure_dir(self, *args):\n return self.ensure(*args, **{\"dir\": True})", "def makedirs(self):\n normpath = os.path.normpath(self.path)\n parentfolder = os.path.dirname(normpath)\n if parentfolder:\n try:\n os.makedirs(parentfolder)\n except OSError:\n pass", "def create_dir(newdir):\n try:\n os.makedirs(newdir)\n except OSError as exc:\n if exc.errno == errno.EEXIST and os.path.isdir(newdir):\n pass\n else:\n raise", "def recur_mkdir(structure: Union[dict, list, str], current_entry, current_base: Path):\n\n if current_entry and isinstance(current_entry, str):\n current_base /= current_entry\n current_base.mkdir()\n else:\n return makedir_iterable_structure(current_entry, current_base)\n\n if 
isinstance(structure, dict):\n next_structure = structure.get(current_entry)\n if isinstance(next_structure, (list, dict)):\n return makedir_iterable_structure(next_structure, current_base)\n else:\n return recur_mkdir([], next_structure, current_base)", "def makeDirs(self, inPath):\n\n if not os.path.exists(inPath): os.mkdir(inPath)", "def prepare_folder(path):\n if not os.path.isdir(path):\n os.makedirs(path)", "def create_directory(self):\n dirname = self.name+\"_distillates\"\n i = 1\n while True:\n try:\n mkdir(dirname)\n return dirname\n except OSError:\n dirname = self.name+\"_distillates_{0}\".format(i)\n i += 1", "def ifnotexistmkdir(directory):\n if not os.path.exists(directory):\n os.mkdir(directory)\n return Path(directory)", "def make_dir(path):\n if os.path.exists(path):\n return False\n else:\n os.makedirs(path, exist_ok=True)\n return True", "def _ensure_relative_directory(self, path):\n tgt = os.path.join(os.getcwd(), path)\n try:\n os.makedirs(tgt)\n except OSError as exc: # Python >2.5\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise\n return tgt", "def make_folder(l: str) -> None:\n\n Path(l).mkdir(parents=True, exist_ok=True)\n\n return", "def ensure_directory(self, name, dest, mode=0777):\n self.m.path.assert_absolute(dest)\n self._run(\n name, ['ensure-directory', '--mode', oct(mode), dest])\n self.m.path.mock_add_paths(dest)", "def make_dir(file_name): # output_file_loc = des\n for i in os.walk(f'{tmp_path}/{file_name}'):\n fld = i[0].split(file_name)[-1]\n if fld:\n loc = f\"{output_path}{fld}\"\n if not os.path.exists(f'{output_path}/{fld}'):\n os.makedirs(f'{output_path}/{fld}')\n # print(\"MAKE_DIR completed...\") \n return", "def maybe_make_dir(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def create_dir():\n if check_dir_exist():\n return False\n else:\n os.makedirs(path_structure)\n return True", "def _create_dir_object(self, dir_path, metadata=None):\n full_path = os.path.join(self._container_path, dir_path)\n cur_path = full_path\n stack = []\n while True:\n md = None if cur_path != full_path else metadata\n ret, newmd = make_directory(cur_path, self.uid, self.gid, md)\n if ret:\n break\n # Some path of the parent did not exist, so loop around and\n # create that, pushing this parent on the stack.\n if os.path.sep not in cur_path:\n raise DiskFileError(\"DiskFile._create_dir_object(): failed to\"\n \" create directory path while exhausting\"\n \" path elements to create: %s\" % full_path)\n cur_path, child = cur_path.rsplit(os.path.sep, 1)\n assert child\n stack.append(child)\n\n child = stack.pop() if stack else None\n while child:\n cur_path = os.path.join(cur_path, child)\n md = None if cur_path != full_path else metadata\n ret, newmd = make_directory(cur_path, self.uid, self.gid, md)\n if not ret:\n raise DiskFileError(\"DiskFile._create_dir_object(): failed to\"\n \" create directory path to target, %s,\"\n \" on subpath: %s\" % (full_path, cur_path))\n child = stack.pop() if stack else None\n return True, newmd", "def mkdirpath (dirpath):\n\n if os.path.isdir(dirpath):\n return\n\n incpath = \"\"\n for subdir in os.path.normpath(dirpath).split(os.path.sep):\n incpath = os.path.join(incpath, subdir)\n if not os.path.isdir(incpath):\n os.mkdir(incpath)", "def create_directory() -> None:\n slash_indexes = []\n for x in range(0, len(directory)):\n if directory[x] == \"/\" or directory[x] == \"\\\\\":\n slash_indexes.append(x)\n \n directories_to_create = []\n for x in range(0, len(slash_indexes)):\n if x == 
len(slash_indexes)-1:\n if os.path.isdir(directory[0:len(directory)]):\n existing_directory = directory[0:len(directory)]\n else:\n directories_to_create.append(directory[0:len(directory)])\n\n else: \n if os.path.isdir(directory[0:slash_indexes[x+1]]):\n existing_directory = directory[0:slash_indexes[x+1]]\n else:\n directories_to_create.append(directory[0:slash_indexes[x+1]])\n\n for _dir in directories_to_create:\n os.mkdir(_dir)", "def MakeDir(path):\n if os.path.exists(path):\n return False\n else:\n os.makedirs(path)\n return True", "def test_make_new_dir_4(self):\n new_dir = Path(\"test_dir\")\n Path(self.base_dir, new_dir).mkdir()\n Path(self.base_dir, new_dir.stem + \"_1\").mkdir()\n output_path = basic.make_new_dir(self.base_dir, new_dir, attempt=3)\n exp_dir = Path(\"test_dir_2\")\n exp_path = Path(self.base_dir, exp_dir)\n with self.subTest():\n self.assertTrue(exp_path.is_dir())\n with self.subTest():\n self.assertEqual(exp_dir.stem, output_path.stem)", "def ensure_dir(path):\n parent = os.path.dirname(path)\n if not os.path.exists(parent):\n os.makedirs(parent)", "def make_dirs(dirpath, debug=False):\n\tif not os.path.exists(dirpath):\n\t\ttry:\n\t\t\tos.mkdir(dirpath)\n\t\texcept OSError as e:\n\t\t\tif debug:\n\t\t\t\tprint(e)\n\t\t\t(head, tail) = os.path.split(dirpath)\n\t\t\tif '/' not in head or os.path.exists(head):\n\t\t\t\treturn False\n\t\t\telse:\n\t\t\t\tif(make_dirs(head)):\n\t\t\t\t\treturn make_dirs(dirpath)\n\treturn dirpath", "def make_dirs():\n global paths_made\n\n #Have we done this already? Then why are we trying to do it again?\n if paths_made:\n return\n\n #Make the dirs\n os.makedirs(log_dir, exist_ok=True)\n os.makedirs(datastream_dir, exist_ok=True)\n paths_made = True", "def mirror_directory_tree(self, out_dirpath, avoid_dirpath=None):\n if out_dirpath[-1] != '/':\n out_dirpath += '/'\n if not os.path.exists(out_dirpath):\n os.makedirs(out_dirpath)\n if avoid_dirpath:\n if avoid_dirpath[-1] != '/':\n avoid_dirpath += '/'\n if self.dirpath == avoid_dirpath:\n return\n self.mirror_directory_tree_loop(self, self.dirpath, out_dirpath, avoid_dirpath)\n return", "def make_folders(self):\n\t\tfor name in self.folders:\n\t\t\tos.makedirs(self.path+\"/\"+name,exist_ok=True)", "def handle_directory(directory_path):\n # if directory has no trailing '/' then add it\n if directory_path[-1] != '/':\n directory_path += '/'\n # if directory doesn't exist then create it\n if not os.path.exists(directory_path):\n os.mkdir(directory_path)\n\n return directory_path", "def create_unexisted_dir(directory, element):\n directory += \"/\" + element\n if get_file_type(directory) == 0:\n mkdir(directory)\n return directory", "def MaybeMakeDirectory(*path):\n file_path = os.path.join(*path)\n try:\n os.makedirs(file_path)\n except OSError, e:\n if e.errno != errno.EEXIST:\n raise", "def create_intermediate_files():\n\n dirs = (DIR_PAGE, DIR_SRGB, DIR_VTI, DIR_TIFF,\n DIR_BACK, DIR_TEXT, DIR_FINAL)\n \n for dir in dirs:\n try:\n os.mkdir(os.path.join(cwd, dir))\n except OSError, e:\n print 'directory (', dir, ') already exists'", "def test_make_new_dir_1(self):\n test_dir = Path(\"test_dir\")\n output_path = basic.make_new_dir(self.base_dir, test_dir)\n exp_dir = \"test_dir\"\n exp_path = Path(self.base_dir, exp_dir)\n with self.subTest():\n self.assertTrue(exp_path.is_dir())\n with self.subTest():\n self.assertEqual(exp_dir, output_path.stem)", "def mkdir(path):\n\tif not Path(path).exists():\n\t\tPath(path).mkdir(parents=True, exist_ok=True)", "def create_folder(path):\n 
try:\n os.listdir(path)\n except:\n os.makedirs(path)\n else:\n shutil.rmtree(path)\n os.makedirs(path)\n return path", "def dlt_create_dir(path): \n shutil.rmtree(path,ignore_errors=True)\n os.makedirs(path, exist_ok = True)", "def make_dir(path):\n try:\n os.mkdir(path)\n except OSError:\n pass", "def create_directory(path, name):\n new_path = os.path.join(path, name)\n if not os.path.isdir(new_path):\n subprocess.run(['mkdir', new_path])", "def make_dir(sc, dir_path):\n try:\n filesystem = get_file_system(sc)\n tmp = path(sc, dir_path)\n if not filesystem.exists(tmp):\n filesystem.mkdirs(tmp)\n except Exception as e:\n raise e", "def make_dir (path, empty=False):\n\n\n # check if google_cloud is set\n if not path[0:5] == 'gs://':\n\n # if already exists but needs to be empty, remove it first\n if isdir(path) and empty:\n shutil.rmtree(path)\n\n # do not check if directory exists, just try to make it; changed this\n # after racing condition occurred on the ilifu Slurm cluster when\n # reducing flatfields, where different tasks need to make the same\n # directory\n os.makedirs(path, exist_ok=True)\n\n\n return", "def create_directory_structure(root):\n berlin = os.path.join(root, \"Berlin\",\"Berlin_test\")\n istanbul = os.path.join(root, \"Istanbul\",\"Istanbul_test\")\n moscow = os.path.join(root, \"Moscow\", \"Moscow_test\")\n try:\n os.makedirs(berlin)\n os.makedirs(istanbul)\n os.makedirs(moscow)\n except OSError:\n print(\"failed to create directory structure\")\n sys.exit(2)", "def prepare_dir(path, empty=False):\n\n def create_dir(path):\n \"\"\"\n Creates a directory\n :param path: string\n :return: nothing\n \"\"\"\n try:\n os.makedirs(path)\n except OSError as exc:\n if exc.errno != errno.EEXIST:\n raise\n\n if not os.path.exists(path):\n create_dir(path)", "def make_subdirs(self) -> None:\r\n\r\n # Pull off everything below the root.\r\n subpath = self.full_path[len(self.context.root):]\r\n logger.debug(f\"make_subdirs: subpath is {subpath}\")\r\n \r\n # Split on directory separators, but drop the last one, as it should\r\n # be the filename.\r\n dirs = subpath.split(os.sep)[:-1]\r\n logger.debug(f\"dirs is {dirs}\")\r\n current = self.context.root\r\n \r\n for dir in dirs:\r\n if dir:\r\n current = os.path.join(current, dir)\r\n if not os.path.isdir(current):\r\n os.mkdir(current, 0o700) # FIXME - This should be defined in the server startup\r", "def ensure_dir(f):\n\td=os.path.dirname(f)\n\tif not os.path.exists(d):\n\t\tos.makedirs(d)", "def _ensure_directory(self, dirname):\n if not os.path.exists(dirname):\n os.makedirs(dirname)", "def ensuredir(dpath, mode=0o1777):\n if isinstance(dpath, (list, tuple)): # nocover\n dpath = join(*dpath)\n if not exists(dpath):\n try:\n os.makedirs(normpath(dpath), mode=mode)\n except OSError: # nocover\n raise\n return dpath", "def create_directory(directory, parents=False, exist_ok=False):\n directory.mkdir(parents=parents, exist_ok=exist_ok)", "def ensure_directory(path):\n\tdir_path = os.path.dirname(path)\n\tif os.path.exists(dir_path):\n\t\treturn\n\tensure_directory(dir_path)\n\ttry:\n\t\tos.mkdir(dir_path)\n\texcept OSError as e:\n\t\t# Ignore if EEXISTS. 
This is needed to avoid a race if two getters run at once.\n\t\tif e.errno != errno.EEXIST:\n\t\t\traise", "def make_dir(path, allow_symlink=True):\n if not os.path.exists(path):\n if not allow_symlink and os.path.islink(path):\n raise Exception('Dangling link: ' + path)\n os.mkdir(path)\n os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)", "def ensure_dir(root, path):\n full_path = root\n for seg in path.split(os.sep):\n full_path += os.sep + seg\n if os.path.exists(full_path):\n if not os.path.isdir(full_path):\n raise ValueError(\"'{}' is not a directory\".format(full_path))\n else:\n os.makedirs(full_path)", "def make_directory(dir_path):\n abs_dir_path = os.path.abspath(dir_path)\n if not os.path.exists(abs_dir_path):\n os.makedirs(abs_dir_path)", "def make_dir(path):\n try:\n os.mkdir(path)\n except OSError:\n pass", "def make_dir(path):\n try:\n os.mkdir(path)\n except OSError:\n pass" ]
[ "0.6948033", "0.6809353", "0.68089527", "0.6735363", "0.6733862", "0.67253435", "0.6716056", "0.67078507", "0.6676089", "0.6670901", "0.6664893", "0.66282934", "0.66131335", "0.6590413", "0.65780735", "0.6577037", "0.6561761", "0.6561761", "0.65132505", "0.6512065", "0.6511371", "0.65023", "0.6492927", "0.64810026", "0.6476417", "0.6455103", "0.6424133", "0.6424133", "0.64099765", "0.6403998", "0.64030194", "0.64009976", "0.63803345", "0.63705885", "0.6357044", "0.6352424", "0.6349826", "0.6346909", "0.63295454", "0.63249797", "0.6323272", "0.6299009", "0.62989193", "0.6292677", "0.62798786", "0.6277357", "0.6273613", "0.62715876", "0.62658805", "0.62609446", "0.6253205", "0.6251032", "0.62453943", "0.62403", "0.62379587", "0.6231581", "0.6231268", "0.6230838", "0.6229865", "0.62297237", "0.6228938", "0.6224457", "0.622272", "0.6221156", "0.6220079", "0.62200767", "0.62166613", "0.62165415", "0.6215868", "0.6215705", "0.6212213", "0.62090087", "0.62039596", "0.6193389", "0.6187576", "0.6186644", "0.61858207", "0.6177464", "0.6165416", "0.6160389", "0.61599725", "0.61533207", "0.6150213", "0.6149229", "0.6144244", "0.6143528", "0.61417836", "0.61363393", "0.61313945", "0.6130433", "0.61284906", "0.61181587", "0.61176276", "0.6115529", "0.6114204", "0.61084116", "0.6108187", "0.6105847", "0.6102872", "0.6102872" ]
0.6692424
8
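Illustrative aside (not part of the dataset): the document above builds the nested path one component at a time and issues a single-level mkdir for each prefix. As a hedged sketch, the standard library covers the common case directly, and a manual prefix walk is mainly useful when each level needs special handling (permissions, logging, remote APIs, as in several of the negatives). The names `make_nested` and `make_nested_stepwise` are assumptions for the example.

```python
import os
from pathlib import Path

def make_nested(path):
    """Create path and every missing intermediate directory."""
    # os.makedirs is the recursive counterpart of os.mkdir.
    os.makedirs(path, exist_ok=True)

# pathlib equivalent: parents=True creates intermediates, exist_ok=True is idempotent.
Path("a/b/c/d").mkdir(parents=True, exist_ok=True)

def make_nested_stepwise(path):
    """Manual prefix walk, close in spirit to the record's document."""
    parts = [p for p in os.path.normpath(path).split(os.sep) if p]
    current = os.sep if os.path.isabs(path) else ""
    for part in parts:
        current = os.path.join(current, part)
        if not os.path.isdir(current):
            os.mkdir(current)
```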
Flattens a list of Lists to a list
def flatten(lol ): return [item for sublist in lol for item in sublist]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def flat_list_of_lists(l):\n return [item for sublist in l for item in sublist]", "def flattened(list_of_lists):\n res = functools.reduce(operator.iconcat, list_of_lists, [])\n return res", "def flatten(list_of_lists: List[List]) -> List:\n return reduce(iconcat, list_of_lists, [])", "def flatten(listoflists):\r\n return [j for i in listoflists for j in i]", "def flatten(list_of_lists: List[List]) -> List:\n return list(itertools.chain(*list_of_lists))", "def flatten_list(l):\n return [item for sublist in l for item in sublist]", "def flat_list(list_: list) -> list:\n return [item for sublist in list_ for item in sublist]", "def flatten(list_of_lists):\n flattened_list = [y for x in list_of_lists for y in x]\n return flattened_list", "def flatten(l):\n return [item for sublist in l for item in sublist]", "def flatten(l):\n return [item for sublist in l for item in sublist]", "def flatten(ls):\r\n return [item for sublist in ls for item in sublist]", "def flattenList(input_list):\r\n return [item for sublist in input_list for item in sublist]", "def flatten(list_of_lists: List[Any]) -> List[Any]:\n return list(itertools.chain.from_iterable(list_of_lists))", "def flatten(l: List[List[Any]]) -> List[Any]:\n\n return [x for y in l for x in y]", "def flatten_list(in_list):\n return [item for sublist in in_list for item in sublist]", "def flatten(listOfLists):\n return list(chain.from_iterable(listOfLists))", "def flatten_lists(lst):\n return list(chain(*lst))", "def flattenList(l=None):\r\n flat_list = [item for sublist in l for item in sublist]\r\n return flat_list", "def list_flatten(input_list):\n if len(input_list) > 0 and isinstance(input_list[0], (list, np.ndarray)):\n return functools.reduce(operator.iconcat, input_list, [])\n\n return input_list", "def flatten(l):\n result = []\n for x in l:\n if type(x) is list:\n result.extend(flatten(x))\n else:\n result.append(x)\n return result", "def flatten(nested_list: List[List[T]]) -> List[T]:\n return [item for sublist in nested_list for item in sublist]", "def make_flat(list_of_lists: list) -> list:\n return sum([list(item) for item in list_of_lists], [])", "def flatten(list_of_lists):\n flattened = []\n for item in list_of_lists:\n if isinstance(item, Iterable) and not isinstance(item, (str, bytes)):\n flattened.extend(item)\n else:\n flattened.append(item)\n\n return flattened", "def _flatten_list(x):\n return list(chain.from_iterable(x))", "def flatten_list_of_lists(list_of_lists):\n\n if isinstance(list_of_lists, list):\n rt = []\n for i in list_of_lists:\n if isinstance(i, list):\n rt.extend(ConfigManager.flatten_list_of_lists(i))\n else:\n rt.append(i)\n\n return rt\n\n return list_of_lists", "def flatten(nested_list):\n return [item for a_list in nested_list for item in a_list]", "def flatten(list):\n\n if isinstance(list, collections.Iterable) and not isinstance(list, (str, bytes)):\n return [a for i in list for a in flatten(i)]\n else:\n return [list]", "def _flatten(x: Sequence) ->list:\n return [item for sublist in x for item in sublist]", "def flatten(nested_list):\r\n return list(chain.from_iterable(nested_list))", "def flatten(list_to_flatten): \n flattened_list = []\n for item in list_to_flatten:\n if isinstance(item, list) or isinstance(item, tuple):\n flattened_list += flatten(item)\n else:\n flattened_list.append(item)\n return flattened_list", "def flatten(nestedlist: Iterable) -> list:\n newlist = []\n for item in nestedlist:\n if isinstance(item, list):\n newlist.extend(flatten(item))\n else:\n newlist.append(item)\n 
return newlist", "def flatten(lis):\n\tnew_lis = []\n\tfor item in lis:\n\t\tif type(item) == type([]):\n\t\t\tnew_lis.extend(flatten(item))\n\t\telse:\n\t\t\tnew_lis.append(item)\n\treturn new_lis", "def flatten_list(lol):\n return list(itertools.chain.from_iterable(lol))", "def flatten(lst):\n out = []\n for v in lst:\n if v is None: continue\n if isinstance(v, list):\n out.extend(flatten(v))\n else:\n out.append(v)\n return out", "def flatten_list(alist):\n return list(flatten_list_gen(alist))", "def flatten_list(lst):\r\n if not lst:\r\n return []\r\n if type(lst[0]) == list:\r\n return flatten_list(lst[0]) + flatten_list(lst[1:])\r\n return [lst[0]] + flatten_list(lst[1:])", "def flatten(nested_lst):\r\n if not isinstance(nested_lst, list):\r\n return(nested_lst)\r\n\r\n res = []\r\n for l in nested_lst:\r\n if not isinstance(l, list):\r\n res += [l]\r\n else:\r\n res += flatten(l)\r\n\r\n\r\n return(res)", "def flatten(t):\n flat_list = []\n for sublist in t:\n for item in sublist:\n flat_list.append(item)\n return flat_list", "def flatten_list(_list):\n if not _list:\n return []\n return reduce(operator.add, _list)", "def flatten(lis):\n new_lis = []\n for item in lis:\n if type(item) == type([]):\n new_lis.extend(flatten(item))\n else:\n new_lis.append(item)\n return new_lis", "def flatten(lst):\n buildlist = []\n for i in lst:\n if type(i) is list:\n buildlist += flatten(i)\n else:\n buildlist.append(i)\n return buildlist", "def _flatten_list(input_list: Any) -> List[int]:\n flattened_list = []\n for element in input_list:\n if isinstance(element, list):\n flattened_list += _flatten_list(element)\n else:\n flattened_list.append(element)\n return flattened_list", "def flatten(nested_list):\n result = []\n for item in nested_list:\n if type(item) == type([]):\n result.extend(flatten(item))\n else:\n result.append(item)\n return result", "def flatten(llst):\n res = []\n for lst in llst:\n res += lst\n return res", "def flatten(ls):\n return sum(ls, [])", "def flatten(self):\n if self.data:\n def flat(l):\n ans=[]\n for i in l:\n if type(i)==list:\n ans.extend(flat(i))\n else:\n ans.append(i)\n return ans\n return flat(self.data)\n else:\n return []", "def flatten(nested_list):\n t_l = []\n for i in nested_list:\n if not isinstance(i, list):\n t_l.append(i)\n else:\n t_l.extend(flatten(i))\n return t_l", "def flatten_one(list_of_lists):\n \n for x in list_of_lists:\n if isinstance(x, list):\n for y in x:\n yield y\n else:\n yield x", "def flatten(t: typing.Iterable[typing.List[FlattenItem]]) -> typing.List[FlattenItem]:\n return list(itertools.chain.from_iterable(t))", "def flatten(items):\n if items == []:\n return items\n if isinstance(items, list):\n flattend = []\n for item in items:\n flattend.extend(flatten(item))\n return flattend\n return [items]", "def flatten(src):\n return [item for sublist in src for item in sublist]", "def flatten_one(list_of_lists):\n\n for x in list_of_lists:\n if isinstance(x, list):\n for y in x:\n yield y\n else:\n yield x", "def flat_list(old_list):\n new_list = []\n for element in old_list:\n if \"list\" in str(type(element)):\n recursive_list = flat_list(element)\n for sub_element in recursive_list:\n new_list.append(sub_element) \n else:\n new_list.append(element)\n return new_list", "def flatten( liste ) :\n return list(set([ e for sublist in liste for e in sublist ]))\n # TODO :\n # more efficient to use\n # import itertools\n # list(itertools.chain(*list2d))", "def flatten_2D_list(list_2d):\n return [item for sub in list_2d for item in sub]", 
"def lflatten(*lst):\n return flatten(list(lst))", "def flatten_list(l):\n obj = []\n\n def recurse(ll):\n if isinstance(ll, list) or isinstance(ll, np.ndarray):\n for i, _ in enumerate(ll):\n recurse(ll[i])\n else:\n obj.append(ll)\n\n recurse(l)\n return obj", "def flatten(lst):\n \n for x in lst:\n if isinstance(x, list):\n for x in flatten(x):\n yield x\n else:\n yield x", "def flatten(iterable):\n result = []\n\n for item in iterable:\n if isinstance(item, list):\n result.extend(flatten(item))\n else:\n result.append(item)\n\n return result", "def _transpose_list_of_lists(list_of_lists: List[List[any]]) -> List[List[any]]:\n new_list = []\n for i in range(len(list_of_lists[0])):\n new_list.append([])\n for j in range(len(list_of_lists)):\n new_list[i].append(list_of_lists[j][i])\n return new_list", "def flatten_list(x):\n result = []\n for el in x:\n if hasattr(el, \"__iter__\") and not isinstance(el, basestring):\n result.extend(flatten_list(el))\n else:\n result.append(el)\n return result", "def flatten_as_list(iterable):\n return list(chain(*iterable))", "def flatten(l):\n if isinstance(l, list):\n for el in l:\n if isinstance(el, list):\n yield from flatten(el)\n else:\n yield el\n else:\n return l", "def flatten_list(nested_list):\n nested_list = deepcopy(nested_list)\n while nested_list:\n sublist = nested_list.pop(0)\n if isinstance(sublist, list):\n nested_list = sublist + nested_list\n else:\n yield sublist", "def convert_list(l):\r\n l = [list(elem) for elem in l]\r\n return l", "def flatten_list(lst):\n assert isinstance(lst, list), \"you didn't pass a list!\"\n\n if isinstance(lst[0], list):\n if len(lst[0])>1:\n return ['-'.join(i) for i in lst] # then its a kpoints list\n return flatten_list([i[0] for i in lst])\n else:\n return [i for i in lst]", "def flatten_me(lst, new_lst=None):\n if new_lst is None:\n new_lst = []\n\n for item in lst:\n if isinstance(item, list):\n flatten_me(item, new_lst)\n else:\n new_lst.append(item)\n\n return new_lst", "def listify(x, dedup=True):\n if not isinstance(x, list):\n x = [x]\n res = flatten(x)\n res = [x for x in res if x is not None]\n if dedup:\n return dedup_list(res)\n return res", "def flatten(L):\n def rec(lst):\n for el in lst:\n if type(el) is not type([]):\n result.append(el)\n else:\n rec(el)\n result = []\n rec(L)\n return result", "def flatten(lst):\n\n for x in lst:\n if isinstance(x, list):\n for x in flatten(x):\n yield x\n else:\n yield x", "def flatten(box: list) -> list:\n if len(box) == 1:\n result = flatten(box[0]) if type(box[0]) == list else box\n elif type(box[0]) == list:\n result = flatten(box[0]) + flatten(box[1:])\n else:\n result = [box[0]] + flatten(box[1:])\n return result", "def flatten(self, l, ltypes=(list, tuple)):\n i = 0\n while i < len(l):\n while isinstance(l[i], ltypes):\n if not l[i]:\n l.pop(i)\n if not len(l):\n break\n else:\n l[i:i + 1] = list(l[i])\n i += 1\n return l", "def flatten_2d(a_2dlist):\n return list(itertools.chain(*a_2dlist))", "def flatten(lst):\n if atomp(lst):\n return lst\n return _flatten(lst)", "def flatten(seq):\n \n ret = []\n def _flatten(seq):\n for i in seq:\n if isinstance(i, (list, tuple)):\n _flatten(i)\n else:\n ret.append(i)\n return ret\n \n if isinstance(seq, tuple):\n return tuple(_flatten(seq))\n \n return _flatten(seq)", "def flatten(lst):\n assert (isinstance(lst, list)), \"Argument must be an list\"\n\n for i in lst:\n \t# print(i)\n \tif type(i) == list:\n \t\tflatten(i)\n \telse:\n \t\tuniversal_lst.append(i)\n return universal_lst", "def 
flatten(lst):\n if atomp(lst): # scalar\n return lst\n return _flatten(lst)", "def flatten(l, level = 0):\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n flat_l = []\n if isinstance(l, list):\n for e in l:\n flat_l += flatten(e)\n else:\n flat_l = [l]\n\n return flat_l", "def flatten(x: List) -> List:\n return functools.reduce(lambda cum, this: cum + this, x, [])", "def _transpose_list_of_lists(lol):\n assert lol, \"cannot pass the empty list\"\n return [list(x) for x in zip(*lol)]", "def flatten(coordinates_list: list) -> list:\n flattened_list = []\n for coordinate in coordinates_list:\n if isinstance(coordinate, list):\n flattened_list.extend(flatten(coordinate))\n else:\n flattened_list.append(coordinate)\n return flattened_list", "def flatten(iterable):\n return [x for x in actually_flatten(iterable)]", "def transpose(a: List[List[T]]) -> List[List[T]]:\n return list(map(list, zip(*a)))", "def flatten(list_):\n for elem in list_:\n if type(elem) != list:\n yield elem\n else:\n yield from flatten(elem)", "def flatten_twice(listOfListsOfLists):\n return flatten(flatten(listOfListsOfLists))", "def flatten(l: iter):\n return functools.reduce(lambda x, y: x + y, l)", "def flatten(a):\r\n if isinstance(a, (tuple, list, set)):\r\n l = []\r\n for item in a:\r\n l.extend(flatten(item))\r\n return l\r\n else:\r\n return [a]", "def transpose_list_of_lists(lol):\n assert lol, \"cannot pass the empty list\"\n return [list(x) for x in zip(*lol)]", "def flattenX(x):\n\n result = []\n for el in x:\n #if isinstance(el, (list, tuple)):\n if hasattr(el, \"__iter__\") and not isinstance(el, basestring):\n result.extend(flatten(el))\n else:\n result.append(el)\n return result", "def flatten(items):\n if isinstance(items, (numbers.Number, six.string_types)):\n return items\n return list(lazy_flatten(items))", "def flatten(nested_num_list):\n lst=[]\n\n for element in nested_num_list:\n if type(element) == type([]):\n element = flatten(element)\n for el in element:\n lst.append(el)\n else: \n lst.append(element) # element is not a list\n \n\n return lst", "def FlattenTestList(values):\n ret = []\n for v in values:\n if isinstance(v, list):\n ret += v\n else:\n ret.append(v)\n return ret", "def flatten(x):\n result = []\n for el in x:\n if hasattr(el, \"__iter__\") and not isinstance(el, basestring):\n result.extend(flatten(el))\n else:\n result.append(el)\n return result", "def flatten(lst):\n \"*** YOUR CODE HERE ***\"", "def flatten(vec):\n return [ [v for v in z] for z in vec][0]", "def transformation_flatten(twoDlistinput):\r\n oneDlistoutput = []\r\n for i in range(len(twoDlistinput)):\r\n for j in range(len(twoDlistinput[i])):\r\n oneDlistoutput.append(twoDlistinput[i][j])\r\n return(oneDlistoutput)", "def flatten_list(l, log):\n warning_msg = 'Warning: returning None.'\n if l is None or l[0] is None:\n if log is not None:\n log.info(warning_msg)\n else:\n print warning_msg\n return [None]\n else:\n return [val for sublist in l for val in sublist]", "def flatten(nested_list):\n for elt in nested_list:\n if isinstance(elt, collections.Iterable) and not isinstance(elt, six.string_types):\n for sub in flatten(elt):\n yield sub\n else:\n yield elt", "def flatten_2d_coeff_lst(coeff_lst_2d: list,\n flatten_tensors: bool = True) -> list:\n flat_coeff_lst = []\n for coeff in coeff_lst_2d:\n if type(coeff) is tuple:\n for c in coeff:\n if flatten_tensors:\n flat_coeff_lst.append(c.flatten())\n else:\n flat_coeff_lst.append(c)\n else:\n if flatten_tensors:\n 
flat_coeff_lst.append(coeff.flatten())\n else:\n flat_coeff_lst.append(coeff)\n return flat_coeff_lst", "def flatten_list(result_set):\n return sum(list(result_set), [])" ]
[ "0.85435647", "0.85404813", "0.852175", "0.85197717", "0.848276", "0.84166855", "0.839817", "0.8382662", "0.83767587", "0.83767587", "0.83384365", "0.8302829", "0.82918155", "0.82290846", "0.82144505", "0.82091016", "0.81558263", "0.81378996", "0.8100652", "0.8090682", "0.806518", "0.8023816", "0.80187964", "0.79633415", "0.7951053", "0.7926984", "0.79162455", "0.78948915", "0.7858383", "0.7841037", "0.7838053", "0.7831811", "0.7795479", "0.7739831", "0.77200025", "0.7715584", "0.7712966", "0.7692648", "0.7688762", "0.76764584", "0.76606894", "0.7603299", "0.75413454", "0.75195616", "0.750806", "0.75026274", "0.74627846", "0.74549145", "0.74498826", "0.7445222", "0.7430639", "0.7422008", "0.7413851", "0.7411958", "0.740684", "0.7378806", "0.73652864", "0.73344976", "0.7275713", "0.7257868", "0.72560596", "0.7255757", "0.7240329", "0.7239813", "0.723733", "0.72252226", "0.7216088", "0.72107303", "0.7192063", "0.71730936", "0.71555024", "0.7148808", "0.7144525", "0.7118208", "0.70994085", "0.70974374", "0.7089518", "0.7061953", "0.7052713", "0.69990414", "0.699756", "0.6968172", "0.6946886", "0.6889123", "0.6886924", "0.6861027", "0.6851724", "0.68208355", "0.68113625", "0.6795198", "0.6795176", "0.6787152", "0.6787045", "0.67764425", "0.6736866", "0.67043036", "0.6685959", "0.6668975", "0.66481614", "0.6635946" ]
0.7730698
34
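
Note: the negatives in the record above are variations on list flattening. A minimal sketch of the two idioms they share (one-level flattening versus recursive flattening of arbitrarily nested lists; function names and inputs here are illustrative, not taken from the dataset) might look like:

def flatten_one_level(list_of_lists):
    # One-level flatten: the comprehension idiom most of the negatives above use.
    return [item for sublist in list_of_lists for item in sublist]

def flatten_recursive(nested):
    # Recursive flatten: descends into arbitrarily nested lists.
    flat = []
    for item in nested:
        if isinstance(item, list):
            flat.extend(flatten_recursive(item))
        else:
            flat.append(item)
    return flat

# Hypothetical usage:
# flatten_one_level([[1, 2], [3]])     -> [1, 2, 3]
# flatten_recursive([1, [2, [3, 4]]])  -> [1, 2, 3, 4]
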
Merge dictionaries, presumes no overlap in keys
def merge_dicts(listDicts) : return dict(itertools.chain.from_iterable([x.items() for x in listDicts]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dict_merge(dict1, dict2):\n combined = dict(dict1)\n combined.update(dict2)\n return combined", "def dict_merge(dict1, dict2):\n combined = dict(dict1)\n combined.update(dict2)\n return combined", "def merge(dict1: dict, dict2: dict):\n if dict1 is None or dict2 is None:\n return None\n keys1 = dict1.keys()\n keys2 = dict2.keys()\n if keys1 & keys2 != set():\n raise Exception(\"Non linear patterns not supported\")\n dict_r = {k: dict1[k] for k in keys1}\n dict_r.update({k: dict2[k] for k in keys2})\n return dict_r", "def _merge(old_dict, new_dict):\n dict3 = old_dict.copy()\n for k, v in new_dict.items():\n if k in dict3:\n dict3[k].append(v)\n else:\n dict3[k] = [v]\n return dict3", "def mergedict(x, y):\n z = x.copy()\n z.update(y)\n return z", "def merge(a: dict, b: dict) -> dict:\n return __merge(a, b)", "def concat_dict(d1, d2):\n if d1 is None:\n return d2\n if d2 is None:\n return d1\n else:\n assert set(d1.keys()) == set(d2.keys())\n return {k: np.concatenate([d1[k], d2[k]], axis=0) for k in d1}", "def _merge(x, y):\n for key in x:\n if key in y:\n x[key] = _merge(x[key], y[key])\n y[key] = None\n for key in y:\n if y[key] is not None:\n x[key] = y[key]\n return x", "def merge_dicts(dict1, dict2):\n merge_dict = dict1\n\n for key, value in dict1.items():\n if key in dict2:\n merge_dict[key] = dict2[key]\n\n return merge_dict", "def merge_dicts(d1, d2):\n return {**d1, **d2}", "def merge_dict(dict1, dict2):\n if dict1 is None:\n dict1 = {}\n if dict2 is None:\n dict2 = {}\n result = dict1.copy()\n for key in dict2:\n result[key] = dict2[key]\n return result", "def merge(first: Dict[Any, Any], second: Dict[Any, Any]) -> Dict[Any, Any]:\n if not isinstance(second, dict):\n return second\n result = deepcopy(first)\n for key, value in second.items():\n if key in result and isinstance(result[key], dict):\n result[key] = merge(result[key], value)\n else:\n result[key] = deepcopy(value)\n return result", "def merge_dicts(a, b):\n new_dict = defaultdict(list)\n add_to_dict(a, new_dict)\n add_to_dict(b, new_dict)\n return new_dict", "def merge_dicts(dict1, dict2):\n return dict(Counter(dict1) + Counter(dict2))", "def merge_dictionaries(d1, d2):\n\n if d2 is None: \n return\n\n for k, v in d2.items():\n if k not in d1:\n d1[k] = dict()\n if isinstance(v, dict):\n merge_dictionaries(d1[k], v)\n else:\n d1[k] = v", "def merge_dict(own: dict, other: dict) -> dict:\n for element in other:\n if own.get(element, None) is None:\n own[element] = other[element]\n else:\n raise ValueError('Conflicting kwargs')\n return own", "def _dict_merge(merge_dict, into_dict):\n for k, v in merge_dict.items():\n if k not in into_dict:\n into_dict[k] = v\n continue\n\n current_val = into_dict[k]\n\n if isinstance(v, dict) and isinstance(current_val, dict):\n _dict_merge(v, current_val)\n continue\n\n # otherwise we just overwrite\n into_dict[k] = v", "def _merge_dicts(d1, d2):\n for key, value in d2.items():\n if key in d1:\n if isinstance(d1[key], dict) and isinstance(value, dict):\n _merge_dicts(d1[key], value)\n continue\n d1[key] = value", "def merge_dict(dict1, dict2):\n merged_dict = dict1.copy()\n merged_dict.update(dict2)\n return merged_dict", "def _merge(old, new):\n from collections.abc import Mapping\n if new is None:\n return\n for key in new:\n if isinstance(new[key], Mapping):\n if isinstance(old.get(key), Mapping):\n yield (old[key], new[key])\n else:\n old[key] = dict(new[key])\n else:\n old[key] = new[key]", "def merge_dicts(x, y):\n z = x.copy()\n z.update(y)\n return z", "def 
merge_dicts(x, y):\n z = x.copy()\n z.update(y)\n return z", "def merge_dicts(x, y):\n z = x.copy()\n z.update(y)\n return z", "def merge(incoming={}, output={}, overwrite=False):\n\t_output = output.copy()\n\tfor _key, _value in incoming.items(): # loop through each key/value pair\n\t\tif (_key in _output) and isinstance(_value, dict): # detect when we need to recurse\n\t\t\t_output[_key] = merge(_value, _output[_key]) # recurse\n\t\telse: # _key is not in output\n\t\t\tif _key in _output and overwrite == False: # we check if it already exists, and if we care\n\t\t\t\tcontinue # don't overwrite existing values unless overwrite is 'True'\n\t\t\t_output[_key] = _value # add key/value pair\n\n\treturn _output # give back the merged dict", "def dict_merge(dict_one, dict_two):\n assert (isinstance(dict_one, dict)), \"First argument must be a dictionary\"\n assert (isinstance(dict_two, dict)), \"Second argument must be a dictionary\"\n\n answer = {}\n dict_one_key = []\n dict_one_value = []\n dict_two_key = []\n dict_two_value = []\n \n for i in dict_one:\n \tdict_one_key.append(i)\n for i in dict_one:\n \tdict_one_value.append(dict_one[i])\n\n for i in dict_two:\n \tdict_two_key.append(i)\n for i in dict_two:\n \tdict_two_value.append(dict_two[i])\n\n for i in range(len(dict_one)):\n \tanswer[dict_one_key[i]] = dict_one_value[i]\n \t# print(answer)\n \tfor j in range(len(dict_two)):\n \t\tif dict_one_key[i] == dict_two_key[j]:\n \t\t\tanswer[dict_one_key[i]] = dict_one_value[i] + dict_two_value[j]\n \t\telif (dict_two_key[j] in answer) == False:\n \t\t\tanswer[dict_two_key[j]] = dict_two_value[j]\n if len(dict_one) == 0:\n \treturn dict_two\n return answer", "def mergeDict(self, dict1, dict2):\n dict3 = {**dict1, **dict2}\n for key, value in dict3.items():\n if key in dict1 and key in dict2:\n try:\n dict3[key] = value + dict1[key]\n except TypeError:\n self.log.info(f\"Error merging dicts. 
{value} + {dict1[key]}\")\n return dict3", "def merge_dicts(a, b):\n if not isinstance(b, dict):\n return b\n\n result = a\n for k, v in b.items():\n if k in result and isinstance(result[k], dict):\n result[k] = merge_dicts(result[k], v)\n else:\n result[k] = v\n return result", "def merge_dicts(*dict_args):\n result = collections.defaultdict(list)\n for dictionary in dict_args:\n for k in dictionary.keys():\n result[k].append(dictionary.get(k))\n return result", "def merge_dicts(dict_a, dict_b):\n dict_c = dict_a.copy()\n dict_c.update(dict_b)\n return dict_c", "def combine_and_count(a: dict, b: dict) -> dict:\r\n if not a:\r\n return b\r\n\r\n if not b:\r\n return a\r\n\r\n # check overlapping keys\r\n overlap_keys = [key for key in a if key in b]\r\n\r\n combined = a | b\r\n\r\n for key in overlap_keys:\r\n combined[key] += a[key]\r\n\r\n return combined", "def _merge_dicts(*args):\n return reduce(lambda d, s: d.update(s) or d, args)", "def merge_dicts(dest, src):\n\n for k, v in src.items():\n if isinstance(v, collections.Mapping):\n dest_v = dest.get(k, {})\n if not isinstance(dest_v, collections.Mapping):\n msg = \"Attempted to merge {0!r} with {1!r}\".format(dest_v, v)\n raise TypeError(msg)\n\n dest[k] = merge_dicts(dest_v, v)\n else:\n dest[k] = src[k]\n\n return dest", "def merge_two_dicts(x, y):\n\tz = x.copy()\n\tz.update(y)\n\treturn z", "def _merge_dicts(*args):\n return reduce(lambda d, s: d.update(s) or d, args)", "def merge_two_dicts(x, y):\r\n z = x.copy()\r\n z.update(y)\r\n return z", "def merge_default_dict(*dicts):\n\tmerged = defaultdict(int)\n\tfor dict_ in dicts:\n\t\tfor k, v in dict_.items():\n\t\t\tmerged[k] += v\n\treturn dict(sorted(merged.items(), key=lambda x: x[1], reverse=True))", "def dictmerge(x, y, path=None, overwrite=False, extend=False):\n if path is None:\n path = []\n for key in y:\n if key in x:\n if isinstance(x[key], (dict, MutableMapping)) and isinstance(\n y[key], (dict, MutableMapping)\n ):\n dictmerge(\n x[key],\n y[key],\n path + [str(key)],\n overwrite=overwrite,\n extend=extend,\n )\n elif x[key] == y[key]:\n pass # same leaf value\n else:\n if not overwrite:\n raise Exception(\"Conflict at %s\" % \".\".join(path + [str(key)]))\n if isinstance(x[key], list) and isinstance(y[key], list) and extend:\n x[key].extend(y[key])\n else:\n x[key] = y[key]\n else:\n x[key] = y[key]\n return x", "def concat_dicts(self, dict1, dict2):\n result = dict()\n for key, value in dict1.items():\n if len(value.shape) == 1:\n result[key] = np.concatenate([value, dict2[key]])\n else:\n result[key] = np.vstack([value, dict2[key]])\n return result", "def merge(*dict_args):\n result = {}\n for dictionary in dict_args:\n result.update(dictionary)\n return result", "def merge_dict(lhs, rhs, override=True):\n if not isinstance(lhs, dict) or not isinstance(rhs, dict):\n if override:\n return rhs\n else:\n return lhs\n\n for key, value in rhs.items():\n if key not in lhs:\n lhs[key] = rhs[key]\n else:\n lhs[key] = merge_dict(lhs[key], value, override)\n\n return lhs", "def MergeDictionary(self, dict1, dict2):\r\n for key_2, val_2 in dict2.items():\r\n for key_2_2, val_2_2 in val_2.items():\r\n if key_2 in dict1 and key_2_2 in dict1[key_2]:\r\n dict1[key_2][key_2_2] += val_2_2\r\n else:\r\n self.addtwodimdict(dict1, key_2, key_2_2, val_2_2)\r\n return dict1", "def merge_two_dicts(x, y):\n z = x.copy()\n z.update(y)\n return z", "def merge_two_dicts(x, y):\n z = x.copy()\n z.update(y)\n return z", "def merge_two_dicts(x, y):\n z = x.copy()\n z.update(y)\n return z", "def 
merge_two_dicts(x, y):\n z = x.copy()\n z.update(y)\n return z", "def merge_two_dicts(x, y):\n z = x.copy()\n z.update(y)\n return z", "def merge_two_dicts(x, y):\n z = x.copy()\n z.update(y)\n return z", "def merge_two_dicts(x, y):\n z = x.copy()\n z.update(y)\n return z", "def merge_two_dicts(x, y):\n z = x.copy()\n z.update(y)\n return z", "def mergeDictionary(dict_1, dict_2):\n for key, value in dict_2.items():\n if key in dict_1:\n if isinstance(dict_1[key], list):\n dict_1[key].append(value)\n else:\n temp_list = [dict_1[key]]\n temp_list.append(value)\n dict_1[key] = temp_list\n else:\n dict_1[key] = value\n return dict_1", "def make_transection(dict1, dict2):\n _res = {}\n for _key in dict1:\n if dict2.get(_key, None):\n _res[_key] = dict2.get(_key)\n return _res", "def dict_combine(dicts):\n result = {}\n for dic in dicts:\n for i in dic.keys():\n if i not in result.keys():\n result[i] = dic[i]\n else:\n lst = []\n lst.append(result[i])\n lst.append(dic[i])\n result[i] = lst\n return result", "def _join_dicts(dicts):\n if dicts is None: # pragma: no cover\n return\n assembled_dict = {k: v for D in dicts for k, v in D.items()}\n return assembled_dict", "def _merge_dicts(dict1, dict2, path=None):\n if path is None:\n path = []\n for key, value2 in six.iteritems(dict2):\n if key not in dict1:\n dict1[key] = value2\n continue\n value1 = dict1[key]\n if value1 == value2:\n continue\n elif isinstance(value1, dict) and isinstance(value2, dict):\n _merge_dicts(dict1[key], dict2[key], path=path+[key])\n elif isinstance(value1, (set, frozenset)) and isinstance(value2, (set, frozenset, list)):\n dict1[key] = value1 | frozenset(value2)\n else:\n raise TypeError(\"Cannot merge {} with {} at {}\".format(type(value1), type(value2), '/'.join(path)))\n return dict1", "def dict_merge(self, merge_to, merge_from) -> dict:\n if merge_to is None:\n merge_to = {}\n\n for k, v in merge_from.items():\n if isinstance(v, collections.abc.Mapping):\n merge_to[k] = self.dict_merge(merge_to.get(k, {}), v)\n else:\n merge_to[k] = self._decode_value(v)\n return merge_to", "def merge_dict_recursive(target, src):\r\n for k in src.keys():\r\n if ((k in target and isinstance(target[k], dict) and\r\n isinstance(src[k], collections.Mapping))):\r\n merge_dict_recursive(target[k], src[k])\r\n else:\r\n target[k] = src[k]", "def merge_unique_dicts(*args, **kwargs):\n args = list(args) + [dict(kwargs)]\n conflicts = set()\n result = {}\n for arg in args:\n conflicts |= set(arg.keys()) & set(result.keys())\n result.update(arg)\n if conflicts:\n raise ValueError('Multiple dicts contain the same keys: {}'\n .format(', '.join(sorted(str(key)\n for key in conflicts))))\n return result", "def custom_extend_dict(dict1, dict2):\n common_keys = set([*dict1]).intersection([*dict2])\n for key in common_keys:\n if dict1[key] == dict2[key]:\n continue\n if not dict1[key]:\n dict1[key] = dict2[key]\n else:\n if isinstance(dict2[key], dict) and isinstance(dict1[key], dict):\n dict2[key] = custom_extend_dict(dict1[key], dict2[key])\n elif isinstance(dict1[key], dict):\n dict2[key] = dict1[key]\n elif not isinstance(dict1[key], list):\n if dict1[key]:\n dict2[key] = dict1[key]\n else:\n dict1[key].extend(dict2[key])\n dict2.pop(key)\n\n dict1.update(dict2)\n return dict1", "def mergedicts(dict1, dict2):\n for k in set(dict1.keys()).union(dict2.keys()):\n if k in dict1 and k in dict2:\n if isinstance(dict1[k], dict) and isinstance(dict2[k], dict):\n yield (k, dict(mergedicts(dict1[k], dict2[k])))\n else:\n # If one of the values is not a 
dict, you can't continue merging it.\n # Value from second dict overrides one in first and we move on.\n yield (k, dict2[k])\n # Alternatively, replace this with exception raiser to alert you of value conflicts\n elif k in dict1:\n yield (k, dict1[k])\n else:\n yield (k, dict2[k])", "def merge_dict(target, addition):\n for key in addition:\n if key in target and isinstance(target[key], dict) \\\n and isinstance(addition[key], dict):\n merge_dict(target[key], addition[key])\n else:\n target[key] = addition[key]", "def _merge_a_into_b(a, b):\n for k, v in a.items():\n # a must specify keys that are in b\n # if k not in b:\n # raise KeyError('{} is not a valid config key'.format(k))\n\n # recursively merge dicts\n if isinstance(v, dict):\n if not b.get(k, False):\n b[k] = v\n else:\n try:\n _merge_a_into_b(a[k], b[k])\n except:\n print(\"Error under config key: {}\".format(k))\n raise\n else:\n b[k] = v", "def merge_dicts(dict1, dict2, custom_merge_func=None):\n # type: (Any, Any, Optional[Callable[[str, Any, Any, Any], Any]]) -> Any\n if not isinstance(dict1, dict) or not isinstance(dict2, dict):\n return dict2\n for k in dict2:\n if k in dict1:\n res = None\n if custom_merge_func:\n res = custom_merge_func(k, dict1[k], dict2[k], _not_set)\n dict1[k] = merge_dicts(dict1[k], dict2[k], custom_merge_func) if res is _not_set else res\n else:\n dict1[k] = dict2[k]\n return dict1", "def merge_dicts_of_vectors(d1,d2):\n \n \n for key in d2:\n \n #adds a key from d2 in d1 if the key doesnt exist in d1 \n if key not in d1:\n \n d1[key] = d2[key] \n \n \n #if key in d2 \n elif key in d1:\n \n #iterates through every key value in the inner dictionnaries of d2\n for key2 in d2[key]:\n \n #if inner key are also found in d1\n if key2 in d1[key]:\n #updates the key inner keys of d1 by adding the values of d2\n d1[key][key2]+= d2[key][key2]\n \n #if inner key are not found in d1\n elif key2 not in d1[key]:\n #creates a new inner key in d1 and assigns the value of the inner key in d2\n d1[key][key2] = d2[key][key2]", "def _helm_merge(a, b):\n if not (isinstance(b, dict) and isinstance(a, dict)):\n # if either one is not a dict,\n # there's no merging to do: use 'b'\n return b\n for key, value in b.items():\n if key in a:\n a[key] = _helm_merge(a[key], value)\n else:\n a[key] = value\n return a", "def _merge_a_into_b(a, b):\n if type(a) is not edict:\n return\n\n for k, v in a.items():\n # recursively merge dicts\n if type(v) is edict:\n try:\n b[k] = edict()\n _merge_a_into_b(a[k], b[k])\n except:\n print('Error under config key: {}'.format(k))\n raise\n else:\n b[k] = v", "def aiida_dict_merge(to_dict, from_dict):\n to_dict = to_dict.get_dict()\n\n if isinstance(from_dict, Dict):\n from_dict = from_dict.get_dict()\n\n dict_merge(to_dict, from_dict)\n\n return Dict(dict=to_dict)", "def merge_dicts(*dict_args):\n result = OrderedDict()\n for dictionary in dict_args:\n result.update(dictionary)\n return result", "def dict_merge(base, upd, inplace=False):\n assert quacks_like_dict(base), quacks_like_dict(upd)\n dst = base if inplace else deepcopy(base)\n\n stack = [(dst, upd)]\n while stack:\n current_dst, current_src = stack.pop()\n for key in current_src:\n if key not in current_dst:\n current_dst[key] = current_src[key]\n else:\n if quacks_like_dict(current_src[key]) and quacks_like_dict(current_dst[key]) :\n stack.append((current_dst[key], current_src[key]))\n else:\n current_dst[key] = current_src[key]\n return dst", "def merge_dicts(source, destination):\n for key, value in source.items():\n if 
isinstance(value, dict):\n node = destination.setdefault(key, {})\n merge_dicts(value, node)\n else:\n destination[key] = value\n\n return destination", "def merge_dicts(*dict_args):\n result = {}\n for dictionary in dict_args:\n result.update(dictionary)\n return result", "def merge_dicts(*dict_args):\n result = {}\n for dictionary in dict_args:\n result.update(dictionary)\n return result", "def merge_dicts(*dict_args):\n result = {}\n for dictionary in dict_args:\n result.update(dictionary)\n return result", "def merge_dicts(*dict_args):\n result = {}\n for dictionary in dict_args:\n result.update(dictionary)\n return result", "def merge_dicts(*dict_args):\n result = {}\n for dictionary in dict_args:\n result.update(dictionary)\n return result", "def merge_dicts(*dict_args):\n result = {}\n for dictionary in dict_args:\n result.update(dictionary)\n return result", "def _add_dict_values(self, d1, d2):\n\n if d1 is None and d2 is None:\n return None\n\n d1 = d1 or {}\n d2 = d2 or {}\n\n added = {}\n for key in set(list(d1.keys()) + list(d2.keys())):\n added[key] = dict(d1.get(key, {}), **(d2.get(key, {})))\n return added", "def merge_two_dicts(self, x, y):\n z = x.copy()\n z.update(y)\n return z", "def merge_dicts(source, destination):\n for key, value in source.items():\n key = key.lower() if isinstance(key, str) else key\n if isinstance(value, Mapping):\n node = destination.setdefault(key, {})\n merge_dicts(value, node)\n else:\n destination[key] = value\n return destination", "def combine_dict(self, dict2):\n # iterate through smaller data set\n # base_set will be the larger set and is used for updating\n if len(self.content[\"values\"]) > len(dict2[\"values\"]):\n large_set = self.content[\"values\"]\n small_set = dict2[\"values\"]\n base_set = self.content\n else:\n small_set = self.content[\"values\"]\n large_set = dict2[\"values\"]\n base_set = dict2\n\n subset = {}\n for key in small_set.keys():\n # determine wether to compare keys\n if key in large_set:\n updated_l = large_set[key][\"updated_at\"]\n updated_s = small_set[key][\"updated_at\"]\n if updated_l == 'NULL':\n if updated_s != 'NULL':\n # update to not NULL set\n # if both updated_at are NULL, things\n # are ambiguos. 
We could defer to created_at\n # but for simplicity we will default to\n # the values in the larger set\n subset[key] = small_set[key]\n else:\n if updated_s == 'NULL':\n # update to not NULL set\n subset[key] = large_set[key]\n else:\n if updated_l > updated_s:\n subset[key] = large_set[key]\n else:\n subset[key] =small_set[key]\n else:\n subset[key] = small_set[key]\n base_set[\"values\"].update(subset)\n new_obj = BackupData()\n new_obj.load_from_dict(base_set)\n return new_obj", "def dict_merge(a, b, path=None):\n\n if path is None: path = []\n for key in b:\n if key in a:\n if isinstance(a[key], dict) and isinstance(b[key], dict):\n dict_merge(a[key], b[key], path + [str(key)])\n else:\n a[key] = b[key]\n return a", "def merge_dicts(*dict_args):\n result = {}\n for dictionary in dict_args:\n result.update(dictionary[0])\n return result", "def concatDic(dic1, dic2):\n pass", "def merge_dict(a: dict, b: dict, path=None) -> dict:\n if path is None:\n path = []\n for key in b:\n if key in a:\n if isinstance(a[key], dict) and isinstance(b[key], dict):\n merge_dict(a[key], b[key], path + [str(key)])\n elif a[key] == b[key]:\n pass # same leaf value\n else:\n a[key] = b[key]\n else:\n a[key] = b[key]\n return a", "def merge_into(a, b):\n\tfor k, v in b.items():\n\t\ta[k] = a.get(k, 0) + v", "def merge_dicts(*dict_args: Dict[T, U]) -> Dict[T, U]:\n result: Dict[T, U] = {}\n for dictionary in dict_args:\n result.update(dictionary)\n return result", "def merge_dicts(base, changes):\n for k, v in changes.items():\n if isinstance(v, dict):\n merge_dicts(base.setdefault(k, {}), v)\n else:\n base.setdefault(k, v)", "def merge_dict(d: dict, overwrite=False, inplace=False, **kwargs):\n nd = dict([(k, v) for k, v in d.items()] + [(k, v) for k, v in kwargs.items() if overwrite or k not in d])\n if inplace:\n d.update(nd)\n return d\n return nd", "def _merge_list_of_dict(first, second, prepend=True):\n first = _cleanup(first)\n second = _cleanup(second)\n if not first and not second:\n return []\n if not first and second:\n return second\n if first and not second:\n return first\n # Determine overlaps\n # So we don't change the position of the existing terms/filters\n overlaps = []\n merged = []\n appended = []\n for ele in first:\n if _lookup_element(second, next(iter(ele))):\n overlaps.append(ele)\n elif prepend:\n merged.append(ele)\n elif not prepend:\n appended.append(ele)\n for ele in second:\n ele_key = next(iter(ele))\n if _lookup_element(overlaps, ele_key):\n # If there's an overlap, get the value from the first\n # But inserted into the right position\n ele_val_first = _lookup_element(first, ele_key)\n merged.append({ele_key: ele_val_first})\n else:\n merged.append(ele)\n if not prepend:\n merged.extend(appended)\n return merged", "def zip_dict(a: Dict[str, A], b: Dict[str, B]) \\\n -> Dict[str, Tuple[Optional[A], Optional[B]]]:\n return {key: (a.get(key), b.get(key)) for key in a.keys() | b.keys()}", "def dict_merge( dict_1, dict_2 ):\n\n if dict_1 is None:\n if dict_2 is None:\n # or return None\n return_val = None\n else:\n # or\n return_val = dict_2\n\n else:\n if dict_2 is None:\n # or return\n return_val = dict_2\n else:\n return_val = { **dict_1 , **dict_2}\n\n\n print( f\"dict_merg return_value = >return_val<\")\n return return_val", "def merge_dicts(dicts):\r\n ret = defaultdict(list)\r\n for d in dicts:\r\n ret.update(d)\r\n return ret", "def test_merge_aggregate_traditional(self):\n mdict = copy.deepcopy(self.dict1)\n mdict[\"A\"] = \"b\"\n ret = 
dictupdate.merge_overwrite(copy.deepcopy(self.dict1), {\"A\": \"b\"})\n self.assertEqual(mdict, ret)", "def mergeDict(old: dict, new: dict, layer=1) -> dict:\n \n from collections import Mapping\n changed = False\n for key, val in new.items():\n # print(\"{} ({})\".format(key, type(old.get(key))))\n if not key in old:\n print(\"{}Adding new value {}\".format(' ' * layer, key))\n changed = True\n old[key] = val\n elif issubclass(type(old[key]), Mapping) and issubclass(type(val), Mapping):\n print(\"{}Merging dict {}\".format(' ' * layer, key))\n changed = changed or mergeDict(old[key], val, layer + 1)\n\n return changed", "def merge_dict(change_dict: Dict[str, Any], orig_dict: MutableMapping[str, Any]):\n for k, v in change_dict.items():\n if not orig_dict.get(k):\n orig_dict[k] = v\n else:\n if isinstance(orig_dict[k], dict) and isinstance(v, dict):\n merge_dict(v, orig_dict[k])\n else:\n orig_dict[k] = v", "def dict_merge(*dictionaries):\n\n merged_dict = {}\n\n def merge(source, defaults):\n source = copy.deepcopy(source)\n # Nested merge requires both source and defaults to be dictionary\n if isinstance(source, dict) and isinstance(defaults, dict):\n for key, value in defaults.items():\n if key not in source:\n # Key not found in source : Use the defaults\n source[key] = value\n else:\n # Key found in source : Recursive merge\n source[key] = merge(source[key], value)\n return source\n\n for merge_with in dictionaries:\n merged_dict = merge(merged_dict, copy.deepcopy(merge_with or {}))\n\n return merged_dict", "def merge_results(res1, res2):\n empty = []\n keys = set(res1).union(res2)\n return dict((k, res1.get(k, empty) + res2.get(k, empty)) for k in keys)", "def _merge_a_into_b(a, b):\n if type(a) is not edict:\n return\n\n for k, v in a.items():\n # a must specify keys that are in b\n if k not in b:\n raise KeyError('{} is not a valid config key'.format(k))\n\n # the types must match, too\n old_type = type(b[k])\n if old_type is not type(v):\n if isinstance(b[k], np.ndarray):\n v = np.array(v, dtype=b[k].dtype)\n else:\n raise ValueError(('Type mismatch ({} vs. {}) '\n 'for config key: {}').format(type(b[k]),\n type(v), k))\n\n # recursively merge dicts\n if type(v) is edict:\n try:\n _merge_a_into_b(a[k], b[k])\n except:\n print('Error under config key: {}'.format(k))\n raise\n else:\n b[k] = v", "def _merge_a_into_b(a, b):\n if type(a) is not edict:\n return\n\n for k, v in a.items():\n # a must specify keys that are in b\n if k not in b:\n raise KeyError('{} is not a valid config key'.format(k))\n\n # the types must match, too\n old_type = type(b[k])\n if old_type is not type(v):\n if isinstance(b[k], np.ndarray):\n v = np.array(v, dtype=b[k].dtype)\n else:\n raise ValueError(('Type mismatch ({} vs. {}) '\n 'for config key: {}').format(type(b[k]),\n type(v), k))\n\n # recursively merge dicts\n if type(v) is edict:\n try:\n _merge_a_into_b(a[k], b[k])\n except:\n print(('Error under config key: {}'.format(k)))\n raise\n else:\n b[k] = v", "def merge_dicts(primary, secondary, deepcopy=False):\n # Objective: assemble `out` from\n # (1) `primary` <has a higher priority>\n # (2) `secondary`\n\n out = {}\n if deepcopy:\n two = _copy.deepcopy(secondary)\n else:\n two = secondary.copy()\n out.update(primary)\n\n # Remove those same keys from `secondary`:\n for key in primary.iterkeys():\n two.pop(key, None)\n\n # Then append any remaining values in `secondary` into `out`. 
However\n # first deepcopy those values, if we've been asked to:\n if deepcopy:\n out.update(_copy.deepcopy(two))\n else:\n out.update(two)\n return out", "def merge(dest, update, *, key_check=False, silent=False):\n # [From StackOverflow](http://stackoverflow.com/a/3233356)\n # Modified to raise ValueError in case of type mismatch, and ignore new keys\n for key, value in update.items():\n if key_check:\n if key not in dest:\n if not silent:\n print(f\"Useless key '{key}' with value '{value}'\")\n continue\n if not isinstance(value, type(dest[key])):\n raise ValueError(f\"Type mismatch on key '{key}'\")\n if isinstance(value, collections.Mapping):\n temp = dest.get(key, {})\n if not isinstance(temp, dict):\n temp = {}\n merge(temp, value, key_check=key_check) # Recursively merge internal dictionary\n dest[key] = temp\n else:\n dest[key] = update[key] # Regular key update\n return dest" ]
[ "0.76170945", "0.76170945", "0.7527569", "0.749968", "0.7440527", "0.7404144", "0.73930734", "0.73853135", "0.73769873", "0.73720855", "0.7343238", "0.72534424", "0.7251481", "0.72268325", "0.7226796", "0.72029424", "0.7192793", "0.71703434", "0.7158246", "0.71368694", "0.712719", "0.712719", "0.712719", "0.71171445", "0.7113267", "0.7087117", "0.70606565", "0.70598423", "0.70410675", "0.7034034", "0.70171195", "0.70043886", "0.6997935", "0.6993707", "0.6976762", "0.6973969", "0.6949176", "0.6946965", "0.6933097", "0.691815", "0.6916517", "0.69098544", "0.69098544", "0.69098544", "0.69098544", "0.69098544", "0.69098544", "0.69098544", "0.69098544", "0.69086385", "0.68923604", "0.6892159", "0.68701994", "0.68680215", "0.68569124", "0.68567455", "0.6853758", "0.6840067", "0.68336713", "0.6807033", "0.67832327", "0.67618203", "0.674322", "0.6742428", "0.67388034", "0.6735342", "0.67315894", "0.67277634", "0.67240095", "0.6709402", "0.6709402", "0.6709402", "0.6709402", "0.6709402", "0.6709402", "0.66925526", "0.6666106", "0.66593724", "0.6656577", "0.665092", "0.6650292", "0.66501874", "0.6643431", "0.6643138", "0.6640491", "0.6624273", "0.6617048", "0.6616014", "0.6609965", "0.658923", "0.6584879", "0.6578852", "0.6567551", "0.65426797", "0.6528896", "0.6518287", "0.6516222", "0.6514348", "0.650537", "0.64921826" ]
0.6827089
59
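
Note: the document field of the record above merges dictionaries by chaining each dict's (key, value) pairs and rebuilding a single dict. A small usage sketch of that same one-liner (the example inputs are made up for illustration) might be:

import itertools

def merge_dicts(listDicts):
    # Chain the items of every dict and rebuild one dict; with no overlapping
    # keys (as the query presumes), nothing is overwritten.
    return dict(itertools.chain.from_iterable([x.items() for x in listDicts]))

# Hypothetical usage:
# merge_dicts([{"a": 1}, {"b": 2}, {"c": 3}])  -> {"a": 1, "b": 2, "c": 3}
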
Run DFS from some (starting or intermediate) State until termination
def run_dfs(self,s): if self.verbose: print('entering run_dfs with s = ',s) new_states = [self.succ(s,a) for a in self.actions(s)] results = [] for ns in new_states: if self.verbose: print('considering new state = ',ns) end = self.is_end(ns) if end: result = self.result(ns) if result is not None: results.append(result) else: results += self.run_dfs(ns) return results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def DFS(initial_state, check_dict): \r\n \r\n print(\"Implementing DFS...\")\r\n q = deque()\r\n q.append(initial_state)\r\n accomplished = False\r\n \r\n while len(q) != 0:\r\n path = q.pop()\r\n \r\n if is_goal(path[-1][0]):\r\n goal = path\r\n accomplished = True\r\n break\r\n \r\n state_container = next_possible_states(path, check_dict, False)\r\n for i in state_container:\r\n if len(path) <= 1:\r\n temp = list(path)\r\n temp.append(i)\r\n q.append(temp)\r\n else:\r\n if i[0] != path[-2][0]:\r\n temp = list(path)\r\n temp.append(i)\r\n q.append(temp)\r\n\r\n \r\n if accomplished:\r\n print(\"Solved! Number of moves:\", len(goal) - 1)\r\n return goal, True\r\n else:\r\n print(\"Cannot be solved. Number of moves:\", len(path) - 1)\r\n print(path)\r\n return path, False", "def dfs( self ):\n\n #print self.state; \n #print self.visited;\n SearchProblem.stateVisited= SearchProblem.stateVisited+1 \n \n if self.stop: # check class variable and stop searching...\n return;\n\n for action in self.edges(): # consider each edge leading out of this node\n\n action.destination.path = self.path + str(action.label); \n # get the label associated with the\n # action and append it to the path\n # string\n\n action.destination.visited = self.visited.copy();\n # make copy of source node's visited set\n # and use it as destination node's\n # visited set\n\n action.destination.visited.add( repr(action.destination.state) );\n\n if action.destination.is_target(): \n # check if destination of edge is target node\n action.destination.target_found(); # perform target found action\n if not self.continue_search(): # stop searching if not required\n SearchProblem.stop = True; # set class variable to record that we\n break; # are done\n\n if repr(action.destination.state) in self.visited:\n continue; # skip if we've visited this one before\n\n action.destination.dfs(); # resume recursive search ", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n # util.raiseNotDefined()\n\n # print(\"Start:\", problem.getStartState())\n # print(\"Is the start a goal?\", problem.isGoalState(problem.getStartState()))\n # print(\"Start's successors:\", problem.getSuccessors(problem.getStartState()))\n\n # Initialize a frontier, and push the initial state into the frontier\n frontier = util.Stack()\n frontier.push([(problem.getStartState(), 'move', 0)])\n # Initialize a explored set to store the visited nodes\n exploredSet = set()\n\n # Check the content of frontier\n while not frontier.isEmpty():\n stateList = list()\n stateList = frontier.pop()\n # print (stateList)\n # What we focus on is the next state, not the (previous state + next state), so we should take the last element\n nextState = stateList[len(stateList) - 1]\n # Check the current state is goal or not\n if problem.isGoalState(nextState[0]):\n # Initial a path, which is the way to the goal state\n path = list()\n for eachMove in stateList:\n path.append(eachMove[1])\n # If the initial state is the goal state, there's no need to explore other nodes, so that's called special condition\n if len(path) == 1:\n return path[0]\n # This is the normal condition, we should convey the path except the first one, because we haven't define what's \"move\"\n else:\n return path[1:]\n # If this is a state which we don't visit, add it to the explored set(this is called GSA)\n if not nextState[0] in exploredSet:\n exploredSet.add(nextState[0])\n # Give me your child nodes\n for childState in problem.getSuccessors(nextState[0]):\n nextStateList = stateList[:]\n # we focus on 
the path, so we have to record the every move from the initial state to the current one\n nextStateList.append(childState)\n frontier.push(nextStateList)\n\n # Or maybe there's no way to the goal state\n else:\n return \"There's no way.\"", "def depthFirstSearch(problem):\r\n \"*** YOUR CODE HERE ***\"\r\n node = problem.getStartState()\r\n if (problem.isGoalState(node)):\r\n return [] # no need to make any moves of the start state is goal\r\n start = (node, 'NoDirection',0)\r\n\r\n frontier_queue = Stack() # queue for frontier\r\n frontier_queue.push(start) # frontier consists of only the start state\r\n\r\n explored_nodes = set()\r\n explored_track = {start:None} # keep a track of parent, parent of root node is None\r\n\r\n while not frontier_queue.isEmpty():\r\n state = frontier_queue.pop() # pop the top element from the queue \r\n explored_nodes.add(state)\r\n\r\n if problem.isGoalState(state[0]):\r\n return get_track(explored_track, state)\r\n\r\n neighbors_state = problem.getSuccessors(state[0])\r\n for neighbor in neighbors_state: # neighbor will be something like this ((34, 15), 'South', 1)\r\n if neighbor not in frontier_queue.list and neighbor not in explored_nodes:\r\n frontier_queue.push(neighbor)\r\n explored_track[neighbor] = state\r\n\r\n\r\n def get_track(explored_track, state):\r\n from game import Directions\r\n track_history = [state]\r\n track_history_direction = []\r\n leaf = state\r\n while (explored_track[leaf]) != start:\r\n track_history.append(explored_track[leaf])\r\n leaf = explored_track[leaf]\r\n\r\n for j in range (len(track_history),-1,-1):\r\n this_step = track_history[j-1]\r\n this_step = this_step[1]\r\n track_history_direction.append(this_step)\r\n return track_history_direction[:-1]", "def dfs(state):\n\n #if the current state is a goal state, then return it in a list\n if state.is_goal():\n return [state]\n else:\n # else, recurse on the possible next states\n result = []\n \n for s in state.next_states():\n # append all of the s\n result += dfs(s)\n \n return result", "def dfs(self):\n def add_to_stack(stack, done, src, path):\n for dest in self.edges[src]:\n if dest not in done:\n for step_path in self.edges[src][dest]:\n stack.append((dest, step_path, path))\n done.add(src)\n stack = [] # Stack of steps to take\n done = set() # Nodes we've visited\n # Seed the stack with all edges from the start cell.\n add_to_stack(stack, done, self.start_cell, '')\n while stack:\n (src, step_path, path) = stack.pop()\n path = path + step_path\n if src == self.exit_cell:\n return path\n add_to_stack(stack, done, src, path)\n return '' # No path found.", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n \n from game import Directions\n visited = set() # unique elements\n state = problem.getStartState()\n #returns starting agent's position\n waiting_list = util.Stack()\n # LIFO\n # last in first out\n # parents = collections.defaultdict(collections.UserDict)\n parents = {}\n #dictionary\n sequence = []\n #LIFO\n for action in problem.getSuccessors(state):\n # in order to push full-state values\n waiting_list.push(action)\n # enumarating tuple\n\n while not waiting_list.isEmpty():\n state = waiting_list.pop()\n \n visited.add(state[0])\n # node is visited and we wont visit those nodes\n \n for substate in problem.getSuccessors(state[0]):\n # take a look to successors of current node\n \n if substate[0] not in visited:\n # if not in visited \n # saving parents\n parents[substate[0]]={'parent':state} \n # generate new node\n waiting_list.push(substate)\n # 
push to stack\n if problem.isGoalState(substate[0]): \n target_state = substate \n #finding wayback\n\n\n while target_state[0] in parents.keys():\n temp=parents[target_state[0]]['parent']\n sequence.append(target_state[1])\n target_state = temp\n sequence.append(target_state[1])\n return sequence[::-1]", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n \"\"\"\n ALGORITH FOR DFS\n \n function graph-search(problem, fringe) retuen a sloution or failure\n \n closed <-- an empty set\n fringe <-- insert (make-node (initial-state [problem]), fringe)\n \n loop do :\n if fringe is empty then return failure\n node <-- Remove-front (fringe)\n if goal-test (problem, state[node]) then return node\n if state[node] is not in closed then \n add STATE[node] to closed\n for child-node in EXPAND(STATE[node],problem) do\n fringe <-- Insert (child-node, fringe)\n end\n end\n \"\"\"\n\n templist=[]\n explored = set()\n fringe = util.Stack()\n #print \"the stat node is : \", problem.getStartState()\n\n fringe.push((problem.getStartState(),templist))\n while (not fringe.isEmpty()):\n (currentNode,currDir) = fringe.pop()\n # print \"Pacman is currently at : \", currentNode\n if problem.isGoalState(currentNode):\n # print \" Goal State Found : \", currentNode\n pathToGoal = currDir\n break\n if not (currentNode in explored):\n # print \"Adding current node to explored\"\n explored.add(currentNode)\n for childNode in problem.getSuccessors(currentNode):\n # print \"child node : \", childNode , \" is added \"\n fringe.push((childNode[0],currDir+[childNode[1]]))\n\n return pathToGoal", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n '''\n print \"Start:\", problem.getStartState()\n print \"Is the start a goal?\", problem.isGoalState((2,2))\n print \"Start's successors:\", problem.getSuccessors((1,1))\n suc=problem.getSuccessors(problem.getStartState())\n actionList=[]\n stateList=[]\n import random\n randomNum=random.randrange(0,len(suc),1)\n \n \n print len(suc)\n #for i in range(1000):\n while not problem.isGoalState(suc[randomNum][0]):\n\tprint randomNum\n\trandomNum=random.randrange(0,len(suc),1)\n\trandomAction=suc[randomNum][1]\n\t\n \t#print randomNum\n\tif suc[randomNum][0] not in stateList:\n\t\tstateList.append(suc[randomNum][0])\n\t\tactionList.append(randomAction)\n \t\tsuc=problem.getSuccessors(suc[randomNum][0]) \n \n #actionList.append(suc[randomNum][0])\n #if kiki==0:\n print actionList\n \n return actionList\n\n\n #util.raiseNotDefined()\n '''\n return DFS(problem,problem.getStartState(),[])", "def iterativeDeepeningSearch(problem):\n \"*** YOUR CODE HERE FOR TASK 1 ***\"\n\n # Retrieve the init state\n # state model ( (position, depth), path, cost)\n initState = ( (problem.getStartState(), 1) , ['Stop'], 0)\n limit = 1\n while True:\n # Initialization each iteration\n open = util.Stack()\n open.push(initState)\n closed = {}\n\n while not open.isEmpty():\n currState = open.pop()\n currPos = currState[0][0]\n currDepth = currState[0][1]\n currPath = currState[1]\n currCost = currState[2]\n\n closed[currPos] = currCost\n if currDepth <= limit:\n successors = problem.getSuccessors(currPos)\n if len(successors) > 0:\n nextDepth = currDepth + 1\n for each in successors:\n nextCost = currCost + each[2]\n nextPath = currPath + [each[1]]\n if each[0] not in closed.keys() or nextCost < closed[each[0]]:\n temp = ( (each[0], nextDepth), nextPath, nextCost)\n open.push(temp)\n if problem.isGoalState(temp[0][0]):\n return nextPath[1:]\n limit += 1", "def dfs(self, start_node, 
cbfunc):\n visited = set()\n stack = [start_node]\n\n while len(stack) != 0:\n node = stack.pop()\n if node in visited:\n continue\n cbfunc(node)\n visited.add(node)\n for neighbor_node in node.each_neighbor():\n stack.append(neighbor_node)", "def solveOneStep(self):\n ### Student code goes here\n if self.currentState.state == self.victoryCondition:\n self.visited[self.currentState]=True\n return True\n return self.BFS()", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n # Frontier stored in a Stack\n frontier = util.Stack()\n\n # Visited states stored in a list\n visitedStates = []\n\n # Format of each element: (current coordinates, [path taken to get there]) \n frontier.push((problem.getStartState(), []))\n\n # while there are still states to explore\n while not frontier.isEmpty():\n \n # store the current state and path in separate variables\n currentState, pathTaken = frontier.pop()\n\n # for skipping states that have already been visited\n if currentState in visitedStates:\n continue\n\n # for returning the correct path to the goal state upon discovering it\n if problem.isGoalState(currentState):\n return pathTaken\n\n # count the current state as \"visited\"\n visitedStates.append(currentState)\n\n # for each successor state, check whether they have already been visited. if not, add their coordinates to the frontier, and append their respective direction to the path list\n for coordinates, direction, cost in problem.getSuccessors(currentState):\n\n if coordinates not in visitedStates:\n \n frontier.push((coordinates, pathTaken + [direction]))\n\n\n util.raiseNotDefined()", "def dfs_iter(graph, start):\n # vkladam uzel a index potencialniho naslednika, kterym mam pokracovat\n stack = [(start, 0)]\n time = 1\n graph.discovery_time[start] = time\n graph.visited[start] = True\n\n while stack: # not empty\n u, v = stack.pop()\n\n while v < graph.size and not is_edge(graph, u, v):\n v += 1\n\n if v < graph.size:\n # found successor, u is not yet finished\n stack.append((u, v + 1))\n\n if not graph.visited[v]:\n # we have discovered v\n stack.append((v, 0))\n graph.parent[v] = u\n graph.visited[v] = True\n time += 1\n graph.discovery_time[v] = time\n else:\n # u has no more successors\n time += 1\n graph.finishing_time[u] = time", "def depthFirstSearch(problem):\n #print \"Start:\", problem.getStartState()\n #print \"Is the start a goal?\", problem.isGoalState(problem.getStartState())\n #print \"Start's successors:\", problem.getSuccessors(problem.getStartState())\n \n from game import Directions\n s = Directions.SOUTH\n w = Directions.WEST\n n = Directions.NORTH\n e = Directions.EAST\n\n #created a frontier Stack for DFS\n #Here the stack acts as a LIFO stack\n neighbourNodes = util.Stack()\n #created a list of moves which will be returned in then end\n moves = []\n #pushed the start node and empty moves list, onto the frontier stack\n neighbourNodes.push((problem.getStartState(),moves))\n #this is a set of nodes which have been seen, to avoid adding nodes already visited \n seenNodes = set()\n #condition evaluated based on the existence of elements in the frontier stack\n while not neighbourNodes.isEmpty():\n #last node in the stack is popped and its state and action is stored\n poppedNodeState, poppedNodeAction = neighbourNodes.pop()\n #condition to check if the node is already been visited\n if(poppedNodeState in seenNodes):\n #if yes then it just skips the iteration using the continue statement\n continue\n #condition to check if the current node is the goal node\n if 
problem.isGoalState(poppedNodeState):\n #if yes then return the action or moves to be performed list\n return poppedNodeAction\n #if not visited before then node is added to the seenNodes set\n seenNodes.add(poppedNodeState)\n #loop to parse the successor nodes and check and add them to the frontier stack\n for state, action, cost in problem.getSuccessors(poppedNodeState):\n #checking if the successor node has already been visited before\n if(state in seenNodes):\n #if yes then it skips that node\n continue\n #else it adds that successor along with it action appeneded with the already existing actions\n neighbourNodes.push((state, poppedNodeAction+[action]))\n #the list of moves if finally returned\n return moves\n #util.raiseNotDefined()", "def dfs(self, initialSt, goalSt): # Depth­First Search\n\n self.__reset_all_variables()\n\n start = time.perf_counter()\n\n frontier = deque() # deque will be treated as a stack\n frontier.append(initialSt)\n frontier_U_explored = set()\n frontier_U_explored.add(initialSt) # for fasten up the lookup time\n explored = set()\n\n max_frontier_size = 0\n max_ram_used = psutil.virtual_memory().used\n max_depth = initialSt.depth\n\n while len(frontier):\n currentState = frontier.pop()\n explored.add(currentState)\n frontier_U_explored.add(currentState)\n\n max_depth = currentState.depth if currentState.depth > max_depth else max_depth\n\n if goalSt == currentState:\n\n end = time.perf_counter()\n\n self.__success(initialSt,\n currentState,\n len(explored)-1,\n len(frontier),\n max_frontier_size,\n max_depth,\n end-start,\n max_ram_used,\n \"dfs\")\n return True\n\n h = currentState.children()\n h.reverse()\n for child in h:\n if child not in frontier_U_explored:\n frontier.append(child)\n frontier_U_explored.add(child)\n\n max_frontier_size = len(frontier) if len(\n frontier) > max_frontier_size else max_frontier_size\n max_ram_used = psutil.virtual_memory().used if psutil.virtual_memory(\n ).used > max_ram_used else max_ram_used\n\n return False", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n class Node:\n def __init__(self, state, parent, action, pathCost):\n self.state = state #state of the game\n self.parent = parent #parent of the node\n self.action = action #action that led to that node\n self.pathCost = pathCost #total cost of tha path until that node\n\n def solution(self): #return the path to the goal node\n path = [] #path is a list of actions\n tempNode = self #temp node is the goal node\n while tempNode.state != problem.getStartState(): #until we get to the initial node\n path.insert(0, tempNode.action) #insert at the start of the list\n tempNode = tempNode.parent #go to the parent of the node\n return path #return list of actions\n\n def childNode(successor, parent, action, stepCost):\n pathCost = parent.pathCost + stepCost #total cost is the total cost of the parent + the cost of the last action\n child = Node(successor, parent, action, pathCost) #create new child node\n return child\n\n initialNode = Node(problem.getStartState(), None, None, 0) #create initial node with start state and no parent\n if problem.isGoalState(initialNode.state):\n return initialNode.solution()\n\n frontier = util.Stack() #dfs uses a stack\n frontier.push(initialNode) #insert initial node to the stack\n explored = set() #explored nodes are added to a set\n\n while not frontier.isEmpty(): #while stack is not empty\n nextNode = frontier.pop() #extract the last node entered\n explored.add(nextNode.state) #add the state of the node to the explored set\n 
for successor, action, stepCost in problem.getSuccessors(nextNode.state): #for every successor create a new child\n child = childNode(successor, nextNode, action, stepCost)\n if child.state not in explored and child not in frontier.list: #if child is not already explored or is not in the stack\n if problem.isGoalState(child.state): # if node is goal node we return the path of actions\n return child.solution()\n frontier.push(child) #insert it into the stack\n\n return [] #if stack is empty\n util.raiseNotDefined()", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n # current path stack\n path_stack = util.Stack()\n action_stack = util.Stack()\n path_stack.push(problem.getStartState())\n\n # visited (so don't )\n visited = []\n visited.append(problem.getStartState())\n\n i = 0\n while not path_stack.isEmpty():\n\n # check goal state\n if problem.isGoalState(path_stack.list[-1]): # check if goal\n return action_stack.list\n\n # get next possible state (choose first in list)\n successors = problem.getSuccessors(path_stack.list[-1])\n forward=False\n for successor in successors:\n ss,aa,_ = successor\n if ss not in visited:\n\n path_stack.push(ss)\n action_stack.push(aa)\n visited.append(ss) # you don't pop visited\n forward=True\n break\n\n # backtrack\n if forward==False:\n path_stack.pop()\n action_stack.pop()\n\n i+=1\n #if i==25:\n # import pdb; pdb.set_trace()\n #print(path_stack.list)", "def dfs(graph, start):\n dfs_rec(graph, start, 0)", "def dfs(starting_vertex):\n s = Stack()\n\n s.push([starting_vertex])\n\n while s.size() > 0:\n p = s.pop()\n l = p[-1]\n\n if l not in new_visited_rooms:\n return p\n neighbors = set(get_neighbors(l))\n \n for n in neighbors:\n new_path = p.copy()\n new_path.append(n)\n s.push(new_path)", "def dfs_recursive(self, starting_vertex, destination_vertex):\n pass # TODO", "def dfs_recursive(self, starting_vertex, destination_vertex):\n pass # TODO", "def dfs(i, adj_dict, edges, checked, cycle, start):\n for vertex in adj_dict[i]:\n pos = search_pos(i, vertex, edges, checked)\n if pos != -1:\n checked[pos] = True\n if vertex[0] == start and not (False in checked):\n cycle.append((vertex[0], i, vertex[1]))\n return True\n if dfs(vertex[0], adj_dict, edges, checked, cycle, start):\n cycle.append((vertex[0], i, vertex[1]))\n return True\n checked[pos] = False\n return False", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n visited_nodes = []\n start_node = problem.getStartState()\n visited_nodes.append(start_node)\n curr_node = start_node\n q = util.Queue()\n directions = util.Queue()\n q.push(curr_node)\n goal_found = problem.isGoalState(curr_node)\n\n while not goal_found:\n nxt_node_list = problem.getSuccessors(curr_node)\n nxt_node_found = False\n\n # Check if a child can be found which has not been visited\n for node in nxt_node_list:\n nxt_node = node[0]\n move = node[1]\n if nxt_node not in visited_nodes:\n nxt_node_found = True # mark that a child node has been found\n q.push(nxt_node) # add the node in the tree\n directions.push(move) # add the direction\n visited_nodes.append(nxt_node) # mark the node as visited\n break\n\n # If child not found, go to parent\n if not nxt_node_found:\n q.list.pop(0)\n directions.list.pop(0)\n\n if q.isEmpty(): break\n\n curr_node = q.list[0]\n goal_found = problem.isGoalState(curr_node)\n\n final_moves = []\n while not directions.isEmpty():\n final_moves.append(directions.pop())\n \n return final_moves\n #util.raiseNotDefined()", "def _dfs_iteration(self, v):\n stack1 = [v]\n 
self._visited[v] = True\n while stack1:\n curr = stack1.pop()\n for w in self._G.adj(curr):\n if not self._visited[w]:\n stack1.append(w)\n self._visited[w] = True\n self._pre[w] = curr\n elif self._pre[w] != curr:\n self.cycle = True", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n frontier = util.Stack()\n start_node = problem.getStartState()\n\n if problem.isGoalState(start_node):\n return ['Stop']\n frontier.push((start_node,[]))\n explored = set()\n while True:\n if frontier.isEmpty():\n return []\n node = frontier.pop()\n explored.add(node[0])\n for successor in problem.getSuccessors(node[0]):\n nextState, action, cost = successor\n if nextState in explored or nextState in [f[0] for f in frontier.list]:\n continue\n actions = node[1][:]\n actions.append(action)\n new_node = (nextState, actions)\n if problem.isGoalState(new_node[0]):\n return new_node[1]\n frontier.push(new_node)\n #print frontier.list\n return []", "def breadth_first_search(initial_state):\n list_of_processed_nodes = []\n num_unprocessed_nodes = 0#\n num_unconsidered_children = 0#\n\n initial_node = Node(state=initial_state)\n node_deque = collections.deque()\n node_deque.append(initial_node)\n goal_state_found = False\n goal_node = None\n\n while len(node_deque) > 0 and not goal_state_found:\n e = node_deque.popleft()\n #pdb.set_trace()\n if e in list_of_processed_nodes:\n num_unprocessed_nodes += 1\n continue\n else:\n list_of_processed_nodes.append(e)\n\n list_of_children_nodes, num_unconsidered_children = generate_children_nodes(\n curr_node=e, list_of_processed_nodes=list_of_processed_nodes,\n running_count_of_children_dups=num_unconsidered_children#\n )\n \n for child_node in list_of_children_nodes:\n #print 'Node {0} with goal status {1}'.format(child_node.index, child_node.state.snake_ate_food)\n if child_node.state.goal_state_reached():\n #print \"Goal state reached with node index {0}\".format(child_node.index)\n goal_state_found = True\n goal_node = child_node\n break\n else:\n #print \"Adding to deque node index {0}\".format(child_node.index)\n node_deque.append(child_node)\n\n if len(node_deque) == 0 and not goal_state_found:\n print '*'*40\n print 'NO SOLUTION PATH FOUND'\n print '*'*40\n sys.exit(0)\n\n #pdb.set_trace()#\n # Summary & results\n #print '{0} nodes processed!'.format(len(list_of_processed_nodes))\n #print '{0} nodes already visited, skipped!'.format(num_unprocessed_nodes)\n #print '{0} node children skipped!'.format(num_unconsidered_children)\n #os.system('say -v \"Victoria\" \"done\"')\n\n return goal_node, list_of_processed_nodes", "def solveOneStep(self):\n ### Student code goes here\n\n if self.currentState.state == self.victoryCondition:\n return True\n\n current_depth = self.currentState.depth\n found_move = False\n while self.currentState.parent:\n self.gm.reverseMove(self.currentState.requiredMovable)\n self.currentState = self.currentState.parent\n count = self.currentState.nextChildToVisit\n if len(self.currentState.children) > count:\n found_move = True\n break\n if not found_move:\n for all_visited in self.visited.keys():\n all_visited.nextChildToVisit = 0\n current_depth += 1\n if len(self.visited) == 1:\n all_possible_moves = self.gm.getMovables()\n for every_move in all_possible_moves:\n self.gm.makeMove(every_move)\n new_game_state = GameState(self.gm.getGameState(), current_depth, every_move)\n new_game_state.parent = self.currentState\n self.visited[new_game_state] = False\n self.currentState.children.append(new_game_state)\n 
self.gm.reverseMove(every_move)\n while current_depth != self.currentState.depth:\n count = self.currentState.nextChildToVisit\n self.currentState.nextChildToVisit += 1\n if len(self.currentState.children) > count:\n self.currentState = self.currentState.children[count]\n next_move = self.currentState.requiredMovable\n self.gm.makeMove(next_move)\n else:\n found_move = False\n while self.currentState.parent:\n self.gm.reverseMove(self.currentState.requiredMovable)\n self.currentState = self.currentState.parent\n if len(self.currentState.children) > self.currentState.nextChildToVisit:\n found_move = True\n break\n if not found_move:\n return False\n\n if self.currentState.state != self.victoryCondition:\n self.visited[self.currentState] = True\n all_possible_moves = self.gm.getMovables()\n next_depth = current_depth + 1\n for every_move in all_possible_moves:\n self.gm.makeMove(every_move)\n new_game_state = GameState(self.gm.getGameState(), next_depth, every_move)\n if new_game_state not in self.visited:\n self.visited[new_game_state] = False\n new_game_state.parent = self.currentState\n self.currentState.children.append(new_game_state)\n self.gm.reverseMove(every_move)\n return False\n else:\n return True", "def depth_first_search(problem):\r\n \"*** YOUR CODE HERE ***\"\r\n fringe = []\r\n path = set()\r\n final = []\r\n acts = dict()\r\n state = problem.get_start_state()\r\n fringe.append(state)\r\n\r\n while(len(fringe) > 0):\r\n state = fringe.pop()\r\n path.add(state)\r\n states = problem.get_successors(state)\r\n acts[state] = states[:]\r\n if problem.is_goal_state(state):\r\n break\r\n\r\n #states = problem.get_successors(state)\r\n for stat in states:\r\n if stat[0] not in path and stat[0] not in fringe:\r\n fringe.append(stat[0])\r\n\r\n while(True):\r\n if state == problem.get_start_state():\r\n break\r\n for key, val in acts.items():\r\n for va in val: #( x, y, z)\r\n if va[0] == state:\r\n final.append(va[1])\r\n state = key\r\n break\r\n else:\r\n continue\r\n break\r\n\r\n final.reverse()\r\n\r\n return final", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n startState=problem.getStartState()\n currentLocation = startState \n\n #for GSA implementation\n exploredStates = []\n exploredStates.append(startState)\n \n #To transform the graph to stack for better access in DFS\n frontierStack = util.Stack()\n for frontier in problem.getSuccessors(startState):\n frontierRoute = frontier + (frontier[1],)\n frontierStack.push(frontierRoute)\n\n currentRoute = []\n\n #start DFS\n while not(frontierStack.isEmpty()):\n currentStage = frontierStack.pop()\n currentState = currentStage[0]\n currentRoute = currentStage[3]\n\n if problem.isGoalState(currentState): \n break\n if currentState not in exploredStates:\n for frontier in problem.getSuccessors(currentState):\n if frontier[0] not in exploredStates:\n nextRoute = currentRoute + \",\" + frontier[1]\n frontierRoute = frontier + (nextRoute,)\n frontierStack.push(frontierRoute)\n exploredStates.append(currentState)\n \n return currentRoute.split(\",\")\n\n util.raiseNotDefined()", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE IF YOU WANT TO PRACTICE ***\"\n # Initialize a stack\n open = util.Stack()\n\n # Retrieve the init state\n initState = (problem.getStartState(), ['Stop'], 0)\n open.push(initState)\n closed = []\n\n while not open.isEmpty():\n currState = open.pop()\n currPos = currState[0]\n currPath = currState[1]\n currCost = currState[2]\n\n if problem.isGoalState(currPos):\n return currPath[1:]\n else:\n 
closed.append(currPos)\n if currState not in closed:\n successors = problem.getSuccessors(currPos)\n if len(successors) > 0:\n for each in successors:\n if each[0] not in closed:\n temp = (each[0], currPath+[each[1]], currCost+each[2])\n open.push(temp)\n return False", "def iterative_dfs(starting_vertex, graph):\n starting_vertex.discovered = True\n starting_vertex.discovery_edge = Graph.Edge(starting_vertex, None, None) # Dummy edge\n walk = starting_vertex\n\n while walk is not None:\n has_to_go_back = True\n for edge in graph.incident_edges(walk):\n opposite = edge.opposite(walk)\n if not opposite.discovered:\n opposite.discovered = True\n opposite.discovery_edge = edge\n walk = opposite\n has_to_go_back = False\n break\n\n if has_to_go_back:\n walk = walk.discovery_edge.opposite(walk)\n\n starting_vertex.discovery_edge = None # Remove dummy edge", "def dft_recursive(self, starting_vertex, visited = None):\n \"\"\"\n Check if Vertex is in visited\n if NOT visited, add to visited set\n Call dft_recursive on every neighbor \n \n\n \"\"\"\n # 1) base case >> where to stop recursion\n # 2) calls itself from within\n # 3) each iteration approaches base case\n\n # 1) base case >> where to stop recursion\n\n # init a set that persists after recursions loops to save visited\n if visited == None:\n visited = set()\n\n if starting_vertex not in visited: # 1) & 3) Check if vertex has NOT been visited\n visited.add(starting_vertex) # if True, add to visited set\n\n print(starting_vertex)\n\n # perform recursion on neighbor\n for n in self.get_neighbors(starting_vertex):\n self.dft_recursive(n, visited) # 2) ", "def DFS():\n\n\tglobal nonleaves\n\tdict_cp=copy.deepcopy(dict)\n\tnonleaves=[\"Root\"]\n\twaitlist=[\"Root\"]\n\twhile waitlist:\n\t\tpeek=waitlist[-1]\n\t\tif len(dict_cp[peek])>=2:\n\t\t\tsub_node=dict_cp[peek].pop()\n\t\t\tif dict[sub_node]:\n\t\t\t\tnonleaves.append(sub_node)\n\t\t\t\twaitlist.append(sub_node)\n\t\telse:\n\t\t\twaitlist.pop()", "def dft_recursive(self, starting_vertex, visited=None):\n if visited is None:\n visited = set()\n visited.add(starting_vertex)\n print(starting_vertex)\n for neighb_vert in self.vertices[starting_vertex]:\n if neighb_vert not in visited:\n self.dft_recursive(neighb_vert, visited)", "def depthFirstSearch(problem):\n\n\n \"*** YOUR CODE HERE ***\"\n st = util.Stack()\n strt = problem.getStartState()\n st.push(strt) \n visited = []\n came_from ={}\n came_from [strt] =(None,None)\n\n while not st.isEmpty():\n state = st.pop()\n if state in visited :\n continue\n visited.append(state)\n if problem.isGoalState(state) :\n break\n nodes = problem.getSuccessors(state)\n for (successor,action,cost) in nodes:\n if successor not in visited :\n st.push(successor)\n came_from[successor] = (state , action) \n \n # exit while\n actions = []\n while(state != strt) :\n (parent,action) =came_from[state]\n state = parent\n actions.append(action)\n actions.reverse()\n return actions", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n \n #Stack to hold the node that have been visited along with the path taken from the start node to reach that node.\n stack = Stack()\n #Set to hold the node explored.\n explorednode = set()\n #Get the start node.\n startnode = problem.getStartState()\n #Push the starting node on the Stack along with an empty set to know the direction in order to reach the node.\n stack.push((startnode,[]))\n #Loop till the stack is empty\n while stack.isEmpty() is not True:\n #Pop the currentnode and the direction from the stack\n 
currentnode, direction = stack.pop()\n #We will now add the node to set of explored node.\n explorednode.add(currentnode)\n #If the node is the goal. We made it!!\n if problem.isGoalState(currentnode):\n #print currentnode, direction\n #The direction holds the way to reach till the goal from the start node.\n #print direction\n return direction\n #Loop for each successor(child) of the current node.\n for (successor, action, stepCost) in problem.getSuccessors(currentnode):\n #If the successor(child) is not explored\n if successor not in explorednode:\n #Add the successor to the stack along with the path to reach it.\n stack.push((successor, direction + [action]))", "def depthFirstSearch(problem):\n\n explored = set()\n frontier = []\n start_state = problem.getStartState()\n frontier.append(start_state)\n parent_hash = {}\n parent_hash[start_state] = (None, None)\n\n def get_path(state):\n path_stack = util.Stack()\n actions = []\n current = state\n while parent_hash[current][0] is not None:\n path_stack.push(parent_hash[current][0])\n current = parent_hash[current][1]\n while not path_stack.isEmpty():\n actions.append(path_stack.pop())\n\n return actions\n\n while len(frontier):\n node = frontier.pop()\n if problem.isGoalState(node):\n return get_path(node)\n explored.add(node)\n for state, action, _ in problem.getSuccessors(node):\n if state not in explored and state not in frontier:\n parent_hash[state] = (action, node)\n frontier.append(state)", "def dfs(self):\n\n stack = [self.root]\n\n while stack:\n node = stack[-1]\n\n if node.goal:\n return True\n\n if not node.visited:\n node.visited = True\n\n for adj_node in self.return_adj_nodes(node):\n if adj_node and not adj_node.visited and not adj_node.wall:\n stack.append(adj_node)\n break\n else:\n stack.pop()\n\n return False", "def depthFirstSearch(problem):\n stack = Stack()\n\n visited = []\n parent_dict = dict()\n start_state = problem.getStartState()\n stack.push(start_state)\n current_path = []\n actions_dict = dict()\n final_actions = []\n flag = False\n\n if problem.isGoalState(problem.getStartState()):\n return []\n\n while not stack.isEmpty():\n current_state = stack.pop()\n current_path.append(current_state)\n visited.append(current_state)\n\n if problem.isGoalState(current_state):\n break\n\n successors = problem.getSuccessors(current_state)\n\n for s in successors:\n flag = False\n if s[0] not in visited:\n stack.push(s[0])\n parent_dict[s[0]] = current_state\n actions_dict[(current_state, s[0])] = s[1]\n flag = True\n\n\n\n if not successors and not stack.isEmpty() or flag is False:\n current_state = stack.pop()\n while current_path[-1] != parent_dict[current_state]:\n current_path.pop()\n stack.push(current_state)\n\n for i in range(len(current_path)-1):\n final_actions.append(actions_dict[current_path[i],current_path[i+1]])\n\n\n return final_actions", "def depthFirstSearch(problem):\n #\"*** YOUR CODE HERE ***\"\n\n \"\"\"\n Pseudocode:\n function G RAPH-S EARCH ( problem) returns a solution, or failure\n initialize the frontier using the initial state of problem\n initialize the explored set to be empty\n loop do\n if the frontier is empty then return failure\n choose a leaf node and remove it from the frontier\n if the node contains a goal state then return the corresponding solution\n add the node to the explored set\n expand the chosen node, adding the resulting nodes to the frontier\n only if not in the frontier or explored set\n\n \"\"\"\n frontier = util.Stack()\n #print 'Create frontier'\n initial_node = 
node(problem.getStartState(), 0, [], 0)#(state,depth,path_actions,path_cost)\n    frontier.push(initial_node)\n    #print 'Push ',repr(initial_node.state)\n    frontierSet = set([initial_node.state])\n    explored = set() #initialize the explored set to be empty\n\n    while True:\n        if frontier.isEmpty() == True: raise Exception, \"The frontier was emptied\"#if the frontier is empty then return failure\n        currNode = frontier.pop()#HERE1\n        frontierSet.remove(currNode.state)\n        #print 'Remove',repr(currNode.state)\n        #print 'State: ' + repr(currNode.state) + '. Depth: ' + repr(currNode.depth) + '. Path Cost: ' + repr(currNode.path_cost) + '. Path Actions: ' + repr(currNode.path_actions) + '.\\n'\n        if problem.isGoalState(currNode.state) == True:\n            print 'Goal reached!'\n            return currNode.path_actions\n        explored.add(currNode.state)\n        for succ in problem.getSuccessors(currNode.state):\n            #print 'Succ: ',repr(succ[0])\n            succNode = node(succ[0], currNode.depth + 1, currNode.path_actions + [succ[1],], currNode.path_cost + succ[2])\n            if (succNode.state not in explored):\n                # If we do these checks, then when a state we want to expand is found to already be in the frontier\n                # we remove that state from the frontier and expand it now. That is, we give priority to the new nodes\n                if(succNode.state in frontierSet):\n                    # Little trick:\n                    for frontierNode in frontier.list:\n                        if frontierNode.state == succNode.state:\n                            frontier.list.remove(frontierNode)\n                            frontierSet.remove(frontierNode.state)\n                # if ((succNode.state not in explored) and (succNode.state not in frontierSet)): \n                # Alternative according to the book. What it does is give priority to the old nodes.\n\n                # Here it did not check whether the state was already in the frontier because that altered the order in which the nodes are visited.\n                # For example, when a child with some state is pending (it was generated but not expanded),\n                # but at a deeper level the same state is generated again and has to be expanded.\n                # If we follow the DFS, I think we would have to expand that node there and not in the first call where it was left pending.\n                \n                frontier.push(succNode)\n                #print 'Push ',repr(succNode.state)\n                frontierSet.add(succNode.state)\n\n    #util.raiseNotDefined()", "def depthFirstSearch(problem):\n    \"*** YOUR CODE HERE ***\"\n    from util import Stack\n\n    frontier = Stack()\n    explored = []\n    actions = []\n\n    class node:\n        def __init__(self, path, dad, action):\n            self.path = path\n            self.dad = dad\n            self.action = action\n\n    start = node(problem.getStartState(),'','')\n    frontier.push(start)\n\n    while frontier.isEmpty() == False:\n        path = frontier.pop()\n        successors = problem.getSuccessors(path.path)\n        explored.append(path)\n        for vertex in successors:\n            achou = False\n            for path_ex in explored:\n                if vertex[0] == path_ex.path:\n                    achou = True\n            if achou == False:\n                successor = node(vertex[0],path.path,vertex[1])\n                frontier.push(successor)\n                if problem.isGoalState(successor.path):\n                    while len(explored) > 0:\n                        ant = explored.pop()\n                        if ant.path == successor.dad:\n                            actions.append(successor.action)\n                            successor = ant\n                    actions.reverse()\n                    return actions", "def breadthFirstSearch(problem):\r\n    \"*** YOUR CODE HERE ***\"\r\n    node = problem.getStartState()\r\n    if (problem.isGoalState(node)):\r\n        return [] # no need to make any moves if the start state is goal\r\n    start = (node, 'NoDirection',0)\r\n\r\n    frontier_queue = Queue() # queue for frontier\r\n    frontier_queue.push(start) # frontier consists of only the start state\r\n\r\n    explored_nodes = set()\r\n    explored_track = {start:None} # keep a 
track of parent, parent of root node is None\r\n\r\n while not frontier_queue.isEmpty():\r\n state = frontier_queue.pop() # pop the top element from the queue \r\n explored_nodes.add(state)\r\n\r\n if problem.isGoalState(state[0]):\r\n return get_track(explored_track, state)\r\n\r\n neighbors_state = problem.getSuccessors(state[0])\r\n for neighbor in neighbors_state: # neighbor will be something like this ((34, 15), 'South', 1)\r\n if neighbor not in frontier_queue.list and neighbor not in explored_nodes:\r\n frontier_queue.push(neighbor)\r\n explored_track[neighbor] = state\r\n\r\n\r\n def get_track(explored_track, state):\r\n from game import Directions\r\n track_history = [state]\r\n track_history_direction = []\r\n leaf = state\r\n while (explored_track[leaf]) != start:\r\n track_history.append(explored_track[leaf])\r\n leaf = explored_track[leaf]\r\n\r\n for j in range (len(track_history),-1,-1):\r\n this_step = track_history[j-1]\r\n this_step = this_step[1]\r\n track_history_direction.append(this_step)\r\n return track_history_direction[:-1]", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n # initialize frontier using initial state of problem\n current_state = problem.getStartState()\n frontier = util.Stack()\n frontier.push(current_state)\n\n # initialize explored set to be empty\n explored_set = []\n\n # a dictionary to save how to get to certain states from initial state\n actions_list = {current_state:[]}\n\n # loop while we still have unexplored nodes\n while not frontier.isEmpty():\n\n # choose a leaf node and remove it from frontier\n leaf_node = frontier.pop()\n\n # return the solution if it is the goal state\n if problem.isGoalState(leaf_node):\n return actions_list[leaf_node]\n\n # add the node to explored set\n explored_set.append(leaf_node)\n\n # expand the chosen node\n # and add to the frontier if not in frontier and explored set\n for successor in problem.getSuccessors(leaf_node):\n child, action, _ = successor\n if child not in explored_set and child not in frontier.list:\n frontier.push(child)\n actions_list[child] = actions_list[leaf_node] + [action]\n else:\n # search through all but still can't find a solution -> failed!\n return 'failure'", "def dft_recursive(self, starting_vertex, visited=None):\n \n # for vertex in self.get_neighbors(starting_vertex):\n # if vertex not in visited:\n # visited.add(vertex)\n # self.dft_recursive(vertex, visited)\n # return visited\n if visited == None:\n visited = set()\n print(starting_vertex)\n visited.add(starting_vertex)\n for v in self.get_neighbors(starting_vertex):\n if v not in visited:\n self.dft_recursive(v, visited)", "def solveOneStep(self):\n ### Student code goes here\n state = self.currentState\n #print (type(state))\n self.visited[state] = True\n #print (type(self.gm.getGameState()))\n moves = self.gm.getMovables()\n print (\"CURRENTSTATE\" + str(self.currentState.state))\n print (\"MOVABLES:\")\n if moves:\n for m in moves:\n print (str(m))\n print (\"CHILDINDEX:\")\n print (state.nextChildToVisit)\n print (\"*********\")\n if state.state == self.victoryCondition:\n return True\n #if no child to expand then go back\n if not moves or state.nextChildToVisit >= len(moves):\n self.currentState = state.parent\n if state.requiredMovable is not None:\n self.gm.reverseMove(state.requiredMovable)\n # expand\n else:\n\n next_move = moves[state.nextChildToVisit]\n self.gm.makeMove(next_move)\n state.nextChildToVisit += 1\n\n #if to parent or if visited then skip\n while (((state.parent is not None) and 
(self.gm.getGameState() == state.parent.state))) or GameState(self.gm.getGameState(), 0, None) in self.visited:\n print (\"PARENT FOUND!\")\n self.gm.reverseMove(next_move)\n if state.nextChildToVisit >= len(moves):\n self.currentState = state.parent\n return False\n else:\n next_move = moves[state.nextChildToVisit]\n self.gm.makeMove(next_move)\n state.nextChildToVisit += 1\n\n next_state = GameState(self.gm.getGameState(), state.depth + 1, next_move)\n next_state.parent = state\n #next_state.requiredMovable = next_move\n state.children.append(next_state)\n self.currentState = next_state\n print (state.nextChildToVisit)\n return False", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n from util import Stack\n \n st = Stack()\n mapper = {}\n mapper[problem.getStartState()] = None\n\n st.push(problem.getStartState())\n while not(st.isEmpty()):\n vertex = st.pop()\n \n if (problem.isGoalState(vertex)):\n c = vertex\n l = []\n while mapper[c] != None:\n tup = mapper[c]\n l.append(tup[1])\n c = tup[0]\n l.reverse()\n print l\n return l\n\n else:\n neigh = problem.getSuccessors(vertex)\n # neigh.reverse()\n # neigh.sort()\n for child in neigh:\n if child[0] not in mapper:\n st.push(child[0])\n mapper[child[0]] = (vertex, child[1])\n # print mapper\n \n # visited = []\n # p = dfsRecursive(problem, problem.getStartState(), st, visited, [])\n # return p\n \n # pathfind = {}\n # st.push(problem.getStartState())\n # iterative approach:\n # while (not st.isEmpty()):\n # point = st.pop() # (x,y)\n # if problem.isGoalState(point):\n # # print point\n # print pathfind\n # # print visited\n # elif (not (point in visited)):\n # visited.append(point)\n # # print pathfind, '\\n'\n # print visited, '\\n'\n # for child in problem.getSuccessors(point):\n # st.push(child[0])\n # pathfind[child[0]] = point #this preemptively adds!\n # util.raiseNotDefined()", "def main():\n\n N, k = map(int, sys.stdin.readline().split())\n initial_state = tuple( map(int, sys.stdin.readline().split()) )\n final_state = tuple( map(int, sys.stdin.readline().split()) )\n\n init_path = [initial_state]\n all_paths = [init_path]\n\n visited_states = set()\n visited_states.add(initial_state)\n\n while all_paths:\n popped_path = all_paths.pop(0)\n current_state = popped_path[-1]\n\n if current_state == final_state:\n solution = popped_path\n break\n\n for state, action in generate_moves(current_state, N, k).items():\n if state not in visited_states:\n visited_states.add(state)\n new_path = popped_path + [action,state]\n all_paths.append(new_path)\n\n display_solution(solution)", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n #util.Stack() = LIFO for DFS\n #travel down path until end of line unlike BFS, backtrack until there is another path\n\n visited = []\n\n frontier = util.Stack()\n frontier.push( (problem.getStartState(), []) ) \n\n while not frontier.isEmpty():\n node,actions = frontier.pop()\n\n if problem.isGoalState(node):\n return actions\n\n visited.append(node)\n\n for coord,direction,cost in problem.getSuccessors(node):\n if not coord in visited:\n frontier.push((coord, actions + [direction]))\n\n return []", "def dft_recursive(self, starting_vertex):\n \n visited = []\n\n def helper(vert, visited):\n visited.append(vert)\n print(vert)\n\n for child in self.vertices[vert]:\n if child not in visited:\n helper(child, visited)\n\n helper(starting_vertex, visited)", "def dft_recursive(self, starting_vertex, visited=None):\n if visited is None: # if visited is None\n visited = set() # create an empty 
'set' to store visisted vertex, set sorts \n\n if starting_vertex not in visited: # if starting_vertex has not been visited yet\n print(starting_vertex)\n visited.add(starting_vertex) # add to the set \n\n for neighbor in self.vertices[starting_vertex]: # loop through each neighbor \n self.dft_recursive(neighbor, visited) # call the dft_recursive method on each neighbor ", "def depthFirstSearch(problem):\n\t#print(\"Start:\", problem.getStartState())\n\t#print(\"Is the start a goal?\", problem.isGoalState(problem.getStartState()))\n\t#print(\"Start's successors:\", problem.getSuccessors(problem.getStartState()))\n\t\n\n\t\"*** YOUR CODE HERE ***\"\n\n\t# Create the stack, and visited array to keep track of visited nodes.\n\tdfsStack = util.Stack()\n\tvisited = []\n\t# Get the first state in the graph, push to the stack\n\tfirst = problem.getStartState()\n\tdfsStack.push([first, [], 0])\n\n\t# While the stack is not empty, pop the first node from the stack, and check if that state\n # is the goal state. If so, return the actions for that node. Otherwise, append that state\n # to the visited array, get its successors, and push them to the stack.\n\twhile not dfsStack.isEmpty():\n\t\tNewNode = dfsStack.pop()\n\t\tif((problem.isGoalState(NewNode[0]) == True)):\n\t\t\treturn NewNode[1]\n\t\tif(NewNode[0] not in visited):\n\t\t\tvisited.append(NewNode[0])\n\t\t\tfor NextNode in problem.getSuccessors(NewNode[0]):\n\t\t\t\tif NextNode[0] not in visited:\n\t\t\t\t\tdfsStack.push((NextNode[0], NewNode[1] + [NextNode[1]], NextNode[2]))", "def DFS(self, nDepth, treenode, state):\n \n visited = []\n visited.insert(0, (state, treenode))\n \n for index in range(0, nDepth-1): \n actions = self.priorProb(state)\n treenode.expansion(actions)\n treenode.updateU_value(actions)\n treenode, action = treenode.selection() \n state = state.do_move(action).copy()\n visited.insert(0, (state, treenode)) \n \n for index in range(0, len(visited)-1): \n if(visited[index][1].isLeaf() == True):\n value = self.leafEvaluation(visited[index][0])\n else: \n value = visited[index][1].backUp(value)\n visited[-1][1].updateQ_value(value)\n visited[-1][1].updateVisits()\n return visited[-1][1]", "def dft(self, starting_vertex):\n \"\"\" LIFO\n Create a stack \n Push starting Vertex\n Create a set to store visited\n While the stack is NOT empty: e.g. > 0\n Pop the last added Vertex\n Check IF NOT visited:\n Mark as visited\n\n\n Push ALL of neighbors\n \"\"\"\n s = Stack() # Create a stack\n s.push(starting_vertex) # Push starting Vertex\n visited = set() # Create a set to store visited\n\n while s.size() > 0: # While the stack is NOT empty: e.g. > 0\n v = s.pop() # Pop the last added Vertex\n\n if v not in visited: # Check IF NOT visited: e.g. 
> 0\n print(v)\n visited.add(v) # Mark as visited\n\n for n in self.get_neighbors(v): # Check IF NOT visited:\n s.push(n) # Push ALL of neighbors ", "def solveOneStep(self):\n ### Student code goes here\n # Mark this move as explored\n self.visited[self.currentState] = True\n self.visited_states.append(self.currentState.state)\n\n # Get move to make\n movables = self.gm.getMovables()\n # print(\"EXPLORING GAME STATE \" + str(self.gm.getGameState()) + \"---------------------------------------------------------\")\n to_move = self.currentState.nextChildToVisit # movables index\n # print(\"depth \", self.currentState.depth)\n\n # Return if done\n if self.currentState.state == self.victoryCondition:\n # print(\"DONE\")\n return True\n\n # If current state has no children, make children\n if not self.currentState.children:\n for movable_statement in movables:\n # Make the move\n # print(\"implementing move \", movable_statement)\n self.gm.makeMove(movable_statement)\n\n # Create a new state with this move made\n new_state = self.gm.getGameState()\n # print (\"new state \", new_state)\n\n # If the new state hasn't been visited and isn't in the queue then add it as a child and to the queue\n if (new_state not in self.visited_states):\n new_gs = GameState(new_state, self.currentState.depth + 1, movable_statement)\n new_gs.parent = self.currentState\n self.currentState.children.append(new_gs)\n self.currentState.nextChildToVisit = to_move + 1\n self.visited[new_gs] = True\n self.visited_states.append(new_state)\n self.gs_queue.append(new_gs)\n\n self.gm.reverseMove(movable_statement)\n\n # Return false if no more to explore\n if not self.gs_queue:\n return False\n\n # Revert to state at when current and next start to change\n root_curr = self.currentState\n self.currentState = self.gs_queue.popleft()\n root_new = self.currentState\n\n # Backtrack to when current node and new node start to diverge\n if root_new.depth == root_curr.depth:\n while root_curr.state != root_new.state:\n self.gm.reverseMove(root_curr.requiredMovable)\n root_curr = root_curr.parent\n root_new = root_new.parent\n else:\n while root_curr.requiredMovable:\n self.gm.reverseMove(root_curr.requiredMovable)\n root_curr = root_curr.parent\n\n # Return game master to state that we are exploring\n # Find path between root and current state\n path = []\n currNode = self.currentState\n while currNode != root_curr:\n path.append(currNode.requiredMovable)\n currNode = currNode.parent\n\n # Created backwards path, now make moves from root to current state\n path.reverse()\n for movable_statement in path:\n self.gm.makeMove(movable_statement)\n\n return False", "def recursive_dfs(self, start, end, visited=None, path=None):\n # initialize path list with starting vertice\n if path is None:\n path = [start]\n\n # initialize empty set for visited vertices\n if visited is None:\n visited = set()\n \n # add starting vertice to visited vertices\n visited.add(start)\n\n # store all neighbors of start vertice in set\n neighbors = set([x for x in start.get_neighbors()])\n \n # initialize loop count as 0\n loop_count = 0\n\n # iterate through vertices\n for next in neighbors - visited:\n # remove visited path if it results in a dead end (incomplete)\n if loop_count > 0:\n path.pop()\n\n # add to list of visited vertices\n path.append(next)\n\n # full path completed\n if next == end:\n visited.add(next)\n ordered_vertice_path = ([(x.id) for x in path])\n print(\"There exists a path between vertex %s and %s: TRUE\" %(sys.argv[2], sys.argv[3]))\n 
print(\"Vertices in the path:\", ','.join(ordered_vertice_path))\n return True\n\n loop_count += 1\n\n self.recursive_dfs(next, end, visited, path)", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n fringeList = util.Stack()\n print \"fringeList\",fringeList\n closedList = {str(problem.getStartState()): ([])} #Hash Map to maintain state to path\n print \"closed list:\", closedList\n isGoalStateArrived = False\n\n # Push start state into fringeList\n fringeList.push((problem.getStartState()))\n\n while not isGoalStateArrived and not fringeList.isEmpty():\n currentNode = fringeList.pop()\n print \"currentNode\",currentNode\n currentNodePath = closedList[str(currentNode)]\n print \"currentNodepath:\",currentNodePath\n # Explore children\n childrenOfCurrentNode = problem.getSuccessors(currentNode)\n print \"childrenOfCurrentNode:\",childrenOfCurrentNode\n for childNode in childrenOfCurrentNode:\n if str(childNode[0]) not in closedList:\n path = copy.copy(currentNodePath)\n path.append(childNode[1])\n print \"child [0] %s, child [1] %s\", childNode[0],childNode[1]\n print \"path \", path\n fringeList.push(childNode[0])\n closedList[str(childNode[0])] = path # Put parent node in closed List\n if problem.isGoalState(childNode[0]):\n isGoalStateArrived = True\n goalState = childNode[0]\n break\n\n if isGoalStateArrived:\n #print closedList[str(problem.getStartState())]\n return closedList[str(goalState)]\n \"util.raiseNotDefined()\"", "def depthFirstSearch(problem):\n \n from game import Directions\n North = Directions.NORTH\n South = Directions.SOUTH\n East = Directions.EAST\n West = Directions.WEST \n \n pathDict = {}\n visited = set()\n #visited start\n visited.add(problem.getStartState())\n #initial successors\n successor = problem.getSuccessors(problem.getStartState())\n for initSucc in successor:\n pathDict[initSucc[0]] = [initSucc[1]]\n #loop\n while (1):\n #if fringe = null, return failure\n if (len(successor) == 0):\n print \"Fringe is empty\"\n return util.raiseNotDefined()\n #(v, path) = fringe.pop\n succLocation = successor[0][0]\n succDirection = successor[0][1]\n del successor[0]\n #if isGoal = true, return path\n if problem.isGoalState(succLocation):\n return pathDict[succLocation]\n #if visited = false\n if succLocation not in visited:\n #visited = true\n visited.add(succLocation)\n #L = expand(v,path)\n tempSuccList = problem.getSuccessors(succLocation)\n #Fringe <- L\n for succ in reversed(tempSuccList):\n successor.insert(0,succ)\n pathDict[succ[0]] = []\n pathDict[succ[0]].extend(pathDict[succLocation])\n pathDict[succ[0]].append(succ[1])", "def solveOneStep(self):\n ### Student code goes here\n # Mark this move as explored\n self.visited[self.currentState] = True\n\n # Get move to make\n movables = self.gm.getMovables()\n # print(\"EXPLORING GAME STATE \" + str(self.gm.getGameState()) + \"---------------------------------------------------------\")\n to_move = self.currentState.nextChildToVisit # movables index\n # print(\"depth \", self.currentState.depth)\n\n # Return if done\n if self.currentState.state == self.victoryCondition:\n # print(\"DONE\")\n return True\n\n while to_move < len(movables):\n # Make the move\n movable_statement = movables[to_move]\n # print(\"implementing move \", movable_statement)\n self.gm.makeMove(movable_statement)\n\n # Create a new state with this move made\n new_state = self.gm.getGameState()\n\n # Find out if this state has already been explored\n visited = False\n for visited_state in self.visited.keys():\n if 
visited_state.state == new_state:\n visited = True\n\n # If the new state hasn't been visited then add it as a child then move down to this child\n if not visited:\n new_gs = GameState(new_state, self.currentState.depth + 1, movable_statement)\n new_gs.parent = self.currentState\n self.currentState.children.append(new_gs)\n self.currentState.nextChildToVisit = to_move + 1\n self.currentState = new_gs\n break\n\n # Else skip this state and try going to the next movable statement\n else:\n # print(\"SKIP THIS STATE\")\n self.gm.reverseMove(movable_statement)\n to_move += 1\n\n # Went all the way down to a leaf, backtrack\n if (to_move >= len(movables)):\n self.gm.reverseMove(self.currentState.requiredMovable)\n self.currentState = self.currentState.parent\n\n return False", "def depthFirstSearch(problem):\n #Initializing variables\n fringe = util.Stack()\n #Creating visited list\n visited = []\n #Pushing start state to Stack\n fringe.push((problem.getStartState(), []))\n #Adding start state to visited list\n visited.append(problem.getStartState())\n \n #Popping point from the stack\n while fringe.isEmpty() == False:\n state, actions = fringe.pop()\n #Getting successor nodes\n for next in problem.getSuccessors(state):\n newstate = next[0]\n newdirection = next[1]\n #Pushing successor nodes to the stack and appending to visited\n if newstate not in visited:\n if problem.isGoalState(newstate):\n return actions + [newdirection] \n else:\n fringe.push((newstate, actions + [newdirection]))\n visited.append(newstate)\n\n util.raiseNotDefined()", "def solveOneStep(self):\n ### Student code goes here\n\n if self.currentState.state == self.victoryCondition:\n return True\n\n current_move = False\n current_depth = self.currentState.depth + 1\n list_movables = self.gm.getMovables()\n\n while not current_move:\n count = self.currentState.nextChildToVisit\n if len(list_movables) <= count:\n if not self.currentState.parent:\n return False\n else:\n self.gm.reverseMove(self.currentState.requiredMovable)\n list_movables = self.gm.getMovables()\n self.currentState = self.currentState.parent\n current_depth = self.currentState.depth + 1\n continue\n\n next_move = list_movables[count]\n self.gm.makeMove(next_move)\n new_game_state = GameState(self.gm.getGameState(), current_depth, next_move)\n if new_game_state in self.visited:\n self.currentState.nextChildToVisit += 1\n self.gm.reverseMove(next_move)\n else:\n self.currentState.nextChildToVisit += 1\n new_game_state.parent = self.currentState\n self.currentState.children.append(new_game_state)\n self.currentState = new_game_state\n current_move = next_move\n\n if self.currentState.state != self.victoryCondition:\n self.visited[self.currentState] = True\n return False\n else:\n return True", "def dfs(start_node, goal_state, limit = None, iterative = False, graphSearch = False, improved_descendants = False):\t\n\tfringe = [start_node]\n\tnumber_nodes_expanded = 0\n\tnumber_nodes_visited = 0\n\n\tt0 = time.time()\n\n\tif graphSearch:\n\t\tclosed = {} #hash_map\n\n\twhile len(fringe) > 0:\n\t\tnumber_nodes_visited += 1\n\t\tnode = fringe.pop()\n\t\tnode.count = number_nodes_visited\n\n\t\tt1 = time.time()\n\t\tif (t1 - t0) > 900:\n\t\t\tprint(\"It took more than 15 min\")\n\t\t\tif iterative:\n\t\t\t\treturn False\n\t\t\telse:\n\t\t\t\treturn False\n\t\t\n\t\tif node.check_solution(goal_state):\n\t\t\t_ = print_solution(node, number_nodes_expanded, goal_state)\n\t\t\tif iterative:\n\t\t\t\treturn True, number_nodes_visited\n\t\t\tprint(\"Expanded nodes: \" + 
str(number_nodes_expanded))\n\t\t\treturn True \n\n\n\t\tif limit == None or node.depth < limit:\n\t\t\tif graphSearch:\n\t\t\t\tnode_hash = node.build_hash()\n\t\t\t\tnode_depth = node.depth\n\t\t\t\t#can also add if it's found i at smaller depth. Grants solution every time\n\t\t\t\tif node_hash not in closed or closed[node_hash] > node_depth:\n\t\t\t\t\tclosed[node_hash] = node_depth\n\t\t\t\t\tnumber_nodes_expanded += 1\n\t\t\t\t\tchild_nodes = node.successors(improved_descendants)\n\t\t\t\t\tfor i in range(len(child_nodes)):\n\t\t\t\t\t\tfringe.append(child_nodes[i])\n\t\t\telse:\n\t\t\t\tnumber_nodes_expanded += 1\n\t\t\t\tchild_nodes = node.successors(improved_descendants)\n\t\t\t\tfor i in range(len(child_nodes)):\n\t\t\t\t\tfringe.append(child_nodes[i])\n\t\n\tif iterative:\n\t\treturn False, number_nodes_visited\n\t\t\t\n\treturn False", "def depthFirstSearch(problem):\n from game import Directions\n s = Directions.SOUTH\n w = Directions.WEST\n n = Directions.NORTH\n e = Directions.EAST\n \n visitedlist = []\n st = Stack()\n outputlist = []\n st.push(problem.getStartState())\n visitedlist.append(problem.getStartState())\n recurseDFS(st,problem,visitedlist)\n if st.isEmpty():\n print \"No Path exist\"\n else:\n while not st.isEmpty():\n value = st.pop()\n if len(value) == 2:\n continue\n if value[1] == 'South':\n outputlist.append(s)\n elif value[1] == 'North':\n outputlist.append(n)\n elif value[1] == 'East':\n outputlist.append(e)\n elif value[1] == 'West':\n outputlist.append(w)\n \n return outputlist[::-1]", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n startState = problem.getStartState()\n visitedNodes = []\n actions = []\n fringe = util.Stack()\n cost = 0 \n if (problem.isGoalState(startState) == True):#if startState is the goalState\n return actions\n else :\n # Data Type Format : (currentState,actions,cost) based on errors I got :\\\n fringe.push((startState,actions,cost))\n while (fringe.isEmpty() == False) :\n currentState , actions , cost = fringe.pop()\n if(problem.isGoalState(currentState)):\n return actions\n \n elif ((currentState in visitedNodes) == False ):\n visitedNodes.append(currentState)\n currentNodeSuccessors = problem.getSuccessors(currentState)\n for node in currentNodeSuccessors :\n state , action , cost = node\n if ( (state in visitedNodes) == False ):\n newNode = (state , actions + [action] , cost)\n fringe.push(newNode)\n \n util.raiseNotDefined()", "def dfs(g):\n global time\n time = 0\n\n for v in g:\n v.discovery = 0\n v.finish_time = 0\n v.color = 'white'\n\n for v in g:\n if v.color == 'white':\n dfs_visit(v)", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n stack = util.Stack() # stack for searshing the graph\n visited = [] # Keep track of visited nodes\n start =problem.getStartState() # The start node\n stack.push((start, [])) # the sart state and empty path list is pushed to the stack\n \n while stack:\n (vrtx, path) = stack.pop() # Pop tfrom the stack , vrtx: the poped node for expantion.\n if vrtx not in visited: # if the node is visited alraedy \n if problem.isGoalState(vrtx):\n return [p[1] for p in path]\n visited.append(vrtx)\n for successor in problem.getSuccessors(vrtx):\n stack.push((successor[0], path+[successor]))\n util.raiseNotDefined()", "def iterativeDeepeningSearch(problem):\n from util import Stack\n \n for max_depth in range(0, 10000000):\n # print max_depth\n st = Stack()\n mapper = {}\n mapper[(problem.getStartState(), 0)] = None #map of (childpos, depth): (parentpos, direction, depth)\n 
st.push((problem.getStartState(), 0)) # stack of ((x,y) , depth)\n\n while not(st.isEmpty()):\n vertex = st.pop() #( (x,y) , depth )\n depth = vertex[1]\n\n if (problem.isGoalState(vertex[0])):\n c = vertex\n l = []\n while mapper[c] != None:\n tup = mapper[c]\n l.append(tup[1])\n c = tup[0], tup[2]\n l.reverse()\n print \"max_depth: \", max_depth\n print l\n return l\n\n else:\n n_depth = depth + 1 # new depth\n if n_depth < max_depth:\n neigh = problem.getSuccessors(vertex[0])\n # neigh.reverse()\n for child in neigh:\n if (child[0], n_depth) not in mapper:\n st.push((child[0], n_depth))\n mapper[(child[0], n_depth)] = (vertex[0], child[1], depth)", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n mystack = util.Stack()\n startNode = (problem.getStartState(), '', 0, [])\n mystack.push(startNode)\n visited = set()\n while mystack :\n node = mystack.pop()\n state, action, cost, path = node\n if state not in visited :\n visited.add(state)\n if problem.isGoalState(state) :\n path = path + [(state, action)]\n break;\n succNodes = problem.expand(state)\n for succNode in succNodes :\n succState, succAction, succCost = succNode\n newNode = (succState, succAction, cost + succCost, path + [(state, action)])\n mystack.push(newNode)\n actions = [action[1] for action in path]\n del actions[0]\n return actions", "def dfs(self, starting_vertex, destination_vertex):\n pass # TODO", "def recursive_dft(self, start, visited=[]):\n if start not in visited:\n visited.append(start)\n for i in self.neighbors(start):\n self.recursive_dft(i, visited)\n return visited", "def DFS(self, start_vertex):\n yield from self._search(start_vertex, kind='DFS')", "def dft(self, starting_vertex):\n # create an empty stack and push the starting vertex ID\n stack = Stack()\n stack.push(starting_vertex)\n # create an empty Set to store the visited vertices\n visited = set()\n # while the stack is not empty ...\n while stack.size() > 0:\n # pop the first vertex\n vert = stack.pop()\n # if that vertex has not been visited ..\n if vert not in visited:\n # mark it is visited\n visited.add(vert)\n print(vert)\n # then add all of its neighbors to the top of the stack\n for neighbor in self.vertices[vert]: #self.get_neighbors(vert)\n stack.push(neighbor)", "def dft_recursive(self, starting_vertex, visited=None):\n # First, we set our initial condition\n if visited is None:\n # If no nodes have been visited, we create a set to store the nodes we visit\n visited = set()\n\n # Then we add the starting vertex to the visited set\n visited.add(starting_vertex)\n print(starting_vertex)\n\n # Call the function recursively on neighbors not visited\n # Lastly we write a for loop that will recursively call dft_recursive()\n for neighbor in self.vertices[starting_vertex]:\n # For each vertex, we check to see if any of the neighbors have already been visited\n if neighbor not in visited:\n # And if we find a neighbor that has not been visited, we recursively call dft_recursive() and pass it the neighbor and updated visited set\n self.dft_recursive(neighbor, visited)", "def dfs(self, starting_vertex, destination_vertex):\n \"\"\" LIFO\n Create a stack\n Create a set to store visited\n PUSH starting vertex into an array (STACK)\n While the STACK is NOT empty \n get((pop) first PATH vertex\n get Vertex from END of PATH\n check if NOT visited\n mark as visited\n check if vertex is destination_vertex\n If TRUE, return path \n PUSH path to ALL of neighbors\n make copy of current path\n add neighbor to path copy\n PUSH path copy\n \"\"\" \n 
s = Stack() # Create a stack\n s.push([starting_vertex]) # PUSH starting vertex into an array (STACK)\n visited = set() # Create a set to store visited\n\n while s.size() > 0: # While the STACK is NOT empty\n path = s.pop() # get(pop) first PATH vertex)\n v = path[-1] # get Vertex from END of PATH \n\n while v not in visited: # check if NOT visited\n visited.add(v) # mark as visited\n\n if v == destination_vertex: # check if vertex is destination_vertex\n return path # If TRUE, return path \n\n for n in self.get_neighbors(v): # PUSH path to ALL of neighbors\n path_c = path[:] # make copy of current path\n # path_c.extend([n]) # add neighbor to path copy\n path_c.append(n) # add neighbor to path copy\n s.push(path_c) # PUSH path copy", "def dfs(self, starting_vertex, destination_vertex):\n # TODO", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n from util import Stack\n Pilha_Caminho = Stack()\n Pilha_Estados = Stack()\n Caminho = []\n Visitados = []\n\n Pilha_Caminho.push(Caminho) # empilha caminho (vazio, no começo)\n Pilha_Estados.push(problem.getStartState()) # empilha estado inicial\n\n while (Pilha_Caminho.isEmpty() == False and Pilha_Estados.isEmpty() == False):\n Caminho_Andado = Pilha_Caminho.pop() # atualiza caminho\n Estado_Atual = Pilha_Estados.pop() # atualiza estado\n if problem.isGoalState(Estado_Atual): # caso estado atual seja o desejado,\n return Caminho_Andado # retorna o caminho total\n if Estado_Atual not in Visitados: # caso estado atual não tenha sido visitado\n Visitados.append(Estado_Atual) # marca estado como visitado\n for Sucessor in problem.getSuccessors(Estado_Atual): # busca sucessores\n if Sucessor[0] not in Visitados: # caso sucessor não tenha sido visitado\n Pilha_Caminho.push(Caminho_Andado + [Sucessor[1]]) # atualiza caminho total na pilha\n Pilha_Estados.push(Sucessor[0]) # atualiza estado\n return", "def sudokuDepthFirstSearch(problem):\n\n def convertStateToHash(values):\n \"\"\" \n values as a dictionary is not hashable and hence cannot be used directly in the explored/visited set.\n This function changes values dict into a unique hashable string which can be used in the explored set.\n You may or may not use this\n \"\"\"\n l = list(sorted(values.items()))\n modl = [a+b for (a, b) in l]\n return ''.join(modl)\n\n ## YOUR CODE HERE\n root_node = Node(problem.getStartState(), [], 0, None, 0)\n frontier = util.Stack()\n frontier.push(root_node)\n explored = []\n\n while not(frontier.isEmpty()):\n node_to_explore = frontier.pop()\n\n if problem.isGoalState(node_to_explore.state):\n return node_to_explore.state\n else:\n copy_state = node_to_explore.state.copy()\n \n if convertStateToHash(copy_state) not in explored:\n\t explored.append(convertStateToHash(copy_state))\n\t successors_state = problem.getSuccessors(copy_state)\n\t if len(successors_state) > 0:\n\t\t for state_action_cost in successors_state:\n\t\t if convertStateToHash(state_action_cost[0]) in explored:\n\t\t continue\n\t\t else:\n\t\t frontier.push(Node(state_action_cost[0], state_action_cost[1], node_to_explore.path_cost + 1, node_to_explore, node_to_explore.depth + 1))\n\n return False\n # util.raiseNotDefined()", "def solveOneStep(self):\n ### Student code goes here\n if self.currentState not in self.visited:\n self.visited[self.currentState]=True\n return self.currentState.state == self.victoryCondition\n\n if self.currentState.state == self.victoryCondition:\n self.visited[self.currentState]=True\n return True\n\n if not self.currentState.children:\n for move in 
self.gm.getMovables():\n self.gm.makeMove(move)\n childrenState = GameState(self.gm.getGameState(), self.currentState.depth+1, move)\n if childrenState not in self.visited:\n childrenState.parent = self.currentState\n self.currentState.children.append(childrenState)\n self.gm.reverseMove(move)\n\n if self.currentState.nextChildToVisit<len(self.currentState.children):\n nextState = self.currentState.children[self.currentState.nextChildToVisit]\n self.currentState.nextChildToVisit += 1\n self.gm.makeMove(nextState.requiredMovable)\n self.currentState = nextState\n return self.solveOneStep()\n else:\n self.gm.reverseMove(self.currentState.requiredMovable)\n self.currentState = self.currentState.parent\n return self.solveOneStep()", "def idfs(start_node, goal_state, improved_descendants = False):\t\n\tnumber_nodes_expanded = 0\n\tt0 = time.time()\n\n\tfor lim in range(21): #from depth 0 to 20\n\t\tsolution, number_nodes_expanded_iter = dfs(start_node, goal_state, lim, iterative= True, improved_descendants= improved_descendants)\n\t\tnumber_nodes_expanded += number_nodes_expanded_iter\n\n\t\tt1 = time.time()\n\t\tif (t1 - t0) > 900:\n\t\t\tprint(\"It took more than 15 min\")\n\t\t\treturn False\n\n\t\tif solution:\n\t\t\tprint(\"Expanded nodes: \" + str(number_nodes_expanded))\n\t\t\treturn True\n\t\t\n\treturn False", "def dfs(self, starting_vertex, destination_vertex):\n # create an empty stack \n stack = Stack()\n #push the starting vertex ID as list\n stack.push([starting_vertex])\n # create an empty Set to store the visited vertices\n visited = set()\n # while the stack is not empty ...\n while stack.size() > 0:\n # pop the first vertex\n path = stack.pop()\n vert = path[-1]\n # if that vertex has not been visited ..\n if vert not in visited:\n #check for target\n if vert == destination_vertex:\n return path\n # mark it is visited\n visited.add(vert)\n # then add all of its neighbors to the top of the stack\n for neighbor in self.vertices[vert]: #self.get_neighbors(vert)\n #copy path to avoid pass by reference\n new_path = list(path) # make a copy\n new_path.append(neighbor)\n stack.push(new_path)", "def depthFirstSearch(problem):\n\n # Initialization\n startState = problem.getStartState()\n # print \"Start:\", startState\n\n if problem.isGoalState(startState):\n return [] # No action needed\n\n route = util.Stack()\n closed = set([startState])\n stack = util.Stack() # DFS use stack\n\n # print problem.getSuccessors(startState)\n \n for successor in problem.getSuccessors(startState):\n # Use list(old_list) to make a copy of current route\n stack.push((successor, list(route.list)))\n \n # Tree search\n while not stack.isEmpty():\n #print stack.list\n ((currentState, action, cost), route.list) = stack.pop()\n\n if currentState in closed:\n continue # Skip the residue of expanded states in the stack\n\n # print \"Go \", action\n # print \"In \", currentState\n route.push(action)\n\n if problem.isGoalState(currentState): # Check for goal condition\n # print route.list\n # util.pause()\n return route.list # Return the route\n \n # Current state is not goal state\n closed.add(currentState)\n for successor in problem.getSuccessors(currentState):\n if successor[0] in closed:\n # print \"-Closed \", successor\n continue # this state is already expanded\n \n # print \"-Open \", successor\n # Use list(old_list) to make a copy of current route\n stack.push((successor, list(route.list)))", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n from game import Directions\n s = 
Directions.SOUTH\n w = Directions.WEST\n n = Directions.NORTH\n e = Directions.EAST\n\n result = []\n st = util.Stack()\n visited = set([])\n\n current = problem.getStartState()\n st.push((current, \"\", 0))\n\n while not st.isEmpty():\n while st.top()[0] in visited:\n st.pop()\n result.pop()\n\n current = st.top()\n visited.add(current[0])\n\n if current[1] != \"\":\n result.append(current[1])\n\n if problem.isGoalState(current[0]):\n break\n\n for each in problem.getSuccessors(current[0]):\n if each[0] not in visited:\n st.push(each)\n\n path = []\n for each in result:\n if each == \"South\":\n path.append(s)\n elif each == \"West\":\n path.append(w)\n elif each == \"North\":\n path.append(n)\n else:\n path.append(e)\n\n return path\n util.raiseNotDefined()", "def DFS(G, s, t, randc=3, d=None):\n print('-' * (4-randc), s, t, randc, d)\n if randc < 0:\n return False\n if s[0] == t[0] and abs(s[1] - t[1]) == 1:\n return True\n if s[1] == t[1] and abs(s[0] - t[0]) == 1:\n return True\n for direction in (0,1,2,3):\n if direction == 0:\n u = (s[0]-1, s[1])\n elif direction == 1:\n u = (s[0]+1, s[1])\n elif direction == 2:\n u = (s[0], s[1]-1)\n else:\n u = (s[0], s[1]+1)\n print('-' * (5-randc), 'try', direction, u, G.shape)\n if 0 <= u[0] < G.shape[1] and 0 <= u[1] < G.shape[0] and G[u[1], u[0]] == 0:\n new_randc = randc if direction == d else randc - 1\n if DFS(G, u, t, new_randc, direction):\n return True\n\n return False", "def dfs(cell):\n r, c = cell\n if (0 <= r < len(grid)) and (0 <= c < len(grid[0])) and (cell not in visited) and (grid[r][c] != 0):\n\n visited.add((r, c)) # save cell\n grid[r][c] = self.num_islands\n # update current island size\n dfs((r, c+1))\n dfs((r+1, c))\n dfs((r-1, c))\n dfs((r, c-1))\n\n else:\n # out of bounds or visited\n return", "def dfs(pos, dis):\n global ans\n if pos == e:\n ans = dis - 1 if not ans or dis < ans else ans\n return\n\n # Backtracking\n if ans and dis > ans:\n return\n\n # Check the point visited\n visited[pos[0]][pos[1]] = 1\n for i in range(4):\n ny = pos[0] + dy[i]\n nx = pos[1] + dx[i]\n if 0 <= ny < N and 0 <= nx < N:\n # If the new point is not wall and not visited\n if maze[ny][nx] != 1 and not visited[ny][nx]:\n dfs([ny, nx], dis + 1)\n visited[pos[0]][pos[1]] = 0", "def dft(self, starting_vertex):\n \n visited = []\n stack = Stack()\n\n stack.add(starting_vertex)\n\n while len(stack):\n current = stack.pop()\n\n if current not in visited:\n print(current)\n visited.append(current)\n \n for child in self.vertices[current]:\n if child not in visited:\n stack.add(child)", "def dfs_loop(graph_dict, nodes, track):\n\n for node in nodes:\n if node not in track.explored:\n track.current_source = node\n dfs(graph_dict, node, track)", "def next_possible_states(path, check_dict, check):\r\n \r\n current_state_tuple = path[-1]\r\n state_container = []\r\n x = current_state_tuple[1][0]\r\n y = current_state_tuple[1][1]\r\n current_state = current_state_tuple[0]\r\n\r\n # Down\r\n if y < 3:\r\n new_state = copy.deepcopy(current_state)\r\n new_state[y][x] = new_state[y + 1][x]\r\n new_state[y + 1][x] = 0\r\n if not been_there(new_state, check_dict, check):\r\n new_index = (x, y + 1)\r\n h1 = euclidean_dist(new_state, path)\r\n new_state_tuple = (new_state, new_index, h1)\r\n state_container.append(new_state_tuple)\r\n\r\n # Up\r\n if y > 0:\r\n new_state = copy.deepcopy(current_state)\r\n if y == 1 and x == 0:\r\n new_state[y][x] = new_state[y - 1][x]\r\n new_state[y - 1][x] = 0\r\n if is_goal(new_state):\r\n new_index = (x, y - 1)\r\n h1 = 
euclidean_dist(new_state, path)\r\n new_state_tuple = (new_state, new_index, h1)\r\n state_container.append(new_state_tuple)\r\n elif y > 1:\r\n new_state[y][x] = new_state[y - 1][x]\r\n new_state[y - 1][x] = 0\r\n if not been_there(new_state, check_dict, check):\r\n new_index = (x, y - 1)\r\n h1 = euclidean_dist(new_state, path)\r\n new_state_tuple = (new_state, new_index, h1)\r\n state_container.append(new_state_tuple)\r\n\r\n # Left\r\n if x > 0 and y > 0:\r\n new_state = copy.deepcopy(current_state)\r\n new_state[y][x] = new_state[y][x - 1]\r\n new_state[y][x - 1] = 0\r\n if not been_there(new_state, check_dict, check):\r\n new_index = (x - 1, y)\r\n h1 = euclidean_dist(new_state, path)\r\n new_state_tuple = (new_state, new_index, h1)\r\n state_container.append(new_state_tuple)\r\n\r\n # Right\r\n if x < 2 and y > 0:\r\n new_state = copy.deepcopy(current_state)\r\n new_state[y][x] = new_state[y][x + 1]\r\n new_state[y][x + 1] = 0\r\n if not been_there(new_state, check_dict, check):\r\n new_index = (x + 1, y)\r\n h1 = euclidean_dist(new_state, path)\r\n new_state_tuple = (new_state, new_index, h1)\r\n state_container.append(new_state_tuple)\r\n\r\n return state_container", "def dfs(node, traversal):\n if traversal.terminated: return\n\n g = traversal.graph\n node_key = g.key_func(node)\n traversal.node_state[node_key] = DISCOVERED\n traversal.entry_times[node_key] = traversal.curr_time\n traversal.curr_time += 1\n\n if traversal.should_process_node(node) is not False:\n # Now go through all children\n children = list(traversal.select_children(node, reverse = True))\n # print \"Node, Children: \", g.key_func(node), children\n for n,edge in children:\n child_key = g.key_func(n)\n if traversal.node_state[child_key] != None:\n traversal.process_edge(node, n, edge)\n else: # Node has not even been discovered yet\n traversal.parents[child_key] = node\n traversal.process_edge(node, n, edge)\n dfs(n, traversal)\n\n traversal.node_state[node_key] = PROCESSED\n traversal.curr_time += 1\n traversal.exit_times[node_key] = traversal.curr_time\n traversal.node_processed(node)", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n from game import Directions\n waiting_list = util.Queue()\n # QUEUE\n # FIFO \n visited = set()\n parents = {}\n #collections.defaultdict(collections.UserDict)\n sequence = []\n start_state = problem.getStartState()\n for action in problem.getSuccessors(start_state):\n # in order to push full-state values\n waiting_list.push(action)\n \n while not waiting_list.isEmpty():\n node = waiting_list.pop()\n visited.add(node[0])\n for action in problem.getSuccessors(node[0]):\n \n #if child.STATE is not in explored or frontier then\n if action[0] not in visited:\n parents[action[0]] = {'parent':node} \n waiting_list.push(action)\n if problem.isGoalState(action[0]):\n target_state = action \n \n \n while target_state[0] in parents.keys():\n temp=parents[target_state[0]]['parent']\n sequence.append(target_state[1])\n target_state = temp\n sequence.append(target_state[1])\n return sequence[::-1]", "def _step(self):\n if self.best_unexplored_lower_bound < self.best_upper_bound:\n\n # Select a Node\n self._active_node = self._pop_node_with_best_lower_bound()\n\n # Reporting\n if self._reporting:\n print(self.report)\n\n # Select a Vertex\n unassigned_vertex_chosen = self._choose_unassigned_vertex_highest_degree()\n\n # Branch\n self._active_node.construct_children_nodes(\n unassigned_vertex_chosen,\n self._terminals_by_vertex[unassigned_vertex_chosen],\n )\n\n # NB: we do not 
need to worry about duplicate nodes\n # the nodes are constructed by forcing an assignment of\n # vertices to terminals. Thus, the resulting partitions\n # can never be identical\n self._unexplored_nodes += self._active_node.children\n self._all_nodes += self._active_node.children\n\n else:\n # if there are no unassigned vertices, we are at a leaf node\n self._done = True", "def dft(self, starting_vertex):\n # Create a s and push starting vertex\n ss = Stack()\n ss.push([starting_vertex])\n # Create a set of traversed vertices\n visited = []\n eldest = [] \n # While stack is not empty:\n while ss.size() > 0:\n # dequeue/pop the first vertex\n path = ss.pop()\n if path[-1] not in visited:\n # DO THE THING!!!!!!!\n # print(path[-1])\n # mark as visited\n visited.append(path[-1])\n print(visited)\n # enqueue all neightbors\n if not self.get_neighbors(path[-1]):\n if starting_vertex == path[-1]:\n return -1\n else:\n # print(\"eldest ancestor:\",path[-1])\n eldest.append(path[-1])\n\n for next_vert in self.get_neighbors(path[-1]):\n new_path = list(path)\n # print(new_path)\n new_path.append(next_vert)\n ss.push(new_path)\n \n return min(eldest)", "def dfs(self, starting_vertex, destination_vertex): # great for if you know the start and end, like a maze with 1 entry/1 exit\n visited = set() # create an empty 'set' to store visisted vertex, set sorts \n\n s = Stack() # create an empty Stack\n s.push([starting_vertex]) # push the starting vertex to the top of the stack \n\n while s.size() > 0: # loop if the size is greater than 0\n path = s.pop() # pop off the top element of the stack and store \n v = path[-1] # store the vertex from the end of path\n\n if v == destination_vertex: # if v is equal to the destination_vertex\n return path # return the path \n if v not in visited: # if v has not been visited yet \n visited.add(v) # add v to the vistied set \n\n for neighbor in self.vertices[v]: # loop through the neighbors\n path_copy = list(path) # make a copy of the path \n path_copy.append(neighbor) # append each neighbor to the back of the path copy \n s.push(path_copy) # push the path copy to the Stack", "def sudokuDepthFirstSearch(problem):\n\n def convertStateToHash(values):\n \"\"\" \n values as a dictionary is not hashable and hence cannot be used directly in the explored/visited set.\n This function changes values dict into a unique hashable string which can be used in the explored set.\n You may or may not use this\n \"\"\"\n l = list(sorted(values.items()))\n modl = [a+b for (a, b) in l]\n return ''.join(modl)\n\n # YOUR CODE HERE\n frontier = util.Stack()\n explored = set()\n initialState = problem.getStartState()\n frontier.push(initialState)\n while not frontier.isEmpty():\n choice = frontier.pop()\n if convertStateToHash(choice) not in explored:\n if problem.isGoalState(choice):\n return choice\n successors = problem.getSuccessors(choice)\n for successor in successors:\n frontier.push(successor[0])\n explored.add(convertStateToHash(choice))\n # util.raiseNotDefined()", "def solveOneStep(self):\n ### Student code goes here\n if (self.currentState.state == self.victoryCondition) or (self.currentState not in self.visited):\n self.visited[self.currentState] = True\n win_or_not = self.currentState.state == self.victoryCondition\n return win_or_not\n\n if not self.currentState.nextChildToVisit: \n its = 0\n for movable in self.gm.getMovables():\n its += 1\n # time test\n # too long \n if its == \"too long\":\n return \"too long\"\n #make every move in movable\n self.gm.makeMove(movable)\n 
new = self.gm.getGameState()\n new_gs = GameState(new, self.currentState.depth+1, movable)\n \n if new_gs not in self.visited:\n new_gs.parent = self.currentState\n self.currentState.children.append(new_gs)\n self.gm.reverseMove(movable) \n \n num_children = len(self.currentState.children)\n if self.currentState.nextChildToVisit < num_children:\n new = self.currentState.children[self.currentState.nextChildToVisit]\n self.currentState.nextChildToVisit = self.currentState.nextChildToVisit + 1\n self.gm.makeMove(new.requiredMovable)\n self.currentState = new\n #recurse\n return self.solveOneStep()\n else:\n self.currentState.nextChildToVisit = self.currentState.nextChildToVisit + 1\n self.gm.reverseMove(self.currentState.requiredMovable)\n self.currentState = self.currentState.parent\n #recurse\n return self.solveOneStep()", "def dfs(x, p, step):\n disc[x] = low[x] = step\n for xx in graph.get(x, []): \n if disc[xx] == inf: \n step += 1\n dfs(xx, x, step)\n low[x] = min(low[x], low[xx])\n if low[xx] > disc[x]: ans.append([x, xx]) # bridge\n elif xx != p: low[x] = min(low[x], disc[xx])", "def _dfs(grid, i, j):\n grid[i][j] = False\n for x in range(i - 1, i + 2):\n for y in range(j - 1, j + 2):\n if (abs((x + y) - (i + j)) == 1) and _is_valid_land(x, y, grid):\n _dfs(grid, x, y)", "def BFS(initial_state, check_dict):\r\n \r\n print(\"Implementing BFS...\")\r\n q = deque()\r\n q.append(initial_state)\r\n accomplished = False\r\n \r\n while len(q) != 0:\r\n path = q.pop()\r\n \r\n if is_goal(path[-1][0]):\r\n goal = path\r\n accomplished = True\r\n break\r\n \r\n state_container = next_possible_states(path, check_dict, False)\r\n for i in state_container:\r\n if len(path) <= 1:\r\n temp = list(path)\r\n temp.append(i)\r\n q.appendleft(temp)\r\n else:\r\n if i[0] != path[-2][0]:\r\n temp = list(path)\r\n temp.append(i)\r\n q.appendleft(temp)\r\n\r\n \r\n if accomplished:\r\n print(\"Solved! Number of moves:\", len(goal) - 1)\r\n return goal, True\r\n else:\r\n print(\"Cannot be solved. Number of moves:\", len(path) - 1)\r\n print(path)\r\n return path, False", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n fringe = util.Stack()\n return GraphSearch(problem, 'dfs').search(fringe)", "def dft(self, starting_vertex):\n # First, we create an empty stack and push the starting vertex\n ss = Stack()\n ss.push(starting_vertex)\n\n # Then we create a set to store the vertices we visit\n visited = set()\n\n # Here we write a while loop that will run as long as the stack is not empty\n while ss.size() > 0:\n # We pop the node off the top of the stack and set (v) to it\n v = ss.pop()\n\n # Next we check to see if that vertex has already been visited\n if v not in visited:\n # If it hasn't been visited, we print it out and mark it as visited\n print(v)\n visited.add(v)\n\n # Lastly, we push all its neighbors on the stack\n for next_vert in self.get_neighbors(v):\n ss.push(next_vert)" ]
[ "0.71973026", "0.69657785", "0.69124275", "0.6738573", "0.6701349", "0.6647733", "0.66013175", "0.6577175", "0.6573915", "0.6554322", "0.65495586", "0.6530897", "0.6520473", "0.6477678", "0.64633286", "0.64230543", "0.6422694", "0.6400245", "0.6394926", "0.6365001", "0.6357069", "0.6357069", "0.6322773", "0.6309887", "0.6306343", "0.62994236", "0.62832314", "0.62774235", "0.627606", "0.6271552", "0.62690437", "0.62658435", "0.62635314", "0.62607753", "0.6257489", "0.623338", "0.6232872", "0.6217125", "0.6216032", "0.62086135", "0.6182793", "0.61691767", "0.6156251", "0.61530966", "0.6151061", "0.6127781", "0.6124494", "0.6124487", "0.6118429", "0.61131877", "0.611272", "0.61069524", "0.61016095", "0.6101086", "0.6035765", "0.601597", "0.6001327", "0.5990648", "0.5982939", "0.5981291", "0.5979527", "0.5974622", "0.5968501", "0.5965676", "0.5963717", "0.59593135", "0.5949801", "0.5926562", "0.59241897", "0.5923468", "0.59216475", "0.58937204", "0.5884211", "0.5883193", "0.587088", "0.5840769", "0.58399355", "0.5839388", "0.58335096", "0.58219063", "0.5818374", "0.58139133", "0.5810753", "0.5803919", "0.5800146", "0.5795909", "0.57934487", "0.57928085", "0.57887363", "0.57880116", "0.5784283", "0.5779811", "0.5779644", "0.5779219", "0.577765", "0.576309", "0.57459074", "0.57438827", "0.57413816", "0.5739502" ]
0.695327
2
If a datagram is available, get it and return it, otherwise return None.
def get_net_message(): # TODO: refactor to use a list of events encoded using masgpack?
    try:
        message, address = serverSocket.recvfrom(1024)
    except:
        return None, None
    message = message.decode('utf-8')
    return message, address
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_device_or_None(id):\n try:\n d = Device.objects.get(id=id)\n return d\n except Device.DoesNotExist:\n return None", "def try_get(self, device_id):\n with self.lock:\n return self.devices.get(device_id, None)", "def fetch_packet_from_analyzer(self):\n\n try:\n # Read a packet from the backend, and add it to our analysis queue.\n return self.read_packet(timeout=self.PACKET_READ_TIMEOUT, blocking=False)\n\n except queue.Empty:\n # If no packets were available, return without error; we'll wait again next time.\n return None", "def _get_data(self):\n while True:\n # self.logger.debug(\"data queue size is: {}\".format(len(self._dataqueue)))\n ans = self._parser.find_first_packet(self._dataqueue[:])\n if ans:\n self._dataqueue = ans[1]\n # self.logger.debug(\"found packet of size {}\".format(len(ans[0])))\n return ans[0]\n else:\n # self.logger.debug(\"Could not find packet in received data\")\n tmp = self.conn.recv(1024)\n self._dataqueue += tmp", "def discover(self, srv_port):\n addr = None\n answ = None\n\n # Creates a new datagram socket to broadcast\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\n s.settimeout(self.timeout)\n s.sendto(REQ_HELLO, ('255.255.255.255', srv_port))\n\n # Wait for a server answer\n try:\n answ = s.recvfrom(1024)\n except socket.timeout:\n print 'Timeout exceeded...'\n\n # Close the diagram socket.\n s.close()\n\n if answ is not None and answ[0] == ANS_HELLO:\n # Saves the address if the server answer was correct.\n addr = answ[1]\n return addr", "def receive(self) -> Packet:\n try:\n return packet_from_bytes(self._socket.recvfrom(MAX_PACKET_SIZE)[0])\n except socket.timeout:\n raise racetools.errors.Timeout('timeout reached waiting for UDP packet') from None", "def find_dac():\n\n\ts = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\ts.bind((\"0.0.0.0\", 7654))\n\n\twhile True:\n\t\tdata, addr = s.recvfrom(1024)\n\t\tbp = BroadcastPacket(data)\n\t\t\n\t\tprint \"Packet from %s: \" % (addr, )\n\t\tbp.dump()", "def recv_packet(self):\r\n self.recv_bytes()\r\n\r\n packet_length_index = 0\r\n \r\n amount_data = len(self.recvBuffer) # available amount of data to read\r\n \r\n if amount_data <= packet_length_index: # just 0's in the buffer\r\n return None\r\n\r\n if len(self.recvBuffer) <= packet_length_index + 2: # length not received\r\n return None\r\n \r\n packet_length = unpack(self.recvBuffer, packet_length_index, 'H')\r\n \r\n if packet_length > len(self.recvBuffer): # packet not fully received\r\n return None\r\n \r\n if packet_length == 0: # some wrong generated packet by server, inc position of reading packet length\r\n packet_length_index += 1\r\n return None\r\n\r\n\t\t# extract packet data\r\n packet = self.recvBuffer[packet_length_index:packet_length_index+packet_length]\r\n\r\n # remaining recv buffer\r\n self.recvBuffer = self.recvBuffer[packet_length_index + packet_length:]\r\n packet_length_index = 0 # next packet length should be at pos 0 again\r\n\r\n return packet", "def get_device(self):\n addr = self.address\n servers = [server for server in pyrax.cloudservers.list()\n if addr in server.networks.get(\"private\", \"\")]\n try:\n return servers[0]\n except IndexError:\n return None", "def recieve_data(self):\r\n try:\r\n while True:\r\n try:\r\n data, self.addr = self.sock.recvfrom(1024)\r\n return data\r\n except socket.timeout:\r\n print(\"There is no packet at all!\")\r\n break\r\n except Exception:\r\n print(\"Can't recieve a package\")", "def 
get_device_by_mac_or_None(mac):\n try:\n d = Device.objects.get(mac=mac)\n return d\n except Device.DoesNotExist:\n return None", "def peek(self) -> t.Optional[Record]:\n self._buffer(1)\n if self._record_buffer:\n return self._record_buffer[0]\n return None", "def _get_available_record_entry(self, zone):\n entries = zone.extra\n for entry in range(1, MAX_RECORD_ENTRIES + 1):\n subdomain = entries.get(\"S%s\" % entry)\n _type = entries.get(\"T%s\" % entry)\n data = entries.get(\"D%s\" % entry)\n if not any([subdomain, _type, data]):\n return entry\n return None", "def read(self):\n packet = None\n while packet is None:\n packet = self.async_read()\n return packet", "def datagram(\r\n self,\r\n family = socket.AF_INET,\r\n type = socket.SOCK_DGRAM,\r\n local_host = None,\r\n local_port = None,\r\n remote_host = None,\r\n remote_port = None,\r\n callback = None\r\n ):\r\n\r\n # creates the socket that it's going to be used for the listening\r\n # of new connections (client socket) and sets it as non blocking\r\n _socket = socket.socket(family, type)\r\n _socket.setblocking(0)\r\n\r\n # sets the various options in the service socket so that it becomes\r\n # ready for the operation with the highest possible performance\r\n _socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\r\n _socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\r\n\r\n # in case both the local host and port are defined runs the bind\r\n # operation so that the current socket is st to listen for new\r\n # datagrams on the associated host and port\r\n if local_host and local_port: _socket.bind((local_host, local_port))\r\n\r\n # verifies if both the host and the port are set and if that's the\r\n # case runs the connect (send bind) operation in the datagram socket\r\n # notice that this is not a \"real\" remote connection\r\n if remote_host and remote_port: _socket.connect((remote_host, remote_port))\r\n\r\n # creates a new connection object representing the datagram socket\r\n # that has just been created to be used for upper level operations\r\n # and then immediately sets it as connected\r\n connection = self.base_connection(_socket, datagram = True)\r\n connection.open()\r\n connection.set_connected()\r\n\r\n # in case a callback is defined schedules its execution for the next\r\n # tick to avoid possible issues with same tick registration\r\n if callback: self.delay(lambda: callback(connection, True), immediately = True)\r\n\r\n # returns the connection to the caller method so that it may be used\r\n # for operation from now on (latter usage)\r\n return connection", "def read(self, visibility_timeout=None):\r\n rs = self.get_messages(1, visibility_timeout)\r\n if len(rs) == 1:\r\n return rs[0]\r\n else:\r\n return None", "def get(self) -> Optional[Message]:\n self._recv()\n if not self.inq:\n return None\n return self.inq.popleft()", "def async_read(self):\n self.lock.acquire()\n\n # append data\n self.rx_buffer += self.interface.read()\n\n # ensure first byte start with 0xbc\n if len(self.rx_buffer) > 0:\n if self.rx_buffer[0] != 0xbc:\n try:\n pkt_start = self.rx_buffer.index(0xbc)\n self.rx_buffer = self.rx_buffer[pkt_start:]\n except ValueError:\n self.rx_buffer = bytes()\n\n # check if we got a valid packet\n if len(self.rx_buffer) >= 4:\n pkt_size = unpack('<H', self.rx_buffer[2:4])[0]\n # check if we got a complete packet\n if len(self.rx_buffer) >= (pkt_size + 5):\n # yep, parse this packet\n packet = Packet.fromBytes(self.rx_buffer[:pkt_size+5])\n self.rx_buffer = self.rx_buffer[pkt_size+5:]\n 
self.lock.release()\n return packet\n\n # otherwise, return None\n self.lock.release()\n return None", "def recvfrom(self, buflen):\n\n data = baseRecv(buflen);\n if data is not None:\n message = data[0];\n mac_header = data[1];\n ip_header = data[2];\n udp_header = data[3];\n\n udp_to = udp_header[0];\n mac_from = mac_header[1];\n ip_from = ip_header[1];\n udp_from = udp_header[1];\n\n\n # Add the MAC to the MAC dictionary if it is not already recorded.\n if ip_from in self.macDict: self.macDict[ip_from] = mac_from;\n\n # If the message is not addressed to this computer's IP, discard the message (should be redudant with MAC)\n if ip_to != self.my_ip_addr: return None;\n\n # If the message is not addressed to this application's port, discard the message\n if udp_to != self.my_port: return None;\n\n return message, pubIPToMorse(ip_from,udp_from); \n else: return None;", "def datagram_received(self, data: bytes, addr: tuple[str, int]) -> None:\n packet = self.udp_pack.unpack(data)\n if packet is None:\n return\n asyncio.create_task(self._push(packet))", "def _get_email_forwarding_by_uwnetid(uwnetid):\n if uwnetid is None:\n return None\n return get_email_forwarding(uwnetid)", "def get_data(self):\n\n data = self.socket.recv(BUFFER_SIZE)\n\n if not data:\n return None\n\n if len(data) == BUFFER_SIZE:\n while True:\n try:\n data += self.socket.recv(BUFFER_SIZE)\n except:\n break\n \n return data", "def get_udp_packet(self, sock, size=0):\n\n pkt = ''\n while True:\n buf = ''\n try:\n buf = sock.recvfrom(64)[0]\n except socket.timeout:\n break\n if size and len(pkt) >= size:\n break\n if not buf:\n break\n pkt += buf\n return pkt", "def get_main_ipv4():\n try:\n # No data is actually transmitted (UDP)\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect( ('8.8.8.8', 53) )\n real_ip = s.getsockname()[0]\n s.close()\n return real_ip\n except socket.error as e:\n logging.error(\"Cannot retrieve current IPv4 address: %s\" % e)\n return None", "def GetDevice(self, arg):\n\n if not arg: return None\n\n deviceSpec = DeviceId(arg)\n\n for device in self.YieldAllDevices():\n if deviceSpec.Matches(device): return device", "def find_target_device(ble_device, name):\r\n scan_report = ble_device.scanner.start_scan().wait()\r\n\r\n for report in scan_report.advertising_peers_found:\r\n if report.advertise_data.local_name == name:\r\n return report.peer_address", "def get_stats_request(self, request, dpid):\n dp = self.dpset.get(dpid)\n func = self.reqfunction.get(request, None)\n if dp and func:\n return func(dp, self.waiters)\n return None", "def receive(ip=None, port=0):\n\tsock = socket.socket(socket.AF_INET, # Internet\n socket.SOCK_DGRAM) # UDP\n\n\tsock.bind((ip, port))\n\n\twhile True:\n\t\tdata, addr = sock.recvfrom(1024) # buffer size is 1024 bytes\n\t\tif len(data) > 0:\n\t\t\tbreak\n\n\treturn data", "def device_info(device_id):\n device_info_map = listall.device_raw_info()[\"devices\"]\n for operating_system in device_info_map.keys():\n devices = device_info_map[operating_system]\n for device in devices:\n if device[\"udid\"].lower() == device_id.lower():\n return device\n return None", "def baseRecv(self, buflen):\n\n # Attempts to retrieve a message from the queue initilized in bind, returns None if there are no messages\n data = r.popMessage(); \n\n # Checks to see if a message was retrieved\n if data is None:\n return None; # Not certain if this is the correct return for this...\n else:\n dest_mac = data[0];\n source_mac = data[1];\n length = data[2];\n payload = data[3];\n 
mac_header = dest_mac+source_mac+length;\n\n ip_header = payload[:7];\n udp_header = payload[7:9];\n message = payload[9:];\n\n # If the message is not addressed to this computer's MAC, discard the message\n if dest_mac != self.my_mac: return None;\n\n if (buflen<len(message)): return None;\n else: return message, mac_header, ip_header, udp_header;", "def get_main_ipv6():\n try:\n # No data is actually transmitted (UDP)\n s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)\n s.connect( ('2001:4860:4860::8888', 53) )\n real_ip = s.getsockname()[0]\n s.close()\n return real_ip\n except socket.error as e:\n logging.error(\"Cannot retrieve current IPv6 address: %s\" % e)\n return None", "def datagramReceived(self, datagram_, address):\n #if DEBUG: print \"Datagram received from \"+ repr(address) \n datagram = simplejson.loads(datagram_)\n if not hasattr(datagram,'keys'):\n if DEBUG: print \"unknown UDP message:\\n\", datagram\n pdb.set_trace()\n return\n if 'loop_started' in datagram.keys():\n return\n if 'shotnumber_started' in datagram.keys():\n #dc.get('_exp_sync').shotnumber = datagram['shotnumber_started']\n #return\n self.server.pxi_time = float(datagram['time'])\n self.server.pxi_time_server_time = float(datagram['time']) - float(time.time())#Make this so that it synchronizes the clocks CP\n\n msg = {\"data_context\": 'PXI',\n \"shotnumber\":datagram['shotnumber_started']}\n msg = simplejson.dumps(msg, ensure_ascii = False).encode('utf8')\n self.server.broadcast(msg) \n if DEBUG: print datagram\n \n self.server.active_parser_ip = datagram['server_ip_in_charge']#Make this so that it synchronizes the clocks CP\n self.server.active_parser_port = datagram['server_port_in_charge']#Make this so that it synchronizes the clocks CP\n dc = self.server.command_library.__determineContext__({'data_context':'PXI'}) \n if not dc.dict.has_key('_exp_sync'):\n exp_sync = sync.Experiment_Sync_Group(self.server, dc.name)\n dc.update({'_exp_sync':exp_sync})\n dc.get('_exp_sync').shotnumber = int(datagram['shotnumber_started'])\n print \"Shot started:\", datagram['shotnumber_started'], \"pxi_time:\", self.server.pxi_time, \"time.time():\", float(time.time())\n return\n \n \n if 'fake_shotnumber_started' in datagram.keys():\n if self.server.ip == '10.1.1.124':\n return\n print datagram\n msg = {\"data_context\": datagram['data_context'],\n \"shotnumber\":datagram['fake_shotnumber_started']}\n msg = simplejson.dumps(msg, ensure_ascii = False).encode('utf8')\n self.server.broadcast(msg) \n dc = self.server.command_library.__determineContext__(datagram) \n if not dc.dict.has_key('_exp_sync'):\n exp_sync = sync.Experiment_Sync_Group(self.server, dc.name)\n dc.update({'_exp_sync':exp_sync})\n dc.get('_exp_sync').shotnumber = int(datagram['fake_shotnumber_started'])\n if DEBUG: print \"Fake Shot started:\", datagram['fake_shotnumber_started'], \"pxi_time:\", datagram['time'], \"time.time():\", float(time.time())\n dc.update({'Test_instrument':glab_instrument.Glab_Instrument(params={'server':self.server,'create_example_pollcallback':True})})\n return\n \n try:\n datagram[\"server_ping\"] \n except KeyError:\n if DEBUG: print \"unknown UDP message:\\n\", datagram\n return\n ping_command = commands.ServerCommand(self.server, self.server.catch_ping, datagram)\n self.server.command_queue.add(ping_command)", "def get_mountpoint(host, fqpath):\n command = \"df -P %s | awk 'END{print $NF}'\" % fqpath\n rcode, rout, rerr = g.run(host, command)\n if rcode == 0:\n return rout.strip()\n\n g.log.error(\"Get mountpoint failed: 
%s\" % rerr)\n return None", "def find_ipv4():\n try:\n r = requests.get(v4_url)\n tree = html.fromstring(r.content)\n result = tree.xpath('//body/text()')\n result = result[0].split()\n ipv4 = result[len(result)-1]\n except:\n if cfg['debug']:\n print(\"Couldn't connect to %s\" % v4_url)\n print(\"Check that you have a valid IPv4 default route\")\n ipv4 = None\n\n return ipv4", "def recv(self) -> Optional[bytes]:\n ready, _, _ = select.select([self.socket], [], [], 0)\n if len(ready) != 0:\n new_bytes = self.socket.recv(self.BUFFER_SIZE)\n self.__recv_buffer = self.__recv_buffer + new_bytes\n return self.__parse_one_message()", "def get_data(self):\n try:\n data = self._queue.get(block=False)\n except Empty:\n data = None\n return data", "def peek(self):\r\n if self.size():\r\n return self.queue[0]\r\n else:\r\n return None", "def get_data_address(request):\n # Check if the request is for a specific station or for all.\n if len(request) >= MIN_DRM_REQUEST_ADDR_SIZE:\n data_values = request.split(DATA_SEPARATOR)\n dest_addr = data_values[0].strip()\n data = data_values[1].strip()\n return data, dest_addr\n return request, None", "def _get_unused_udp_port():\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.bind(('', 0))\n port = s.getsockname()[1]\n s.close()\n return port", "def _get(self, ndef_message, timeout=1.0):\n if not self.socket:\n try:\n self.connect('urn:nfc:sn:snep')\n except nfc.llcp.ConnectRefused:\n return None\n else:\n self.release_connection = True\n else:\n self.release_connection = False\n try:\n snep_request = b'\\x10\\x01'\n snep_request += struct.pack('>L', 4 + len(str(ndef_message)))\n snep_request += struct.pack('>L', self.acceptable_length)\n snep_request += str(ndef_message)\n if send_request(self.socket, snep_request, self.send_miu):\n response = recv_response(\n self.socket, self.acceptable_length, timeout)\n if response is not None:\n if response[1] != 0x81:\n raise SnepError(response[1])\n return response[6:]\n finally:\n if self.release_connection:\n self.close()", "def datagramReceived(self, data):\n raise NotImplementedError()", "def by_name(name):\n devices = discover()\n\n for device in devices or []:\n if device.player_name == name:\n return device\n return None", "def _find_device(self):\n for bus in usb.busses():\n for dev in bus.devices:\n if dev.idVendor == self.vendor_id and dev.idProduct == self.product_id:\n if self.device_id is None or dev.filename == self.device_id:\n log.info('found station on USB bus=%s device=%s' % (bus.dirname, dev.filename))\n return dev\n return None", "def get_ip_address(ifname, family=socket.AF_INET):\n if family == socket.AF_INET:\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n ip = socket.inet_ntoa(fcntl.ioctl(s.fileno(), 0x8915, # SIOCGIFADDR\n struct.pack('256s', ifname[:15]))[20:24])\n except IOError:\n return None\n return ip\n elif family == socket.AF_INET6:\n try:\n with open(\"/proc/net/if_inet6\", \"r\") as f:\n if6lines = f.readlines()\n for line in if6lines:\n val = line.split()\n # filter LINKLOCAL address\n if val[3] != '20' and val[-1] == str(ifname):\n return Convert.format_proc_address(val[0])\n return None\n except Exception as e:\n SysTools.logger.error(\"can not get the ipv6 address of %s : %s\", str(ifname), str(e))\n return None\n else:\n return None", "def receive(self):\n if self.sock is not None:\n return recv_msg(self.sock)\n return None", "def peek(self):\n if self.size == 0:\n return None\n return self.first.data", "def papi(self) -> typing.Union[None, str]:\n if 
self.airfield.data['2.14']['data']:\n for row in self.airfield.data['2.14']['data'][2:]:\n if row[0] == self.designation:\n return row[3].partition('\\n')[0] or None\n return None", "def get_target_port(self, dsf):\n\n try:\n message = \"Get The Target Port Address\"\n command = \"spt dsf={dsf} inquiry page=deviceid output-format=json emit=\".format(dsf=dsf)\n pdata = self._run_command(command=command, message=message, logger=self._logger, shell=False)\n deviceid = json.loads(pdata['stdout'])\n\n for desc in deviceid['Device Identification']['Identifier Descriptor List']:\n if ( desc.get('Protocol Identifier Description') and\n desc['Protocol Identifier Description'] == 'SAS Serial SCSI Protocol' ):\n return desc['IEEE Registered Identifier']\n\n return None\n\n except RuntimeError as exc:\n self._logger.error(\"Failed to acquire SCSI Inquiry Device ID page: {0}\".format(exc))\n raise exc", "def get_device(l):\n if not l.device:\n l.device = find_device()\n setup_device(l.device)\n return l.device", "async def get_device(hass: HomeAssistant, device_id: str) -> Optional[DeviceEntry]:\n device_registry = await hass.helpers.device_registry.async_get_registry()\n return device_registry.async_get(device_id)", "def request(self, data):\n # Create a new socket using the given address family, socket type and protocol number\n sock = socket(AF_INET, SOCK_DGRAM)\n # Set the value of the given socket option (see the Unix manual page setsockopt(2)).\n sock.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)\n # define timout to settings.TIMEOUT_DELAY\n sock.settimeout(settings.TIMEOUT_DELAY)\n try:\n log.debug(\"[UDP] Sending request to {}:{} > \\\"{}\\\".\".format(self.host, self.port, self._remove_new_line(data)))\n # Send data to the socket.\n sock.sendto(data, (self.host, self.port))\n # Receive data from the socket (max amount is the buffer size).\n data = sock.recv(self.buffer_size)\n log.debug(\"[UDP] Got back > \\\"{}\\\".\".format(self._remove_new_line(data)))\n # in case of timeout\n except timeout, msg:\n # TODO: Maybe retry 3 times\n log.error(\"[UDP] Request Timeout. {}\".format(msg))\n data = \"ERR\"\n # in case of error\n except error, msg:\n log.error(\"[UDP] Something happen when trying to connect to {}:{}. 
Error: {}\".format(self.host, self.port, msg))\n data = \"ERR\"\n finally:\n # Close socket connection\n sock.close()\n data = self._remove_new_line(data)\n return data", "def peek(self):\n if self.is_empty():\n return None\n\n return self.linked_list.head.data", "def _guess_lan_address():\n blacklist = [\"127.0.0.1\", \"0.0.0.0\", \"255.255.255.255\"]\n for interface in netifaces.interfaces():\n addresses = netifaces.ifaddresses(interface)\n for option in addresses.get(netifaces.AF_INET, []):\n if \"broadcast\" in option and \"addr\" in option and not option[\"addr\"] in blacklist:\n if __debug__: dprint(\"interface \", interface, \" address \", option[\"addr\"])\n return option[\"addr\"]\n #Exception for virtual machines/containers\n for interface in netifaces.interfaces():\n addresses = netifaces.ifaddresses(interface)\n for option in addresses.get(netifaces.AF_INET, []):\n if \"addr\" in option and not option[\"addr\"] in blacklist:\n if __debug__: dprint(\"interface \", interface, \" address \", option[\"addr\"])\n return option[\"addr\"]\n dprint(\"Unable to find our public interface!\", level=\"error\")\n return None", "def get_device_ip():\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.connect((\"8.8.8.8\", 80))\n ip = sock.getsockname()[0]\n sock.close()\n return ip", "def try_read(self):\r\n pos = self._fp.tell()\r\n try:\r\n return self.read()\r\n except RecordIO.PrematureEndOfStream as e:\r\n log.debug('Got premature end of stream [%s], skipping - %s' % (self._fp.name, e))\r\n self._fp.seek(pos)\r\n return None", "def get_interface_ip_address(device, interface, address_family,\r\n return_all=False):\r\n if address_family not in [\"ipv4\", \"ipv6\", \"inet\", \"inet6\"]:\r\n log.info('Must provide one of the following address families: '\r\n '\"ipv4\", \"ipv6\", \"inet\", \"inet6\"')\r\n return\r\n\r\n if address_family == \"ipv4\":\r\n address_family = \"inet\"\r\n elif address_family == \"ipv6\":\r\n address_family = \"inet6\"\r\n\r\n try:\r\n out = device.parse('show interfaces terse {interface}'.format(\r\n interface=interface))\r\n except SchemaEmptyParserError:\r\n return\r\n\r\n # Example dictionary structure:\r\n # {\r\n # \"ge-0/0/0.0\": {\r\n # \"protocol\": {\r\n # \"inet\": {\r\n # \"10.189.5.93/30\": {\r\n # \"local\": \"10.189.5.93/30\"\r\n # }\r\n # },\r\n # \"inet6\": {\r\n # \"2001:db8:223c:2c16::1/64\": {\r\n # \"local\": \"2001:db8:223c:2c16::1/64\"\r\n # },\r\n # \"fe80::250:56ff:fe8d:c829/64\": {\r\n # \"local\": \"fe80::250:56ff:fe8d:c829/64\"\r\n # }\r\n # },\r\n # }\r\n # }\r\n # }\r\n\r\n found = Dq(out).contains(interface).contains(address_family). 
\\\r\n get_values(\"local\")\r\n if found:\r\n if return_all:\r\n return found\r\n return found[0]\r\n return None", "def _get_message(self):\n if not self.opened: return None\n \n # read as much as possible\n read = 0\n try: \n chars = [b'0']\n addr = None\n logger.debug(\"%s: Socket read started...\" % \\\n self.__class__.__name__)\n while(len(chars) > 0):\n try:\n chars, addr = self._udp_socket.recvfrom(1)\n except socket.error:\n logger.debug(\"%s: ...Socket read complete.\" % \\\n self.__class__.__name__)\n break\n except socket.timeout:\n logger.debug(\"%s: ...Socket read complete.\" % \\\n self.__class__.__name__)\n break\n if len(chars) > 0:\n if addr not in self._rbuff:\n self._rbuff[addr] = []\n if sys.version_info[0] > 2:\n self._rbuff[addr] += chars\n else:\n self._rbuff[addr] += map(ord,chars)\n read += len(chars)\n else:\n logger.error(\"%s: ...Socket has been closed.\" % \\\n (self.__class__.__name__))\n self.close()\n return None\n logger.debug(\"%s: ...Socket read complete.\" % \\\n self.__class__.__name__)\n except Exception as ex:\n logger.error(\"%s: ...Socket read failed:\\n%s\" % \\\n (self.__class__.__name__,str(ex)))\n raise utils.TransportError \\\n (\"Socket Message get failed!\\n\" + str(ex))\n if read > 0 :\n logger.info(\"%s: Read %d bytes.\" % (self.__class__.__name__, read))\n \n # Check all Clients\n for addr in self._rbuff.keys():\n \n # Look for message start (SOH XX ~XX)\n disc = []\n while(len(self._rbuff[addr]) > 3 and (\n self._rbuff[addr][0] != messages.HorizonMessage.SOH or\n self._rbuff[addr][1] != 0xFF&(~self._rbuff[addr][2]) or\n self._rbuff[addr][1] == 0)):\n disc.append(self._rbuff[addr].pop(0))\n if len(disc) > 0:\n logger.info(\"%s: Discarded %d bytes:\\n%s\" % (\n self.__class__.__name__, len(disc), \n ' '.join(map(utils.hex,disc))))\n if len(self._rbuff[addr]) < 3:\n continue\n \n # Extract Expected Message Length\n length = self._rbuff[addr][1] + 3 \n \n # Look for next message start\n for i in range(1,len(self._rbuff[addr])-2):\n if self._rbuff[addr][i] == messages.HorizonMessage.SOH and \\\n self._rbuff[addr][1]==0xFF&(~self._rbuff[addr][2]) and \\\n self._rbuff[addr][1] != 0:\n if i < length:\n length = i\n break\n \n # Not all read yet\n if len(self._rbuff[addr]) < length:\n continue\n \n # Extract Message\n raw = self._rbuff[addr][0:length]\n self._rbuff[addr] = self._rbuff[addr][length:]\n logger.info(\"%s: Message of %d bytes found:\\n%s\" % (\n self.__class__.__name__, len(raw), \n ' '.join(map(utils.hex,raw))))\n msg = messages.HorizonMessage(version = self._version, \n payload_type = payloads.HorizonPayload,\n raw = raw, store_error = True)\n \n # update timestamp\n t = datetime.datetime.today()\n timestamp = t.microsecond/1000 + t.second*1000 + \\\n t.minute*60*1000 + t.hour*60*60*1000 + \\\n t.day*24*60*60*1000\n while timestamp > 4294967295: timestamp -= 4294967295\n \n # find connection\n for client in self._clients:\n if client.address == addr:\n client._last = timestamp\n client.route_message(msg)\n continue\n \n # new connection\n if len(self._clients) >= self._max:\n continue\n self._clients.append(HorizonTransport_Socket(\n sock = self._udp_socket,\n host = addr[0],\n port = addr[1],\n name = \"%s:%d\" % addr,\n store_timeout = 1,\n version = self._version))\n self._clients[-1].opened = True\n self._router.add_client(self._clients[-1])\n logger.info(\"%s: New connection to %s:%d.\" % \\\n (self.__class__.__name__,self._clients[-1].address[0],\n self._clients[-1].address[1]))\n client._last = timestamp\n 
client.route_message(msg)\n \n \n # update timestamp\n t = datetime.datetime.today()\n timestamp = t.microsecond/1000 + t.second*1000 + \\\n t.minute*60*1000 + t.hour*60*60*1000 + \\\n t.day*24*60*60*1000\n while timestamp > 4294967295: timestamp -= 4294967295\n \n # Connection Timeout?\n for i in range(len(self._clients),0,-1):\n last = self._clients[i-1].get_last_time()\n if ((timestamp - last >= self._rec_timeout) or\\\n (timestamp < last and 4294967295 - \\\n last + timestamp >= self._rec_timeout)):\n logger.warning(\"%s: Connection to %s timed-out!\" % \\\n (self.__class__.__name__,self._clients[i-1].name))\n self._router.remove_client(self._clients[i-1])\n self._clients[i-1].opened = False\n self._clients.remove(self._clients[i-1])\n \n return None", "async def find_device_by_filter(\n cls, filterfunc: AdvertisementDataFilter, timeout: float = 10.0, **kwargs\n ) -> Optional[BLEDevice]:\n async with cls(**kwargs) as scanner:\n try:\n async with async_timeout(timeout):\n async for bd, ad in scanner.advertisement_data():\n if filterfunc(bd, ad):\n return bd\n except asyncio.TimeoutError:\n return None", "def finddevice():\n\n return next((device for device in [\"xpu\"] if hasattr(torch, device) and getattr(torch, device).is_available()), None)", "def get_data(url):\n result = firebase.get(url, None)\n if result:\n return result\n else:\n return None", "def _get_bytes(self, _call_time: float) -> t.Union[bytes, None]:\n\n r = self.receive()\n\n st_t = time.time() # for timeout\n\n # wait for r to not be None or for received time to be greater than call time\n while (r is None or r[0] < _call_time):\n if (time.time() - st_t > self._timeout):\n # timeout reached\n return None\n\n r = self.receive()\n time.sleep(0.01)\n\n # r received\n return r[1]", "def get_route(self, srcif, daddr):\n routes = self.lookup_routes(daddr)\n # 1. Highest Preference\n routes = self.get_highest_preference(routes)\n #print(\"RRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRROuteS:\", routes)\n # 2. Self Origin\n routes = self.get_self_origin(routes)\n # 3. Shortest ASPath\n routes = self.get_shortest_as_path(routes)\n # 4. EGP > IGP > UNK\n routes = self.get_origin_routes(routes)\n # 5. Longest ghoti matching\n routes = self.get_longest_prefix(routes)\n # 6. 
Lowest IP\n \n if len(routes) not in (0, 1):\n for i in range(0, 4):\n min_quads = []\n for route in routes:\n min_quads.append(route[\"peer\"].split(\".\")[i])\n min_quad = min(min_quads)\n routes = [ route for route in routes if route[\"peer\"].split(\".\")[i] == min_quad ]\n\n if len(routes) == 0:\n return None\n # Final check: enforce peering relationships\n #there should only be 1 route left at this point\n #print(\"SRCIFFFFFFFFFFFFFFFF IS\", srcif) \n recRelation = self.relations[srcif]\n routes = self.filter_relationships(recRelation, routes, srcif, True)\n #print(\"RRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRroutes\", routes)\n if len(routes) == 0:\n return None\n\n return self.sockets[routes[0][\"peer\"]]", "def datagram_received(self, data, addr):\n message = data.decode()\n sysmatch = SYSLOG_MESSAGE_RE.match(message)\n if sysmatch is None:\n self.log.error('Cannot parse syslog with regex: ' + message)\n return\n\n runner = self.runner\n\n match = DHCPCD_ADD_RE.match(sysmatch.group('msg'))\n if match is not None:\n timestamp = timeparser.parse(sysmatch.group('date'))\n\n runner.loop.create_task(runner.network_added(match.group('intf'),\n match.group('route'), timestamp))\n return\n\n match = INTF_REMOVE_RE.match(sysmatch.group('msg'))\n if match is not None:\n runner.loop.create_task(runner.network_removed(match.group('intf')))\n return\n match = WPA_REMOVE_RE.match(sysmatch.group('msg'))\n if match is not None:\n runner.loop.create_task(runner.network_removed(match.group('intf')))\n return\n\n match = WPA_ADD_RE.match(sysmatch.group('msg'))\n if match is None:\n match = KERNEL_ADD_RE.match(sysmatch.group('msg'))\n if match is not None:\n # probably interface with static ip was connected\n timestamp = timeparser.parse(sysmatch.group('date'))\n\n runner.loop.create_task(runner.network_added(match.group('intf'),\n None, timestamp))\n return", "def _parse_device_from_datagram(\n device_callback: Callable[[SwitcherBase], Any], datagram: bytes\n) -> None:\n parser = DatagramParser(datagram)\n if not parser.is_switcher_originator():\n logger.debug(\"received datagram from an unknown source\")\n else:\n device_type: DeviceType = parser.get_device_type()\n if device_type == DeviceType.BREEZE:\n device_state = parser.get_thermostat_state()\n else:\n device_state = parser.get_device_state()\n if device_state == DeviceState.ON:\n power_consumption = parser.get_power_consumption()\n electric_current = watts_to_amps(power_consumption)\n else:\n power_consumption = 0\n electric_current = 0.0\n\n if device_type and device_type.category == DeviceCategory.WATER_HEATER:\n logger.debug(\"discovered a water heater switcher device\")\n device_callback(\n SwitcherWaterHeater(\n device_type,\n device_state,\n parser.get_device_id(),\n parser.get_ip_type1(),\n parser.get_mac(),\n parser.get_name(),\n power_consumption,\n electric_current,\n (\n parser.get_remaining()\n if device_state == DeviceState.ON\n else \"00:00:00\"\n ),\n parser.get_auto_shutdown(),\n )\n )\n\n elif device_type and device_type.category == DeviceCategory.POWER_PLUG:\n logger.debug(\"discovered a power plug switcher device\")\n device_callback(\n SwitcherPowerPlug(\n device_type,\n device_state,\n parser.get_device_id(),\n parser.get_ip_type1(),\n parser.get_mac(),\n parser.get_name(),\n power_consumption,\n electric_current,\n )\n )\n\n elif device_type and device_type.category == DeviceCategory.SHUTTER:\n logger.debug(\"discovered a Runner switch switcher device\")\n device_callback(\n SwitcherShutter(\n device_type,\n 
DeviceState.ON,\n parser.get_device_id(),\n parser.get_ip_type2(),\n parser.get_mac(),\n parser.get_name(),\n parser.get_shutter_position(),\n parser.get_shutter_direction(),\n )\n )\n\n elif device_type and device_type.category == DeviceCategory.THERMOSTAT:\n logger.debug(\"discovered a Breeze switcher device\")\n device_callback(\n SwitcherThermostat(\n device_type,\n device_state,\n parser.get_device_id(),\n parser.get_ip_type2(),\n parser.get_mac(),\n parser.get_name(),\n parser.get_thermostat_mode(),\n parser.get_thermostat_temp(),\n parser.get_thermostat_target_temp(),\n parser.get_thermostat_fan_level(),\n parser.get_thermostat_swing(),\n parser.get_thermostat_remote_id(),\n )\n )\n else:\n warn(\"discovered an unknown switcher device\")", "def _find_adapter(self):\n required_interfaces = [GATT_MANAGER_IFACE, LE_ADVERTISING_MANAGER_IFACE]\n object_manager = dbus.Interface(self.bus.get_object(BLUEZ_SERVICE_NAME, '/'), DBUS_OM_IFACE)\n objects = object_manager.GetManagedObjects()\n\n for object_path, properties in objects.items():\n missing_interfaces = [i for i in required_interfaces if i not in properties.keys()]\n if missing_interfaces:\n continue\n return object_path.rsplit('/', 1)[1]\n\n return None", "def _get_data(self):\n response = self._get_raw_data()\n if response is None:\n # error has already been logged\n return None\n\n if response.startswith('ERROR'):\n self.error(\"received ERROR\")\n return None\n\n try:\n parsed = response.split(\"\\n\")\n except AttributeError:\n self.error(\"response is invalid/empty\")\n return None\n\n # split the response\n data = {}\n for line in parsed:\n if line.startswith('STAT'):\n try:\n t = line[5:].split(' ')\n data[t[0]] = t[1]\n except (IndexError, ValueError):\n self.debug(\"invalid line received: \" + str(line))\n pass\n\n if len(data) == 0:\n self.error(\"received data doesn't have any records\")\n return None\n\n # custom calculations\n try:\n data['avail'] = int(data['limit_maxbytes']) - int(data['bytes'])\n data['used'] = int(data['bytes'])\n except:\n pass\n\n return data", "def get_ip():\n\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n # doesn't even have to be reachable\n s.connect(('10.255.255.255', 1))\n return s.getsockname()[0]\n except:\n return '127.0.0.1'\n finally:\n s.close()", "def try_dereference(gdbval):\n try:\n ret = gdbval.dereference()\n ret.fetch_lazy()\n return ret\n except gdb.MemoryError:\n return None", "def try_get_field(self, field_name: str) -> Optional[fields.Field]:\n field = getattr(self, field_name, None)\n if isinstance(field, fields.Field):\n return field\n return None", "def get_device_by_ip(self, device_ip):\n\n found_device = None\n\n devices = self.get_all_devices()\n for device in devices:\n if getattr(device, 'ip', None) == device_ip:\n found_device = device\n\n if found_device is None:\n raise LogglyException(\"No device found with ip: %s\" % device_ip)\n\n return found_device", "def read_until_null(self):\r\n # Check socket connection\r\n if self.connected:\r\n # Get result data from debugger engine\r\n try:\r\n while not '\\x00' in self.buffer:\r\n self.buffer += H.data_read(self.socket.recv(self.read_size))\r\n data, self.buffer = self.buffer.split('\\x00', 1)\r\n return data\r\n except:\r\n e = sys.exc_info()[1]\r\n raise ProtocolConnectionException(e)\r\n else:\r\n raise ProtocolConnectionException(\"Xdebug is not connected\")", "def peek(self):\n size = self._list.size()\n if size == 0:\n return None\n return self._list.tail.data", "def read_packet(self, 
blocking=True, timeout=None):\n return self.data_queue.get(blocking, timeout=timeout)", "def find_ipv6():\n\n test_host = '2600::' # Sprint.net\n try:\n with socket.socket(socket.AF_INET6, socket.SOCK_DGRAM) as s:\n s.connect((test_host, 53))\n ipv6 = s.getsockname()[0]\n except:\n if cfg['debug']:\n print(\"Couldn't create a socket to %s\" % test_host)\n print(\"Check that you have a valid IPv6 default route\")\n ipv6 = None\n\n return ipv6", "def dequeue_if_needed(self) -> Optional[torch.cuda.Event]:\n if len(self._queue) >= self._max_num_inflight_all_gathers:\n return self._dequeue()\n return None", "def get_device(self, field):\n return self._devices[field]", "def get_network_adapter() -> network.NetworkAdapter:\n if (ip := os.getenv('ref_ip')) is not None: # noqa: SIM112\n return network.get_adapter_containing_ip(ip)\n # get next available loopback adapter\n return next(adapter for adapter in network.get_adapters() if adapter.is_loopback)", "def _internal_lookup_device_by_keyid(self, keyid) -> Optional[LandscapeDevice]:\n\n self.landscape_lock.acquire()\n try:\n device = None\n if keyid in self._all_devices:\n device = self._all_devices[keyid]\n finally:\n self.landscape_lock.release()\n\n return device", "def _dequeue(self) -> Optional[torch.cuda.Event]:\n if self._queue:\n event = self._queue.popleft()\n return event\n return None", "def get_packet(self):\n if len(self._read_queue) == 0:\n raise NoPacketException()\n\n return self._read_queue.popleft()", "def maybe_dequeue(self):\n if self._queue.enqueued:\n return self._queue.dequeue()\n else:\n return None", "def get_discovery_message(self):\n return self.messages[\"discovery\"].get()", "def receive(self):\n events = self.poller.poll(self.timeout)\n\n # If there is control socket, he has the priority\n if len(events) == 2:\n return self._recv_serialized(self.control_socket)\n elif len(events) == 1:\n return self._recv_serialized(events[0][0])\n return None", "def find_ipv4(str_to_search):\n ipv4_match = re.search('[0-9]+\\.[0-9]+\\.[0-9]+\\.[0-9]+', str_to_search, re.M | re.I)\n if ipv4_match:\n first, last = ipv4_match.span()\n return ipv4_match.string[first:last]\n return None", "def get_route(self, srcif, daddr):\n peer = None\n routes = self.lookup_routes(daddr)\n\n # Rules go here\n if routes:\n # 1. Highest Preference\n routes = self.get_highest_preference(routes)\n # 2. Self Origin\n routes = self.get_self_origin(routes)\n # 3. Shortest ASPath\n routes = self.get_shortest_as_path(routes)\n # 4. EGP > IGP > UNK\n routes = self.get_origin_routes(routes)\n # 5. 
Lowest IP Address\n routes = self.get_lowest_ip(routes)\n # Final check: enforce peering relationships\n routes = self.filter_relationships(srcif, routes)\n\n if len(routes) > 0 and routes[0][SRCE]:\n return self.sockets[routes[0][SRCE]]\n else:\n # No viable routes found\n return None", "def get_address_without_netmask(device, interface, address_family,\r\n return_all=False):\r\n ip_addr_with_mask = get_interface_ip_address(\r\n device=device,\r\n interface=interface, \r\n address_family=address_family)\r\n\r\n if ip_addr_with_mask:\r\n return ip_addr_with_mask.split('/')[0]\r\n\r\n return None", "def get(self):\n if len(self.storage) != 0:\n line = self.get_next_line()\n while line == \"\":\n if len(self.storage) != 0:\n line = self.get_next_line()\n else:\n return None\n return line\n else:\n return None", "async def udp_client(url):\n host, port = pytak.parse_cot_url(url)\n stream = await pytak.asyncio_dgram.connect((host, port))\n if \"broadcast\" in url.scheme:\n sock = stream.socket\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\n return stream", "def get(self):\n try:\n return self.url_queue.get(timeout=self.timeout)\n except Exception as e:\n print(e)\n return None", "def get_ddwrt_data(self):\n if self.protocol == 'http':\n if not self.hostname_cache:\n _LOGGER.debug('Getting hostnames')\n # get hostnames from dhcp leases\n url = 'http://{}/Status_Lan.live.asp'.format(self.host)\n data = self.http_connection(url)\n\n # no data received\n if data is None:\n _LOGGER.debug('No hostname data received')\n return None\n\n dhcp_leases = data.get('dhcp_leases', None)\n\n # parse and cache leases\n if dhcp_leases:\n _LOGGER.debug('Parsing http leases')\n self.hostname_cache = _parse_http_leases(dhcp_leases)\n\n _LOGGER.debug('Getting active clients')\n # get active wireless clients\n url = 'http://{}/Status_Wireless.live.asp'.format(self.host)\n data = self.http_connection(url)\n\n if data is None:\n _LOGGER.debug('No active clients received')\n return None\n\n _LOGGER.debug('Parsing http clients')\n return _parse_http_wireless(data.get('active_wireless', None))\n\n elif self.protocol == 'ssh':\n active_clients = []\n # when no cache get leases\n if not self.hostname_cache:\n host_data = self.ssh_connection(self.host,\n [_DDWRT_LEASES_CMD,\n self.ddwrt_cmd])\n _LOGGER.debug(\n 'host_cache_data: {0}'.format(str(host_data)))\n if not host_data:\n return None\n\n self.hostname_cache = {l.split(\",\")[0]: l.split(\",\")[1]\n for l in host_data[0]}\n active_clients = [mac.lower() for mac in host_data[1]]\n else:\n host_data = self.ssh_connection(self.host, [self.ddwrt_cmd])\n _LOGGER.debug('host_data: {0}'.format(str(host_data)))\n if host_data:\n active_clients = [mac.lower() for mac in host_data[0]]\n\n for ap in self.aps:\n ap_data = self.ssh_connection(ap, [self.ddwrt_cmd])\n _LOGGER.debug('ap_data: {0}'.format(str(ap_data)))\n if ap_data:\n active_clients.extend([m.lower() for m in ap_data[0]])\n\n return active_clients", "def poll_data(self):\n with s.socket(s.AF_INET, s.SOCK_DGRAM) as sock:\n sock.bind(('', self.__port))\n while True:\n message, address = sock.recvfrom(1024)\n self.__address = address\n logging.debug('Received: {}'.format(message))\n self.process_data(message)", "def dpae_join(self, pkt, datapath, in_port):\n _payload = str(pkt.protocols[-1])\n self.logger.info(\"Phase 2 DPAE discovery packet received from dpid=%s \"\n \"port=%s payload=%s\",\n datapath.id, in_port, _payload)\n #*** Try decode of 
payload as JSON:\n try:\n dpae_discover = json.loads(_payload)\n except:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n self.logger.error(\"Phase 2 DPAE API Create exception while \"\n \"decoding JSON body=%s Exception %s, %s, %s\",\n _payload, exc_type, exc_value, exc_traceback)\n return 0\n #*** Check to see if JSON has a uuid_controller key:\n if 'uuid_controller' in dpae_discover:\n uuid_controller = dpae_discover['uuid_controller']\n else:\n self.logger.debug(\"No uuid_controller field in discovery \"\n \"packet so ignoring...\")\n return 0\n #*** Check to see if JSON has a hostname_dpae key:\n if 'hostname_dpae' in dpae_discover:\n hostname_dpae = dpae_discover['hostname_dpae']\n else:\n self.logger.debug(\"No hostname_dpae field in discovery \"\n \"packet so ignoring...\")\n return 0\n #*** Check to see if JSON has a if_name key:\n if 'if_name' in dpae_discover:\n if_name = dpae_discover['if_name']\n else:\n self.logger.debug(\"No if_name field in discovery \"\n \"packet so ignoring...\")\n return 0\n #*** Check to see if JSON has a uuid_dpae key:\n if 'uuid_dpae' in dpae_discover:\n uuid_dpae = dpae_discover['uuid_dpae']\n else:\n self.logger.debug(\"No uuid_dpae field in discovery \"\n \"packet so ignoring...\")\n return 0\n #*** Look the key up in the database:\n db_result = self.dbdpae.find_one({'_id': str(uuid_controller)})\n if db_result:\n #*** Check all fields match:\n if not hostname_dpae == str(db_result[u'hostname_dpae']):\n self.logger.error(\"Phase 2 hostname_dpae mismatch\")\n return 0\n if not if_name == str(db_result[u'if_name']):\n self.logger.error(\"Phase 2 if_name mismatch\")\n return 0\n if not uuid_dpae == str(db_result[u'uuid_dpae']):\n self.logger.error(\"Phase 2 uuid_dpae mismatch\")\n return 0\n self.logger.debug(\"Phase 2 updating DPAE record\")\n db_result = self.dbdpae.update_one(\n {'_id': str(uuid_controller)},\n {\n '$set': {\n 'dpid': datapath.id,\n 'switch_port': in_port\n },\n }\n )\n self.logger.debug(\"Phase 2 updated %s database record(s)\",\n db_result.modified_count)\n else:\n #*** Ignore as no uuid_controller key:\n self.logger.debug(\"Phase 2 discovery packet uuid_controller field \"\n \"not found in database, so ignoring...\")\n return 0", "def getEndpoint(self, endpoint):\n # If endpoint not recognized, you get None\n self.__lockobj.acquire()\n retval = None\n if endpoint in self.__endpoints.keys():\n retval = self.__endpoints[endpoint]\n self.__lockobj.acquire()\n return retval", "def peek(self):\n if self.is_empty():\n return None\n return self.list.head.data", "def _get_str(self, _call_time: float, read_until: t.Union[None, str], strip: bool = True) -> t.Union[str, None]:\n\n r = self.receive_str(read_until=read_until, strip=strip)\n\n st_t = time.time() # for timeout\n\n # wait for r to not be None or for received time to be greater than call time\n while (r is None or r[0] < _call_time):\n if (time.time() - st_t > self._timeout):\n # timeout reached\n return None\n\n r = self.receive_str(read_until=read_until, strip=strip)\n time.sleep(0.01)\n\n # r received\n return r[1]", "def recvPacket(self) -> Rudp.Packet:\n (packet, validity, c) = self.recv()\n if(c != self.client):\n raise Rudp.WrongClient(\"Wrong Package from \" + c)\n return packet", "def get(self, block_until_data=True):\n wait_for(lambda: not (block_until_data and self.data is None))\n \n with self.lock:\n return self.data", "def from_network_layer(buffer):\r\n packet = buffer.get_packet()\r\n # print(f'buffer.message:{buffer.message}')\r\n # if packet == None:\r\n 
# print(f\"[from_network_layer] packet:NULL\")\r\n print(f\"[from_network_layer] packet:{packet}\")\r\n return packet", "def get_udp_video_address(self):\r\n return f'udp://{self.tello_address[0]}:11111'", "def get_ip_address2(ifname):\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n return socket.inet_ntoa(fcntl.ioctl(\n s.fileno(),\n 0x8915, # SIOCGIFADDR\n struct.pack('256s', ifname[:15])\n )[20:24])\n except:\n return None", "def _receive(self):\n # initialize sockets map\n r, w, x = [self.socket], [], []\n r, w, x = select.select(r, w, x, self.sessiondata.timeout)\n if r:\n return self.socket.recv(4096)\n # return nothing on timeout\n return None" ]
[ "0.5770412", "0.5708058", "0.5640891", "0.5573784", "0.5538548", "0.5535006", "0.5515138", "0.54124844", "0.5348417", "0.5336429", "0.532234", "0.5316779", "0.5302935", "0.5275738", "0.5237896", "0.52374405", "0.5197468", "0.5190849", "0.5188156", "0.51715595", "0.51383466", "0.51308936", "0.5101702", "0.5088838", "0.5085172", "0.5059413", "0.50117445", "0.5011347", "0.50078803", "0.50032514", "0.50007844", "0.49851012", "0.49829224", "0.4950612", "0.4941264", "0.4932328", "0.49302593", "0.49300823", "0.4925986", "0.49229714", "0.4922208", "0.49185932", "0.49177602", "0.49147868", "0.4911977", "0.49085575", "0.49073246", "0.48932976", "0.48919722", "0.48826534", "0.48789072", "0.48770624", "0.487617", "0.48744595", "0.4867051", "0.48534858", "0.48504695", "0.48493394", "0.48491788", "0.48476344", "0.4846893", "0.48461872", "0.4837947", "0.48367122", "0.48362917", "0.4833604", "0.48271853", "0.48235726", "0.48197418", "0.4805892", "0.48047313", "0.4802069", "0.4797103", "0.47952518", "0.47922024", "0.47902152", "0.47824216", "0.47806528", "0.4773676", "0.47690505", "0.47659037", "0.47633782", "0.47615206", "0.4760595", "0.4757677", "0.475532", "0.47463745", "0.47415146", "0.47414118", "0.47403622", "0.47386864", "0.473775", "0.47350103", "0.47343493", "0.47237197", "0.47137052", "0.47101828", "0.47067356", "0.4705831", "0.47047248", "0.47004098" ]
0.0
-1
simply sends a message to the client address specified.
def send_net_message_client(message, client_addr):
    serverSocket.sendto(message, client_addr)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def msg_client(msg, client):\r\n client.send(bytes(str(msg), \"utf-8\"))", "def sendToClient(self, client_id, message_type, message):\n if not client_id in self.client_to_socket:\n raise ValueError(\"The client with id {} does not exist\".format(client_id))\n self.sendToSocket(self.client_to_socket[client_id],message_type,message)", "def send_message(self, client, message):\n self.stdout.write(message)\n client.send(f'HTTP/1.1 200 OK\\r\\n\\r\\n{message}'.encode(\"utf-8\"))\n client.close()", "def send_msg(self, payload, to_addr, reply_to_addr):\n self._client.send_msg(payload, to_addr, reply_to_addr)", "def send(msg): # event is passed by binders.\n # print(\"i sended: \" + msg)\n msg = msg + \";\"\n client_socket.send(bytes(msg, \"utf8\"))", "def send(self, msg: str):\n\t\tself.client.send(msg.encode())", "def send(self, message):\n\t\tmessage_string = self.send_address + \" \" + message + \" /\"\n\t\tself.add_to_queue(message_string)", "async def send_to_user(self, user: User, msg: Msg, address: str = None):\n if address is None:\n address = user.current_address\n\n await self.send(msg, address)", "def send_stun(self, message, addr):\n logger.debug('%s > %s %s', self, addr, message)\n self.transport.sendto(bytes(message), addr)", "def client():\n host = '127.0.0.1'\n port = 8125\n sock = socket.socket(\n socket.AF_INET,\n socket.SOCK_DGRAM)\n sock.connect((host, port))\n def send(msg):\n sock.sendall(msg)\n return send", "def client(ip, port, message):\n\n # Connect to the server\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((ip, port))\n try:\n sock.sendall(bytes(message, 'ascii'))\n response = str(sock.recv(BUF_SIZE), 'ascii')\n print(\"Client received: {}\".format(response))\n finally:\n sock.close()", "def send(self, message):\n if self.connection:\n self.connection.send(message)", "def send_message(self, data):\n header, data = format_msg(data)\n self.server_socket.sendto(header, self.client_address)\n self.server_socket.sendto(data, self.client_address)", "def _send_via_transport(self, message):\n\n self.message_interface.send(message)", "def send_message(self, message):\n\t\tself.logger.send(\"{0} - {1}\".format(self.peerip, str(message)))\n\t\ttry:\n\t\t\tself.socket.sendall(message.get_message(self.coin))\n\t\texcept socket.error as err:\n\t\t\tself.stop(err.errno,'send_message')", "def send(self, message):\n self.sock.send(message)", "def send(message):\n\tmessage = message.encode()\n\tconn.send(message)", "def sendMessage(self, msg):\n # Socket Object\n self.sock.connect((self.host, self.port))\n self.sock.send(msg)\n self.sock.close()", "def send(self, client, data):\n try:\n client.send(data)\n except Exception:\n self.clients.remove(client)", "def _send(self, message):\n self.sock.sendall('%s\\n' % message)", "def send(self, msg):\n self.message('Me', msg)", "def send(self, message):\n self.logger.info(\"Sending to server: %s\" % message)\n self.sendLine(message)", "def broadcast_message(msg: str):\r\n\tfor ip in _clients.keys():\r\n\t\tsend_message(ip, msg)", "def send(msg, dest=None):", "def send_to_client(self, ip_addr: str, data, compress=True):\n if self.host is not None:\n try:\n client_conn_obj = self.host.lookup_client(ip_addr)\n self.host.callback_client_send(client_conn_obj, data, compress)\n except MastermindErrorSocket as e:\n raise MastermindErrorSocket(e)\n except Networking.Host.ClientNotFoundException:\n logging.error(f\"Client at {ip_addr} is not connected\")\n else:\n raise MastermindErrorServer(\"Server is not 
available\")", "def send(self,message):\n self.transport.write(message, (\"228.0.0.5\", udpbport))", "def sendChatMessage(self, msg):\n self.transport.write(msg)", "def send(self, message_body: str, target: str):\n\t\tif target == 'local':\n\t\t\tself.client_process(message_body)\n\t\telse:\n\t\t\twith socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:\n\t\t\t\ttry:\n\t\t\t\t\tsock.settimeout(1)\n\t\t\t\t\tsock.connect((target, self.channel_port))\n\t\t\t\t\tsock.send(message_body.encode())\n\t\t\t\texcept socket.timeout:\n\t\t\t\t\tself.registry.delete_ip(target)", "def send_message(self, message):\r\n\t\tself.__tcpSocket.write(message.encode('utf8'))", "def send(self, address: Address, packet: StrictPacket):\n with self._clientDictLock:\n self._clients[address].send(packet)", "def send_message(client):\n send_msg = [client.TYPE, client.accepted, client.heaps[0], client.heaps[1], client.heaps[2], client.win, END]\n send_msg = [int(e) for e in send_msg]\n packed_data = struct.pack(SERVER_SEND_FORMAT, send_msg[0], send_msg[1], send_msg[2], send_msg[3], send_msg[4],\n send_msg[5], send_msg[6])\n\n to_next_stage = client.nonblocking_send(\n packed_data) # figuring-out if in the next loop we have to comeback here cuz we dont have sendall anymore\n if to_next_stage:\n client.stage = 1 # indicating that in the next round we dont have to comeback to the greeting message and we should recv message from client", "def broadcast(msg):\n\n for sock in clients:\n sock.send(bytes(msg, \"utf-8\"))", "def send(self, address, message):\n self.__set_i2c_address(address)\n self.__write(message)", "def send(self):\n if(self.target):\n try:\n self.message = self.message +\"\\r\\n\"\n self.target[0].send(self.message)\n except socket.error, err:\n print err", "async def send(self, msg: Msg, address: str):\n ident, interface = address.split(address_split_char)\n\n try:\n inbox = self.plugin_inboxes[interface]\n except AttributeError:\n raise AzuraBotError(f\"There is no inbox registered for \"\n f\"'{interface} (address {address})\")\n\n print(f\"[bot] Private message: AzuraBot->{address}: {msg.text}\")\n await inbox.put(msg)", "def send_message(self, message:str):\r\n msg_send = message.encode()\r\n self.server_connection.send(msg_send)", "def send_message(self, message):\n self.print_debug_message(message)\n self.socket.send(message)", "def client(ip, port, message): \n # Conectado con el servidor\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((ip, port))\n try:\n sock.sendall(bytes(message, 'utf-8'))\n response = sock.recv(BUF_SIZE)\n print (\"Recibido por el cliente: %s\" %response)\n finally:\n sock.close()", "def send_message(self, message):\n msg_bytes = (\n f'{self.username}{self.delimiter}{message}'\n ).encode('utf-8')\n self.socket.writeDatagram(\n qtc.QByteArray(msg_bytes),\n qtn.QHostAddress.Broadcast,\n self.port\n )", "def send(self, msg):\n self.__sock.send(msg)", "def send_reply(self, message, data=None):\n try:\n client = self._active_clients[message[\"client-tag\"]]\n except KeyError:\n self._log.error(\"send: No active client %s message discarded\" % (\n message[\"client-tag\"]\n ))\n else:\n client.send(message, data)", "def send(self, msg: str):\n message = msg.encode(HttpClient.FORMAT)\n self.client.send(message)\n print(\"[MESSAGE] message sent:\", msg)", "def broadcast(msg, prefix=\"\"): # prefix is for name identification.\r\n for sock in clients:\r\n sock.send(bytes(prefix, \"utf8\")+msg)", "def broadcast(self, addr, message):\n for addr in 
set(six.iterkeys(self.addr_to_conn_struct_map)) - {addr}:\n try:\n self.addr_to_conn_struct_map[addr].conn.send(message)\n except:\n # if we have any error sending, close the client connection, then remove it from our list\n self.clean(addr)", "def broadcast(msg, prefix=\"\"): # prefix is for name identification.\r\n\r\n for sock in clients:\r\n sock.send(bytes(prefix, \"utf8\") + msg)", "def send_message(message, client):\n try:\n message = (client.users().messages().send(userId='me', body=message).execute())\n print('Message Id: %s' % message['id'])\n return message\n except errors.HttpError as error:\n print('An error occurred: %s' % error)", "def send(self, msg, label=\"\"):\n self.remoter.tx(msg) # send to remote\n log.debug(\"%s sent %s:\\n%s\\n\\n\", self.remoter, label, bytes(msg))", "def send(self, msg):\n sleep(self.m_to)\n self.conn.send(msg)", "def _send_message(self, path, arg_lst):\n self._client.send_message(path, arg_lst)", "def sendMsg(self, msg):\n self.sockUDP.sendto(bytes(msg), self.serverAddress)\n logger.debug(\"sent: %r\", msg)", "def send_message(self, message):\n pass", "def broadcast(msg, prefix=\"\"): # prefix is for name identification.\n\n for sock in clients:\n sock.send(bytes(prefix, \"utf8\")+msg)", "def send_message(self, message):\n\n self.socket.send(message.serialize())", "def message(self, client, text, *args):\n if client is None:\n self.say(text % args)\n elif client.cid is None:\n pass\n else:\n print \"sending msg to %s: %s\" % (client.name, re.sub(re.compile('\\^[0-9]'), '', text % args).strip())", "def broadcast(self, msg):\n for client in self.clients.values():\n send_data(client.socket, msg)", "def broadcast(msg, prefix=\"\"): # prefix is for name identification.\n\n for sock in clients:\n sock.send(bytes(prefix) + msg)", "def sendto(self, msg, addr, family):\n sock = socket.socket(family, socket.SOCK_DGRAM)\n sock.sendto(msg, addr)\n time.sleep(0.1)", "def send(self, message):\n _check_message_type(message=message)\n response = requests.post(\n self._server_url + _SEND_URL,\n data={\"id\": self._chat_id, \"msg\": message}\n )", "def sms(self, phone_address, message):\n self.server.sendmail(self.username, phone_address, message)", "def send(self, message):\n pass", "def send_message(self, message, address, verbose=False):\n assert isinstance(message, Message.Implementation)\n assert isinstance(address, tuple)\n assert isinstance(verbose, bool)\n self.encode_message(message)\n if verbose:\n logger.debug(\"%s (%d bytes) to %s:%d\", message.name, len(message.packet), address[0], address[1])\n self.send_packet(message.packet, address)\n return message", "def broadcast(msg, prefix=\"\"): # prefix is for name identification.\n\tfor sock in clients:\n\t\tsock.send(bytes(prefix, \"utf8\")+msg)", "def send_message(self, message, socket):\n socket.send(bytes(message, 'UTF-8'))", "def send_message(self, message):\n self.client.queue.put(message)", "def send_message(self, message):\n encoded_message = self.encode_message(message)\n self.socket.send(encoded_message)", "async def send_msg(self, message: str) -> None:\n await self.socket.sendall(message.encode())", "def send_public_message(self, userMessage):\n for client in self.clients:\n self.send_message(userMessage, client.get_socket())", "def msg(self, target, message):\n self.server.message_queue.put(('[email protected]', target, message))", "def sendto(self, data, addr):\n asyncio.ensure_future(self.__inner_protocol.send_data(data, addr))", "def sendServiceMessage(self, message):\n 
self.sendMessage(self.SERVICE_TO_SERVICE, message)", "def send_message(stdscr, username=None):\n # Show the cursor and echo output.\n curses.curs_set(1)\n curses.echo()\n stdscr.clear()\n stdscr.refresh()\n if username is None:\n safe_put(stdscr, \"Recipient username: \", (0, 0))\n username = stdscr.getstr(0, 20)\n stdscr.clear()\n stdscr.refresh()\n tnu = taunet.users.by_name(username)\n if tnu == None:\n print(\"No such user. Known users: \" + \", \".join(sorted([u.name for u in taunet.users.all()])))\n return\n if not is_online(tnu):\n print(\"Couldn't connect to that user's host.\")\n return\n safe_put(stdscr, \"Message:\", (0, 0))\n message = stdscr.getstr(0, 9)\n stdscr.clear()\n stdscr.refresh()\n ship_tnm(tnu, taunet.TauNetMessage().outgoing(tnu.name, message))", "def fsend(var, wrapper, message):\n wrapper.source.client.send(message)", "def send_message(self, message):\n \n msgPacket = serverbound.play.ChatPacket()\n msgPacket.message = message\n self.connection.write_packet(msgPacket)", "def send_message(message: str, bot_client=bot):\n try:\n return bot_client.send_message(chat_id=CHAT_ID, text=message)\n except telegram.error.TelegramError as e:\n logger.exception(e)\n raise", "def send_message(self, header, message):\n self.__logger.debug(\"Sending message for (%d: %d)\", header.deviceType, header.deviceID)\n stream = AmberClient.__prepare_stream_from_header_and_message(header, message)\n self.__socket_sendto_lock.acquire()\n try:\n self.__socket.sendto(stream, (self.__hostname, self.__port))\n finally:\n self.__socket_sendto_lock.release()", "async def send_message(user, message):\n try:\n return await user.send(message)\n except ConnectionClosed:\n pass", "def send_message_tcp(address, port, message):\n socket_connection = socket.socket( socket.AF_INET, socket.SOCK_STREAM ) # tcp\n socket_connection.connect((address, port))\n socket_connection.recv(2048)\n socket_connection.send(message)\n response = socket_connection.recv(2048)\n socket_connection.close()\n return response", "def send_to_webclient(message):\n try:\n msgStr = json.dumps(message)\n except:\n log(2,\"Could not convert message to JSON string: {0}\".format(message))\n return\n\n try:\n log(0,\"Msg to WebCl: {0}\".format(msgStr))\n emit('webcl', msgStr, broadcast=True)\n except:\n log(2,\"Could not communicate to web client: {0}\".format(msgStr))", "def send(self, service_id, destination_address, data):\n self.send_from(service_id, destination_address, self.address, data)", "def transmit(self, msg):\r\n # send our message to the client\r\n self.conn.sendall(msg)", "def _send(self, what, value, address='localhost:44818', **kwargs):\n\n tag_string = ''\n tag_string = EnipProtocol._tuple_to_cpppo_tag(what, value)\n # print 'DEBUG enip _send tag_string: ', tag_string\n\n cmd = shlex.split(\n self._client_cmd +\n '--log ' + self._client_log +\n '--address ' + address +\n ' ' + tag_string\n )\n # print 'DEBUG enip _send cmd shlex list: ', cmd\n\n # TODO: pipe stdout and return the sent value\n try:\n client = subprocess.Popen(cmd, shell=False)\n client.wait()\n\n except Exception as error:\n print('ERROR enip _send: ', error)", "def sendto(self, name, msg):\n self.send(\"send/{}/{}:{}\".format(self.msg_id, name, msg))\n self.msg_id += 1", "def send(self, email):\n client = self.clients[email.addressee]\n client.receive(email)", "def send_message():\n try:\n sock = socket(AF_INET, SOCK_STREAM)\n sock.connect((SERVER_IP, SERVER_PORT))\n print('[+] ' + SERVER_IP + ' connected!')\n position = MESSAGE.encode('utf-8')\n 
sock.send(bytes(position))\n sock.close()\n print('[+] Transfer completed!')\n except Exception as e:\n print('[-]', e)", "def send(event=None): #event is passed by binders.\n try:\n msg = my_msg.get()\n my_msg.set(\" \") #Clears input field.\n client_socket.send(bytes(msg, \"utf8\"))\n \n except:\n \n HOST = '10.0.0.8'\n PORT = 8081\n ADDR = (HOST, PORT)\n \n s = socket(AF_INET, SOCK_STREAM)\n client_socket.bind(ADDR)\n s.connect((HOST, PORT))\n s.send(msg)\n \n if msg == \"{quit}\":\n client_socket.close()\n top.destroy()", "def client(self,message):\n self.message = message\n self.run()", "def send(self,data,address,isBinary=False):\n if DEBUG: print \"In class Server, function, send\"\n #dest = self.resolve_address(address)\n peer_to_send_message = None\n #or uid in self.clientManager.peer_servers:\n #pdb.set_trace()\n #peer_server = self.clientManager.connections[uid]\n #if peer_server.ip == address:\n #peer_to_send_message = peer_server\n #pdb.set_trace()\n return self.connection_manager.send(data,address,isBinary)\n \n #for client in self.clientManager.connections.keys():\n #pdb.set_trace()\n #self.clientManager.connections[client].sendMessage(\"------From RBAnalysis---Hi\")", "def send(event=None): # event is passed by binders.\r\n msg = my_msg.get()\r\n my_msg.set(\"\") # Clears input field.\r\n client_socket.send(bytes(msg, \"utf8\"))\r\n if msg == \"{quit}\":\r\n client_socket.close()\r\n top.quit()", "def send(self, msg):\n with self._send_lock:\n self._rt.send_message(msg.bytes())", "def send_message_to_client(self, client, output_str):\n # TODO throw client not found exception\n # TODO throw socket closed exception\n client_socket = self.all_clients[client]\n\n byte_array_message = str.encode(output_str)\n # We are packing the length of the packet to\n # unsigned big endian struct to make sure that it is always constant length\n client_socket.send(struct.pack('>I', len(byte_array_message)) + byte_array_message)", "def send_message(message, destination):\n\n #Your code here\n pass", "def send(self, message, header='message'):\n if not message: return\n self.socket.sendall((header+':'+message).encode())", "def email(self, email_address, message):\n self.server.sendmail(self.username, email_address, message)", "def send_a_message(self, message: str):\n\n client = Client(account_sid, auth_token)\n\n message = client.messages \\\n .create(\n body=message,\n to=self.emergency_number,\n from_='+16505499680'\n )\n\n print(message.sid)", "def sendto(self, data: bytes, address: Tuple) -> int:\n ...", "def sendto(self,msg,address):\n\n address = self.pubIPToMorse(address);\n \n if not self.validIPAndPort:\n print(\"Error: Invalid IP and port or socket has not been bound with an IP and port: message not sent!\");\n return;\n\n to_ip_addr = address[0];\n to_port = address[1];\n msg = msg.decode(\"utf-8\"); #Convert from bytearray to a string for ease of operation\n\n # Assemble UDP package\n udp_package = to_port + self.my_port + msg;\n\n # Assemble IP package\n ip_header = to_ip_addr + self.my_ip_addr + self.protocol_identifier + t.base36encode(len(udp_package));\n ip_package = ip_header + udp_package;\n\n # Assemble MAC package\n # First check to see if the MAC of the recieving IP is known, if not address message to router\n if to_ip_addr in self.macDict.keys(): mac_to = self.macDict[to_ip_addr];\n else: mac_to = self.macDict['router_mac']; # This only works if you're not the router...\n # Then assemble the remainder of the MAC package\n mac_from = self.my_mac;\n # Send the message\n 
print(mac_to+mac_from+ip_package)\n t.sendMessage(mac_to,mac_from,ip_package);", "def just_send(self, client_socket, msg):\n msg = msg.encode('utf-8')\n message_header = f\"{len(msg):<{HEADER_LENGTH}}\".encode('utf-8')\n client_socket.send(message_header + msg)\n return", "def send_message(msg, settings):\n from_jid = xmpp.protocol.JID(settings['xmpp_jid'])\n passwd = settings['xmpp_password']\n\n client = xmpp.Client(from_jid.getDomain(), debug=[])\n if client.connect():\n if client.auth(from_jid.getNode(), passwd):\n client.send(msg)\n client.disconnect()", "def send_message(self, message):\n if self.connected:\n self.send(\n json.dumps(message.request))", "def transmit(ip_address, port, message):\n # Specify the message to be sent\n sock = socket.socket(socket.AF_INET, # Internet\n socket.SOCK_DGRAM) # UDP\n\n # Send the message to the specified IP and port\n sock.sendto(message, (ip_address, port))\n # print(\"Message sent\")" ]
[ "0.7387198", "0.6993373", "0.6930433", "0.69239", "0.6904713", "0.68367815", "0.6811525", "0.67738485", "0.6729188", "0.6701323", "0.6656938", "0.6642157", "0.66237146", "0.6605746", "0.6597053", "0.6596277", "0.6579792", "0.65749264", "0.6560786", "0.6554997", "0.65531754", "0.65393853", "0.65379685", "0.6532616", "0.65285164", "0.65186024", "0.6511578", "0.65010464", "0.6479287", "0.6472287", "0.64628386", "0.6450381", "0.6448615", "0.6444983", "0.6440521", "0.6438534", "0.64172304", "0.6413799", "0.64054865", "0.63971126", "0.6391926", "0.6389668", "0.63791025", "0.6377808", "0.6377762", "0.63737124", "0.6368152", "0.6364913", "0.63560414", "0.6354352", "0.6354175", "0.6351976", "0.6347748", "0.6335558", "0.633536", "0.63336086", "0.6320268", "0.6316036", "0.6314409", "0.63019353", "0.6289227", "0.6285851", "0.62809455", "0.6267487", "0.6266301", "0.6247465", "0.62458223", "0.62373036", "0.62338316", "0.6231437", "0.62237257", "0.6219339", "0.62180597", "0.6201505", "0.6198065", "0.619319", "0.61930275", "0.6191049", "0.6184045", "0.6181792", "0.6170437", "0.6169864", "0.6161958", "0.61548245", "0.6154302", "0.6154022", "0.6151929", "0.6151583", "0.61361474", "0.61329997", "0.6132932", "0.6125589", "0.6115578", "0.611435", "0.610544", "0.6102987", "0.61025375", "0.6090688", "0.6086494", "0.60856503" ]
0.82171863
0
process incoming messages from clients.
def process_net_message(message, address):
    if message[0] == '<' and message[-1] == '>':
        message = message[1:-1]
        if ":" in message:
            command, data = message.split(":")
        else:
            command = message
            data = None
        if command == "JOIN":
            print("added player to player list:", data, address)
            ip_address, port = address
            active_player_dict[str(address)] = Player(ip_address, port, data, random.randint(0, 639), random.randint(0, 479))
        elif command == "QUIT":
            print("player removed from player list:", address)
            del active_player_dict[str(address)]
        elif command == "KD":
            data = chr(int(data))
            if data not in active_player_dict[str(address)].keys_down:
                active_player_dict[str(address)].keys_down.append(data)
        elif command == "KU":
            data = chr(int(data))
            if data in active_player_dict[str(address)].keys_down:
                active_player_dict[str(address)].keys_down.remove(data)
        elif command == "keepAlive":
            data = int(data)
            if active_player_dict[str(address)].alive > 0:  # time for player to be alive is not zero
                active_player_dict[str(address)].alive = data
                currentTime = time.time()
    else:
        print("invalid message.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_clients():\n for client in state.CLIENT_LIST:\n if client.active and client.cmd_ready:\n logging.debug(\"Found a message, processing...\")\n msg_processor(client)", "async def _process_messages(self) -> None:\n try:\n while not self._client.closed:\n msg = await self._client.receive()\n\n if msg.type in (WSMsgType.CLOSE, WSMsgType.CLOSED, WSMsgType.CLOSING):\n break\n\n if msg.type == WSMsgType.ERROR:\n raise ConnectionFailed()\n\n if msg.type != WSMsgType.TEXT:\n raise InvalidMessage(f\"Received non-Text message: {msg.type}\")\n\n try:\n data = msg.json(loads=ujson.loads)\n except ValueError as err:\n raise InvalidMessage(\"Received invalid JSON.\") from err\n\n if LOGGER.isEnabledFor(logging.DEBUG):\n LOGGER.debug(\"Received message:\\n%s\\n\", pprint.pformat(msg))\n\n self._handle_incoming_message(data)\n\n finally:\n # TODO: handle reconnect!\n LOGGER.debug(\"Listen completed. Cleaning up\")\n\n for future in self._result_futures.values():\n future.cancel()\n\n if not self._client.closed:\n await self._client.close()\n\n if self._shutdown_complete_event:\n self._shutdown_complete_event.set()\n else:\n LOGGER.debug(\"Connection lost, will reconnect in 10 seconds...\")\n self._loop.create_task(self._auto_reconnect())", "def _process_messages(self):\r\n \r\n self._print(\"%s: Starting _process messages, looking out for special messages:\" \\\r\n % (self._clientnr))\r\n \r\n # Set some expected messages.\r\n expected = {}\r\n expected['clientconfirm'] = cb.CLIENTCONFIRM[:cb.CLIENTCONFIRM.find('_')]\r\n expected['waitwhat'] = cb.WAITWHATCLIENT[:cb.WAITWHATCLIENT.find('_')]\r\n \r\n for key in expected.keys():\r\n self._print(\"%s: Special message '%s': '%s'\" % \\\r\n (self._clientnr, key, expected[key]))\r\n \r\n # Run idefinitively\r\n while True:\r\n \r\n # Get new incoming commands.\r\n cmds = self.udp.getCommands()\r\n self._print(\"%s: Found %d new UDP commands.\" % \\\r\n (self._clientnr, len(cmds)))\r\n # Add new commands to the queue.\r\n for c in cmds:\r\n # Parse the message.\r\n target, message, clienttime = c.text.split('|')\r\n self._print(\"%s: Found message (%s to %s, t=%s) '%s'\" % \\\r\n (self._clientnr, c.ip, target, clienttime, message))\r\n # Only process messages from the server.\r\n if c.ip == self._servernr:\r\n # Check if this is a client confirmation message.\r\n if expected['clientconfirm'] in message:\r\n self._print(\"%s: Adding message '%s' (t=%s) to the incoming queue\" \\\r\n % (self._clientnr, message, clienttime))\r\n self._incominglock.acquire()\r\n self._incoming.append(message)\r\n self._incominglock.release()\r\n # Only process the messages that were directed at this client.\r\n elif target in ['None', str(self._clientnr)]:\r\n # Check if this is a confused message to find out what\r\n # the client is waiting for.\r\n if expected['waitwhat'] in message:\r\n self._print(\"%s: Received '%s' from server\" % \\\r\n (self._clientnr, message))\r\n # Parse the waitwhat message, which looks like this:\r\n # 'waitwhatclient_expected=%s'\r\n msg, xpctd = message.split('_')\r\n xpctd = xpctd[xpctd.find('=')+1:]\r\n # Re-send the last version of the expected message.\r\n if xpctd in self._lastmessage.keys():\r\n self._outgoing.append(self._lastmessage[xpctd])\r\n self._print(\"%s: Resending the last version of expected message '%s': '%s'\" % \\\r\n (self._clientnr, xpctd, self._lastmessage[xpctd]))\r\n else:\r\n self._print(\"%s: Do not have a last version of expected message '%s'\" % \\\r\n (self._clientnr, xpctd))\r\n else:\r\n # Add the message to 
the queue.\r\n self._print(\"%s: Adding message '%s' (t=%s) to the incoming queue\" \\\r\n % (self._clientnr, message, clienttime))\r\n self._incominglock.acquire()\r\n self._incoming.append(message)\r\n self._incominglock.release()\r\n # Chuck a message out if the queue is getting too long.\r\n if len(self._incoming) > self._maxincominglen:\r\n self._incominglock.acquire()\r\n delmsg = self._incoming.pop(0)\r\n self._incominglock.release()\r\n self._print(\"%s: Removed message '%s' from the incoming queue\" \\\r\n % (self._clientnr, delmsg))\r\n else:\r\n self._print(\"%s: Ignoring message '%s', as it wasn't for me (%s)\" \\\r\n % (self._clientnr, message, self._clientnr))\r\n else:\r\n self._print(\"%s: Ignoring message '%s', as it wasn't from the server (%s)\" \\\r\n % (self._clientnr, message, self._servernr))\r\n \r\n # Process outgoing commands.\r\n while len(self._outgoing) > 0:\r\n # Send a message to the server.\r\n self._outgoinglock.acquire()\r\n message = self._outgoing.pop(0)\r\n self._outgoinglock.release()\r\n self._print(\"%s: Sending '%s' to %s\" % \\\r\n (self._clientnr, message, self._servernr))\r\n msg = 'cmd,%s|%s' % (self._servernr, message)\r\n self.udp.sendWithTimeStamp(msg, '|')\r\n for i in range(self._message_reps):\r\n self.udp.sendWithTimeStamp(msg, '|')\r\n # Store the message in the 'last sent' dict.\r\n if '_' in message:\r\n m = message[:message.find('_')]\r\n else:\r\n m = message\r\n self._lastmessage[m] = message", "def __process_requests(self):\n\t\tfor received_message in self.receiver:\n\t\t\tif self.registry.ip_known(received_message.sender):\n\t\t\t\tlogger.info(\"Message received from registered client.\")\n\t\t\t\tif received_message.body.startswith(COMMAND_FLAG_CHAR):\n\t\t\t\t\tlogger.debug(\"Message was a command.\")\n\t\t\t\t\tself.parse(received_message.body)\n\t\t\t\telse:\n\t\t\t\t\tlogger.debug(\"Message was generic.\")\n\t\t\t\t\tself.send_to_all(received_message)\n\t\t\telse:\n\t\t\t\tlogger.info(\"Message received from an unregistered client.\")\n\t\t\t\tself.attempt_to_register(received_message)", "def process_messages(self):\n pass", "def _dispatch_from_client_request(self):\n # Listen for client connection\n self._from_client_request.listen()\n\n while not self._exit_request:\n readable, _, _ = select([self._from_client_request], [], [self._from_client_request], 0.1)\n\n if readable:\n client_conn, client_addr = readable[0].accept()\n client_conn.setblocking(False)\n\n client_name_read, _, _ = select([client_conn], [], [client_conn])\n if client_name_read:\n client_name = json.loads(client_name_read[0].recv(cfg.HEADER).decode('utf-8'))\n else:\n print(\"Connection closed\")\n continue\n\n self._thread_lock.acquire()\n self._from_client_connections[client_conn] = client_name\n self._state[client_name] = 0\n self._thread_lock.release()\n\n print(\"Receiving commands from [\" + client_name + \", \" + client_addr[0] + \", \" + str(client_addr[1]) + ']')", "def collector_process_data(self, data):\n for c in clients:\n c.on_message(json.dumps(data))", "def handle(self):\n self.ip = self.client_address[0]\n self.port = self.client_address[1]\n self.connection = self.request\n #self.logged_in = False\n\n # Loop that listens for messages from the client\n while True:\n received_string = self.connection.recv(4096).strip()\n if received_string:\n jsonObject = json.loads(received_string)\n request = jsonObject.get('request')\n #print(received_string)\n #self.handle_data(received_string)\n if request == 'login':\n print 'logging in'\n 
self.login(jsonObject)\n elif request == 'logout':\n self.logout()\n elif request == 'msg':\n self.send(jsonObject)\n elif request == 'names':\n self.getNames()\n elif request == 'help':\n return \"geiegohruuhiegr\"\n else:\n return \"you suck\"\n\n else:\n print('The client is disconnected.')\n break \n # TODO: Add handling of received payload from client", "def handle_client(self, client_socket):\n while True:\n response = client_socket.recv(self.HEADER_LENGTH)\n if not response:\n continue\n message_length = int(response.decode(self.FORMAT))\n message = client_socket.recv(message_length)\n response = self.parse_message(message)\n utils.send_message(response, client_socket, self.HEADER_LENGTH, self.FORMAT)", "def _dispatch_to_client_request(self):\n # Listen for client connection\n self._to_client_request.listen()\n\n while not self._exit_request:\n readable, _, _ = select([self._to_client_request], [], [self._to_client_request], 0.1)\n if readable:\n client_conn, client_addr = readable[0].accept()\n client_conn.setblocking(False)\n self._to_client_connections.append(client_conn)\n print(\"Sending replies to [\" + client_addr[0] + \", \" + str(client_addr[1]) + ']')", "def _process_messages(self, room, new_messages):\n\t\tfor message in new_messages:\n\t\t\tself._log.info(\"handling message {}\".format(message[\"id\"]))\n\n\t\t\tfor reactive in self._reactives:\n\t\t\t\ttry:\n\t\t\t\t\treactive(room, message, self, self._hipchat)\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tself._log.error(\"reactive {!r} errored while handling message\".format(reactive), exc_info=True)", "def _incoming_read(self, client, data, error):\n\n if error is not None:\n client.close()\n del self._incoming[client]\n return\n\n incoming = self._incoming[client]\n incoming.unpacker.feed(data)\n for req_id, message in incoming.unpacker:\n self._call_handler(\n partial(self._queue_response,\n client, req_id),\n self._call_interface.queue_call,\n message,\n )", "def process_chatter(self, msg):\n # note, nothing in here is ROS specific, it's just python code that\n # runs when new info appears\n\n print msg.data # print the recieved message\n\n self.msgs_recieved += 1 # increase msg count\n self.msgs_recieved %= 500 # mod 500 so we don't get enormous numbers\n self.msg = \"%d messages recieved\" % self.msgs_recieved # set message", "def handle(self):\n self.ip = self.client_address[0]\n self.port = self.client_address[1]\n self.connection = self.request\n\n state.addConnection(self.connection)\n\n # Loop that listens for messages from the client\n while True:\n received_string = self.connection.recv(4096)\n\n if len(received_string) == 0:\n continue\n\n # TODO: Add handling of received payload from client\n\n # Convert payload from JSON to object\n payloadToData = json.loads(received_string)\n\n # determine what request is being made\n request_handler = RequestHandler(payloadToData,\n state,\n self.connection)\n\n # execute and generate response (JSON formatted)\n jsonResponse = request_handler.callHandler()\n\n if not jsonResponse == 'BROADCAST':\n # send response\n self.connection.send(bytes(jsonResponse, \"ascii\"))", "def manage_read_request(self, client):\n\n # obtain the message\n message = client.recv()\n message = json.loads(message)\n msg = message[\"payload\"].strip()\n if msg.startswith(\"/\"):\n type = \"c2s\"\n elif msg.startswith(\"@\"):\n type = \"c2c\"\n else:\n type = \"c2g\"\n\n func = getattr(self, \"request_\"+type)\n func(client, message)\n # self.msg_map[message['type']](client, message)", "def 
_process_socket(self, client):\n itrans = self.inputTransportFactory.getTransport(client)\n otrans = self.outputTransportFactory.getTransport(client)\n iprot = self.inputProtocolFactory.getProtocol(itrans)\n oprot = self.outputProtocolFactory.getProtocol(otrans)\n try:\n while True:\n self.processor.process(iprot, oprot)\n except TTransport.TTransportException:\n pass\n except:\n log.error(traceback.format_exc())\n\n itrans.close()\n otrans.close()", "def receive_message(self, client):\n connected = True\n while connected:\n message = client.get_socket().recv(1024)\n message = message.decode('UTF-8')\n message = message.split(' ')\n\n if message[0] == Protocol.IDENTIFY.value:\n if len(message) > 1:\n self.change_client_name(message[1], client)\n else:\n self.send_message('No se especifico nombre.', client.get_socket())\n\n\n elif message[0] == Protocol.STATUS.value:\n if len(message) > 1:\n self.change_user_status(message[1], client)\n else:\n self.send_message('No se especifico status.', client.get_socket())\n\n\n elif message[0] == Protocol.MESSAGE.value:\n if len(message) > 2:\n message_to_user = message[1]\n userMessage = self.get_user_message(message, 2)\n self.send_direct_message(message_to_user, userMessage, client)\n else:\n if len(message == 1):\n self.send_message('No se especifico mensaje.', client.get_socket())\n else:\n self.send_message('No se especifico usuario ni mensaje.',\n client.get_socket())\n\n\n elif message[0] == Protocol.USERS.value:\n self.send_clients(client)\n\n\n elif message[0] == Protocol.PUBLICMESSAGE.value:\n if len(message) > 1:\n userMessage = self.get_user_message(message, 1)\n self.send_public_message(userMessage)\n else:\n self.send_message('No se especifico mensaje.', client.get_socket())\n\n\n elif message[0] == Protocol.CREATEROOM.value:\n if len(message) > 1:\n roomName = message[1]\n self.create_room(roomName, client)\n else:\n self.send_message('No se especifico nombre de la sala.', client.get_socket())\n\n\n elif message[0] == Protocol.INVITE.value:\n if len(message) > 2 and self.verify_chat_room_existance(roomName):\n roomName = message[1]\n users_verified = self.get_unique_users(message)\n sockets = self.get_sockets(users_verified)\n if len(sockets) > 0:\n self.invite_users(roomName, sockets, client)\n else:\n self.send_message('No existen los usuarios que quieres invitar.',\n client.get_socket())\n else:\n if len(message) == 2:\n self.send_message('No se especificaron los invitados a la sala.',\n client.get_socket())\n elif len(message) == 1:\n self.send_message('No se especifico el nombre de la sala ni los invitados.',\n client.get_socket())\n elif self.verify_chat_room_existance(roomName) == False:\n self.send_message('No existe una sala con ese nombre.',\n client.get_socket())\n\n\n elif message[0] == Protocol.JOINROOM.value:\n if len(message) > 1:\n roomName = message[1]\n if self.verify_chat_room_existance(roomName):\n room = self.get_room(roomName)\n self.join_room(client.get_socket(), room)\n else:\n self.send_message('La sala no existe.', client.get_socket())\n else:\n self.send_message('No se especifico la sala.', client.get_socket())\n\n\n elif message[0] == Protocol.ROOMMESSAGE.value:\n if len(message) > 2:\n roomName = message[1]\n if self.verify_chat_room_existance(roomName):\n room = self.get_room(roomName)\n roomMessage = self.get_user_message(message, 2)\n self.send_room_message(room, roomMessage)\n else:\n self.send_message('La sala no existe.', client.get_socket())\n else:\n if len(message) == 1:\n self.send_message('No se 
especifico el nombre de la sala ni el mensaje.',\n client.get_socket())\n else:\n self.send_message('No se especifico el mensaje.', client.get_socket())\n\n elif message[0] == Protocol.DISCONNECT.value:\n if len(message) == 1:\n self.send_message('Bye bye', client.get_socket())\n self.kill_client(client)\n client.get_socket().close()\n print('Acaba de desconectarse ', client.get_ip())\n connected = False\n else:\n self.send_message('Solo debes mandar DISCONNECT', client.get_socket())\n\n\n else:\n msg = ''\n msg += 'Mensaje invalido. A continuacion tiene la lista de mensajes validos: '\n msg += '| IDENTIFY nombre : para identificar usuario'\n msg += '| STATUS status: asigna estado al usuario: ACTIVE, BUSY o AWAY'\n msg += '| USERS: muestra los usuarios identificados'\n msg += '| MESSAGE usuario mensaje: envia mensaje privado al usuario'\n msg += '| PUBLICMESSAGE mensaje: envia el mensaje a todos los usuarios identificados'\n msg += '| CREATEROOM nombre: crea una sala con ese nombre, siendo el dueno el usario que la creo'\n msg += '| INVITE nombre usuario1 usuario2...: envia invitaciones a los usuarios para unirse a la sala'\n msg += '| JOINROOM nombre: acepta la invitacion a la sala que fuiste invitado'\n msg += '| ROOMMESSAGE nombre mensaje: envia mensaje a los usuarios de la sala'\n msg += '| DISCONNECT: te desconectas del servidor'\n self.send_message(msg, client.get_socket())\n\n if not message:\n break", "def _on_message(self, message):\n print(\"RECEIVED on \" + self.session_name + \":\")\n message_json = json.loads(message)\n print(json.dumps(message_json, sort_keys=True, indent=2, separators=(',', ':')))\n\n for singleMsg in message_json:\n self._process_message(singleMsg)", "def processMessage(self, msg):\r\n LOG(\"Received message: \" + msg.getId())\r\n \r\n # Process messages incoming from child executor, if any\r\n procId = msg[FIELD_PROC_ID]\r\n if procId != self.procId:\r\n if self.childManager.hasChild():\r\n self.childManager.processChildMessage(msg)\r\n else:\r\n LOG(\"Unexpected child message: \" + msg.getId(), LOG_ERROR)\r\n elif msg.getType() == MSG_TYPE_COMMAND:\r\n if msg.getId() == Messages.MSG_ADD_CLIENT:\r\n self.addClient(msg)\r\n elif msg.getId() == Messages.MSG_REMOVE_CLIENT:\r\n self.removeClient(msg)\r\n elif msg.getId() == Messages.CMD_CLOSE:\r\n self.cleanup()\r\n elif msg.getId() == Messages.CMD_RELOAD:\r\n REGISTRY['CIF'].clearAsRun()\r\n self.cleanup( executionOnly = True )\r\n self.setupResources()\r\n self.prepareExecution()\r\n else:\r\n cmdId = msg[\"Id\"]\r\n if cmdId in [ Messages.CMD_ABORT, Messages.CMD_PAUSE ]:\r\n self.mailbox.push( msg, high_priority = True )\r\n else:\r\n self.mailbox.push( msg )\r\n else:\r\n LOG(\"Unexpected message: \" + msg.getId() + \"/\" + msg.getType(), LOG_ERROR)", "def processIncoming(self):\n while self.queue.qsize():\n try:\n # print 'queue'\n msg = self.queue.get(0)\n # Check contents of message and do what it says\n # As a test, we simply print it\n if msg == \"exit\":\n self.deviceError()\n if msg == \"error\":\n self.deviceError()\n else:\n self.decode(msg)\n except Queue.Empty:\n pass", "def handle(self):\n try:\n while True:\n\n # Pop the message from the queue\n\n msg = self.queue.get_nowait()\n\n # Log anything if necesary\n\n self.log_message(msg)\n\n # Identify the src peer\n\n if 'src_id' in msg:\n\n if msg['src_id'] == -1:\n\n this_peer = None # Server message\n\n else:\n\n this_peer = self.peers[msg['src_id']]\n\n # If we are not up-to-date with server, only accept MSG_CONNECT and MSG_SET_ALL\n\n if 
isinstance(msg, MSG_CONNECT):\n\n if self.marker.id != msg['src_id']:\n\n print(\"Peer '{}' has joined the session\".format(msg['name']))\n\n elif type(msg) == MSG_SET_ALL:\n\n # Set the contents of the text box\n\n self.handle_setall(msg['data'])\n\n # Move the peers to their position\n\n for _, peer in self.peers.items():\n \n peer.move(peer.row, peer.col)\n\n # self.mark_set(peer.mark, peer.index())\n\n # Format the lines\n\n self.format_text()\n\n # Move the local peer to the start\n\n self.marker.move(1,0)\n\n # Flag that we've been update\n\n self.is_up_to_date = True\n\n elif self.is_up_to_date:\n\n # If the server responds with a console message\n\n if isinstance(msg, MSG_RESPONSE):\n\n if hasattr(self.root, \"console\"):\n\n self.root.console.write(msg['string']) \n\n # Stop running when server is manually killed \n\n elif isinstance(msg, MSG_KILL):\n\n if hasattr(self.root, \"console\"):\n\n self.root.console.write(msg['string']) \n\n self.root.push.kill()\n self.root.pull.kill()\n\n # Handles selection changes\n\n elif isinstance(msg, MSG_SELECT):\n\n sel1 = str(msg['start'])\n sel2 = str(msg['end'])\n \n this_peer.select(sel1, sel2)\n\n # Handles keypresses\n\n elif isinstance(msg, MSG_DELETE):\n\n self.handle_delete(this_peer, msg['row'], msg['col'])\n\n self.root.colour_line(msg['row'])\n\n elif type(msg) == MSG_BACKSPACE:\n\n self.handle_backspace(this_peer, msg['row'], msg['col'])\n\n self.root.colour_line(msg['row'])\n\n elif isinstance(msg, MSG_EVALUATE_BLOCK):\n\n lines = (int(msg['start_line']), int(msg['end_line']))\n\n this_peer.highlightBlock(lines)\n\n # Experimental -- evaluate code based on highlight\n\n string = self.get(\"{}.0\".format(lines[0]), \"{}.end\".format(lines[1]))\n \n self.root.lang.evaluate(string, name=str(this_peer), colour=this_peer.bg)\n\n elif isinstance(msg, MSG_EVALUATE_STRING):\n\n # Handles single lines of code evaluation, e.g. 
\"Clock.stop()\", that\n # might be evaluated but not within the text\n\n self.root.lang.evaluate(msg['string'], name=str(this_peer), colour=this_peer.bg)\n\n elif isinstance(msg, MSG_SET_MARK):\n\n row = msg['row']\n col = msg['col']\n\n this_peer.move(row, col)\n\n # If this is a local peer, make sure we can see the marker\n\n if this_peer == self.marker:\n\n self.mark_set(INSERT, \"{}.{}\".format(row, col))\n\n self.see(self.marker.mark)\n\n elif isinstance(msg, MSG_INSERT):\n\n self.handle_insert(this_peer, msg['char'], msg['row'], msg['col'])\n\n # Update IDE keywords\n\n self.root.colour_line(msg['row'])\n\n # If the msg is from the local peer, make sure they see their text AND marker\n\n if this_peer == self.marker:\n\n self.see(self.marker.mark)\n\n self.edit_separator()\n\n elif isinstance(msg, MSG_GET_ALL):\n\n # Return the contents of the text box\n\n data = self.handle_getall()\n\n reply = MSG_SET_ALL(-1, data, msg['src_id'])\n\n self.root.push_queue.put( reply ) \n\n elif isinstance(msg, MSG_REMOVE):\n\n # Remove a Peer\n this_peer.remove()\n \n del self.peers[msg['src_id']]\n \n print(\"Peer '{}' has disconnected\".format(this_peer)) \n\n elif isinstance(msg, MSG_BRACKET):\n\n # Highlight brackets on local client only\n\n if this_peer.id == self.marker.id:\n\n row1, col1 = msg['row1'], msg['col1']\n row2, col2 = msg['row2'], msg['col2']\n\n peer_col = int(self.index(this_peer.mark).split(\".\")[1])\n\n # If the *actual* mark is a ahead, adjust\n\n col2 = col2 + (peer_col - col2) - 1\n\n self.tag_add(\"tag_open_brackets\", \"{}.{}\".format(row1, col1), \"{}.{}\".format(row1, col1 + 1))\n self.tag_add(\"tag_open_brackets\", \"{}.{}\".format(row2, col2), \"{}.{}\".format(row2, col2 + 1))\n\n elif type(msg) == MSG_CONSTRAINT:\n\n new_name = msg['name']\n\n print(\"Changing to constraint to '{}'\".format(new_name))\n\n for name in self.root.creative_constraints:\n\n if name == new_name:\n\n self.root.creative_constraints[name].set(True)\n self.root.__constraint__ = constraints[name](msg['src_id'])\n\n else:\n\n self.root.creative_constraints[name].set(False)\n\n elif type(msg) == MSG_SYNC:\n\n # Set the contents of the text box\n\n self.handle_setall(msg['data'])\n\n # Move the peers to their position\n\n for _, peer in self.peers.items():\n \n peer.move(peer.row, peer.col)\n\n # Format the lines\n\n self.format_text()\n\n elif type(msg) == MSG_UNDO:\n\n self.handle_undo()\n\n # Give some useful information about what the message looked like if error\n\n else:\n\n print(\"Error in text box handling. 
Message was {}\".format(msg.info()))\n\n raise e\n\n # Update any other idle tasks\n\n self.update_idletasks()\n\n # This is possible out of date - TODO check\n\n if msg == self.root.wait_msg:\n\n self.root.waiting = False\n self.root.wait_msg = None\n self.root.reset_title()\n\n self.refreshPeerLabels()\n\n # Break when the queue is empty\n except queue.Empty:\n \n self.refreshPeerLabels()\n\n # Recursive call\n self.after(30, self.handle)\n return", "def process_frontend_msg(self):\n logging.debug('Received message on the frontend socket')\n\n _id = self.frontend_socket.recv()\n _empty = self.frontend_socket.recv()\n msg = self.frontend_socket.recv_json()\n\n logging.debug('ID: %s', _id)\n logging.debug('Message: %s', msg)\n\n if not isinstance(msg, dict):\n self.frontend_socket.send(_id, zmq.SNDMORE)\n self.frontend_socket.send(\"\", zmq.SNDMORE)\n self.frontend_socket.send_json({ 'success': -1, 'msg': 'Request message should be in JSON format' })\n return\n\n logging.debug('Generating client id for result collecting')\n\n # Generate a service request id for our client and ask them to\n # subscribe to the result publisher endpoint in order to receive\n # their results\n req_id = uuid.uuid4().get_hex()\n self.frontend_socket.send(_id, zmq.SNDMORE)\n self.frontend_socket.send(\"\", zmq.SNDMORE)\n self.frontend_socket.send_json({'uuid': req_id, 'port': self.result_pub_port})\n \n logging.debug('Client service request id is: %s', req_id)\n \n # The message we send to the backend also contains the client\n # service request id as well. This is done so later when we receive\n # the results in the sink we can route the results to the clients properly\n msg['uuid'] = req_id\n\n logging.debug('Sending message to backend for processing')\n \n self.backend_socket.send_unicode(msg['topic'], zmq.SNDMORE)\n self.backend_socket.send_json(msg)", "def handle_messages(self):\n\n #Get the time at which the code started running\n current_time = datetime.datetime.now()\n\n #get all messages between now and the time where a message was last received\n messages = self.client.messages.list(\n date_sent_before = datetime.datetime.now()+ datetime.timedelta(hours = TIMEDIFFERENCE),\n date_sent_after = self.last_message_timing + datetime.timedelta(hours = TIMEDIFFERENCE)\n )\n\n #Iterate through all the new messages\n for record in messages:\n #If it is not from the Twilio Client\n if record.from_ != 'whatsapp:+14155238886':\n #Then update the timing of the last message to the current time\n self.last_message_timing = current_time\n #If the message sent is the '?' 
that seeks to get the number\n #of people in the queue\n if record.body == '?':\n #Get the data about people from firebase\n people_data = self.firebase.get_data('people_count')\n #Get the number of people queueing\n no_of_people = people_data['people_count']\n #Create a message from the API to tell the person\n #asking the number of people in the queue\n message = self.client.messages.create(\n body='The number of the people in the queue is {}'.format(no_of_people),\n from_='whatsapp:{sender_number}'.format(**self.config),\n to=record.from_\n )", "def process(self, msg):\n print \"HANDLER: received a msg: %s\" % msg", "def recv_messages(self):\n while True:\n b = unwrap_read(self.sock.recv(4096))\n msgs = self.parser.feed(b)\n if msgs:\n for msg in msgs:\n self.router.incoming(msg)\n return", "def handle_recv(self,stream,msgs):\n pass", "def handle_stream_client(self, event):\n try:\n while True:\n client_req = self.receive_streaming_msg()\n self.choose_action(client_req[ZERO], client_req[ONE:], event)\n except socket.error as e:\n print('stream', e)", "def handle_client(client): # Takes client socket as argument.\n\tname = client.recv(2048).decode(\"utf8\")\n\twelcome = 'Welcome %s! Enter {quit} to exit.' % name\n\ttry:\n\t\tclient.send(bytes(welcome, \"utf8\"))\n\t\tmsg = \"%s: has joined the chat!\" % name\n\t\tbroadcast(bytes(msg, \"utf8\"))\n\t\tclients[client] = name\n\t\ttemp_client = {'Address':addresses[client],'Name':clients[client]}\n\t\tactive.append(temp_client)\n\t\tbroadcast(bytes(str(active),'utf-8'))\n\t\twhile True:\n\t\t\tmsg = client.recv(2048)\n\t\t\ttry:\n\t\t\t\tif '(' in msg.decode('utf-8') and ')' in msg.decode('utf-8'):\n\t\t\t\t\ttemp = msg.decode('utf-8').split(')')\n\t\t\t\t\taddress = temp[0] + ')'\n\t\t\t\t\tprivate_message(address,temp[1])\n\t\t\t\telif msg != bytes(\"{quit}\", \"utf8\"):\n\t\t\t\t\tbroadcast(msg, \"<global>\" + name + \": \")\n\t\t\t\t\tprint(client)\n\t\t\t\telse:\n\t\t\t\t\t#client.send(bytes(\"{quit}\", \"utf8\"))\n\t\t\t\t\tclient.close()\n\t\t\t\t\tactive.remove({'Address':addresses[client],'Name':clients[client]})\n\t\t\t\t\tdel clients[client]\n\t\t\t\t\tbroadcast(bytes(\"%s has left the chat.\" % name, \"utf8\"))\n\t\t\t\t\tbroadcast(bytes(str(active),'utf-8'))\n\t\t\t\t\tbreak\n\t\t\texcept:\n\t\t\t\tprint(msg)\n\t\t\t\tbroadcast_file(msg)\n\texcept Exception as e:\n\t\tprint(e)", "def client_message_handler(self, message, client):\n LOG.debug(f\"Разбираем сообщение: {message}\")\n if (\n s.KEY_ACTION in message\n and message[s.KEY_ACTION] == s.ACTION_PRESENCE\n and s.KEY_TIME in message\n and s.KEY_USER in message\n ):\n if message[s.KEY_USER][s.KEY_ACCOUNT_NAME] not in self.names.keys():\n self.names[message[s.KEY_USER][s.KEY_ACCOUNT_NAME]] = client\n MSG.send(client, s.RESPONSE_200)\n else:\n response = s.RESPONSE_400\n response[s.KEY_ERROR] = \"Имя пользователя уже занято.\"\n MSG.send(client, response)\n self.clients.remove(client)\n client.close()\n return\n # Если это сообщение, то добавляем его в очередь сообщений.\n # Ответ не требуется.\n elif (\n s.KEY_ACTION in message\n and message[s.KEY_ACTION] == s.ACTION_MESSAGE\n and s.KEY_TIME in message\n and s.KEY_TO in message\n and s.KEY_FROM in message\n and s.KEY_MESSAGE in message\n ):\n self.messages.append(message)\n return\n # Если клиент выходит\n elif (\n s.KEY_ACTION in message\n and message[s.KEY_ACTION] == s.ACTION_EXIT\n and s.KEY_ACCOUNT_NAME in message\n ):\n self.clients.remove(self.names[message[s.KEY_ACCOUNT_NAME]])\n 
self.names[message[s.KEY_ACCOUNT_NAME]].close()\n del self.names[message[s.KEY_ACCOUNT_NAME]]\n return\n # Иначе отдаём Bad request\n else:\n response = s.RESPONSE_400\n response[s.KEY_ERROR] = \"Запрос не корректен\"\n MSG.send(client, response)\n return", "def handle_clients(self):\n done = False\n while not done:\n try:\n listening_socket, address = self.listen_socket.accept()\n print(\"listen socket connect: {}\".format(listening_socket))\n name = self.receive_mes(listening_socket)\n # add to options:\n self.client_dict[name] = listening_socket\n # gets string of connected contacts\n options = ','.join(self.client_dict.keys())\n print(options)\n # sends options to client:\n self.send_mes(options.encode(), listening_socket)\n users_thread = threading.Thread(target=self.users)\n users_thread.start()\n client_thread = threading.Thread(target=self.handle_call)\n client_thread.start()\n\n except socket.error as msg:\n print(\"socket failure: \", msg)\n done = True\n except Exception as msg:\n print(\"exception: \", msg)\n done = True", "def handle(self):\n try:\n # Wait for data\n data = json.loads(self.request.recv(1024).decode('UTF-8').strip())\n\n # Process data\n self.process_data(data)\n\n except Exception as e:\n print(\"Exception wile receiving message: \", e)\n self.request.sendall(\n bytes(json.dumps({'return': 'error'}), 'UTF-8'))", "def handle(self):\n req_lines = self._read_lines()\n if not req_lines:\n self.cleanup()\n for req in req_lines:\n log.debug('%s => %s', self.client, req)\n req = req.split()\n cmd = req.pop(0)\n try:\n self.get_command(cmd)(req)\n result = [OK]\n except Exception as error:\n result = [ERROR, error.message]\n self.send_line(' '.join(result))\n self.flush()", "def handle_client_recv(sock, addr):\n\n rest = bytes()\n while True:\n try:\n (msgs, rest) = recv_msg(sock, rest)\n except (EOFError, ConnectionError):\n handle_disconnect(sock, addr)\n break\n for msg in msgs:\n print('{}: {}'.format(addr, msg))\n \n # Handle first message. 
Set clients name.\n with lock:\n client_name = clients[sock.fileno()]['name']\n if client_name is None:\n clients[sock.fileno()]['name'] = msg\n else:\n \"\"\" Add message to each connected client's send queue \"\"\"\n msg = '{}: {}'.format(client_name, msg)\n for i in clients:\n clients[i]['queue'].put(msg)", "def messageHandler(self):\n\n while len(self.ReceiveMessageBuffer) > 0: # if message handler is called all received messages will be processed\n #print 'entered message handler of ID {0}'.format(self.CommID)\n msg = self.ReceiveMessageBuffer.popleft()\n self.MsgReceiveCount += 1\n self.MsgReceiveCount_interval += 1\n type = msg.getType()\n # for communication test:\n if type == 0: #System message\n print 'ID {0} has received msg {1} from ID {2}'.format(self.CommID, msg.getData(), msg.getIDSender())\n # send reply\n data = msg.getData()\n if data == 'ping':\n retval = self.sendMessage(msg.getIDSender(), 0, 'pong')\n return retval\n elif data == 'pong':\n retval = self.sendMessage(msg.getIDSender(), 0, 'ping')\n return retval\n # elif data[0] == 'system':\n # if(data[1] == 'startRONOPT'):\n # #save fluctuation curve of cluster\n # self.EFluctuationCurve = data[4]\n # #begin with local optimization (data[2] = fromTime, data[3]=toTime)\n # self.stateRONOPT = 0\n # for n in range(len(self.Neighbors)):\n # self.NeighborMessageRec[n] = 0\n # self.RemainderOfNeighborsOpt(data[2],data[3],1)\n #########################################################################################################\n\n elif type == 20: # pseudo tree generation message\n ret = self.messageHandler_PseudoTree(msg)\n if ret == -1:\n break\n\n elif type == 40: # load propagation message\n self.messageHandler_LoadProp(msg)\n\n elif type == 70:\n self.messageHandler_RemainderMulticast(msg) #remainder multicast optimization\n\n return 0", "def handleMessage(msg):", "def handle_read(self):\n packet = self.recv(8192)\n if packet == \"\":\n #print \"[WARNING] Socket closed by remote host %s:%s\" % (\n # self.address,self.port)\n self.close()\n return\n packet_list = messages.separate_messages(packet)\n #received_types = \" + \".join(\n # messages.get_message_type(messages.parse(packet))\n # for packet in packet_list)\n #print \"From %s:%s received: \" % (self.address, self.port), received_types\n # Process a single message at a time\n for packet in packet_list:\n message = messages.parse(packet)\n if messages.get_message_type(message) == \"OFPT_ECHO_REQUEST\":\n self.buffer.append(messages.of_echo_reply)\n else:\n self.handle_message(message)", "def _handle_client(self, client_reader, client_writer):\n while True:\n data = (yield from client_reader.readline()).decode(\"utf-8\")\n if not data: # an empty string means the client disconnected\n break\n cmd, *args = data.rstrip().split(' ')\n if cmd == 'add':\n arg1 = float(args[0])\n arg2 = float(args[1])\n retval = arg1 + arg2\n client_writer.write(\"{!r}\\n\".format(retval).encode(\"utf-8\"))\n elif cmd == 'repeat':\n times = int(args[0])\n msg = args[1]\n client_writer.write(\"begin\\n\".encode(\"utf-8\"))\n for idx in range(times):\n client_writer.write(\"{}. 
{}\\n\".format(idx+1, msg)\n .encode(\"utf-8\"))\n client_writer.write(\"end\\n\".encode(\"utf-8\"))\n else:\n print(\"Bad command {!r}\".format(data), file=sys.stderr)\n\n # This enables us to have flow control in our connection.\n yield from client_writer.drain()", "def on_messages(self, msg_list):\n self.stats.on_pack_recv(len(msg_list))\n\n for msg in msg_list:\n if self.state == OPEN:\n self.conn.on_message(msg)", "def process_messages(self):\n for each_message in self.unprocessed_messages:\n if not ( 'message_type' in each_message):\n logging.error(\"(%s:%d) invalid message found...ignoring the message\",\\\n self.ip, self.port)\n else:\n if ( each_message['message_type'] is 'unchoke'):\n self.is_choking = 0\n elif ( each_message['message_type'] is 'choke'):\n self.is_choking = 1\n elif ( each_message['message_type'] is 'interested'):\n self.is_interested = 1\n elif ( each_message['message_type'] is 'not interested'):\n self.is_interested = 0\n elif ( each_message['message_type'] is 'have'):\n self.pieces.append(each_message['piece_index'])\n elif ( each_message['message_type'] is 'bitfield'):\n bitfield = each_message['bitfield']\n for index, each_bit in enumerate(bitfield):\n if ( each_bit is '1'):\n self.pieces.append(index)", "def handleMessage(self, channels, sender, code, datagram):\n self.stateServer.handle(channels, sender, code, datagram)\n self.clientAgent.handle(channels, sender, code, datagram)\n self.databaseServer.handle(channels, sender, code, datagram)", "def handle_client(self):\n e = threading.Event()\n reg_t = threading.Thread(target=self.handle_reg_client, args=(e,))\n stream_t = threading.Thread(target=self.handle_stream_client,\n args=(e,))\n reg_t.start()\n stream_t.start()", "def _r_handle_message_contents(self, msg, protocol):\n if isinstance(msg, ResponseMessage):\n d = self._waiting_messages.pop(msg.response_to, None)\n if d is not None:\n d.callback(msg)\n elif isinstance(msg, ServerMotdMessage):\n print(\"Connected: %s\" % msg.motd)\n self._r_successful_connection()\n elif isinstance(msg, EventMessage):\n callback = self._event_callbacks.get((msg.service_name, msg.event_name))\n if callback is not None:\n threads.deferToThread(callback, *msg.pargs, **msg.kwargs)", "def processMessage(self, *args, **kwargs):\r\n pass", "def processIncoming(self):\n while (self.queue.qsize()):\n try:\n message = self.queue.get_nowait()\n \n self.terminal.insert(END,message)\n\n # Autoscroll the terminal if set\n if (self.autoscroll_value.get()):\n self.terminal.yview(END)\n\n except Queue.Empty:\n pass", "def handle_client(self, conn):\r\n\r\n while True:\r\n # Receive message\r\n msg = conn.recv(1024).decode()\r\n res = self.validateCommand(msg)\r\n\r\n print(res)\r\n\r\n # Send response\r\n conn.sendall(res.encode())\r\n\r\n if msg == '/exit':\r\n break\r\n\r\n # Close client connection\r\n print('Client disconnected...')\r\n conn.close()", "def handle_client(client): # Takes client socket as argument.\r\n name = client.recv(BUFSIZ).decode(\"utf8\")\r\n welcome = 'Welcome %s! If you ever want to quit, type {quit} to exit.' 
% name\r\n client.send(bytes(welcome, \"utf8\"))\r\n msg = \"%s has joined the chat!\" % name\r\n broadcast(bytes(msg, \"utf8\"))\r\n clients[client] = name\r\n while True:\r\n msg = client.recv(BUFSIZ)\r\n if msg != bytes(\"{quit}\", \"utf8\"):\r\n broadcast(msg, name+\": \")\r\n else:\r\n client.send(bytes(\"{quit}\", \"utf8\"))\r\n client.close()\r\n del clients[client]\r\n broadcast(bytes(\"%s has left the chat.\" % name, \"utf8\"))\r\n break", "def handle_client_data(self, data, client_sock):\n prot = data[0].lower()\n if prot == \"n\":\n # Sent by the central server when a new node joins\n peer = json.loads(data[1:])\n self._worker.add_peer(peer)\n client_sock.close()\n elif prot == \"b\":\n self._handle_block(data, client_sock)\n elif prot == \"t\":\n self._handle_transaction(data, client_sock)\n elif prot == \"r\":\n self._handle_transaction_proof(data, client_sock)\n elif prot == \"x\":\n self._handle_balance(data, client_sock)\n else:\n # either header or wrong message format\n client_sock.close()", "def recv(self, *messages):\n for message in messages:\n self.input.put(message)", "def handle_client(client): # Takes client socket as argument.\n\tr_packet = client.recv(BUFSIZ).decode(\"utf8\")\n\tar_packet = r_packet\n\tr_packet = r_packet.split(\"~\")\n\n\tfor sock in clients:\n\t\tif(clients[sock] == r_packet[0]):\n\t\t\tsock.send(bytes(ar_packet,\"utf8\"))", "def handle(self, message):\n for callback in self.callbacks:\n callback(message['data'])", "def handle(self):\r\n assert self.prepared, \"You have to call prepare before handle\"\r\n rset, wset, xset = self._select()\r\n for readable in rset:\r\n if readable == self._read.fileno():\r\n # don't care i just need to clean readable flag\r\n self._read.recv(1024) \r\n elif readable == self.socket.handle.fileno():\r\n client = self.socket.accept().handle\r\n self.clients[client.fileno()] = Connection(client, self.wake_up)\r\n else:\r\n connection = self.clients[readable]\r\n connection.read()\r\n if connection.status == WAIT_PROCESS:\r\n itransport = TTransport.TMemoryBuffer(connection.message)\r\n otransport = TTransport.TMemoryBuffer()\r\n iprot = self.in_protocol.getProtocol(itransport)\r\n oprot = self.out_protocol.getProtocol(otransport)\r\n self.tasks.put([self.processor, iprot, oprot, \r\n otransport, connection.ready])\r\n for writeable in wset:\r\n self.clients[writeable].write()\r\n for oob in xset:\r\n self.clients[oob].close()\r\n del self.clients[oob]", "def broadcast(msg):\r\n for user in clients:\r\n msg_client(msg, user)", "def handle_send_messages():\n items = {k: v for k, v in subscribers.items() if v}\n for key in items:\n subscriber_obj = items[key]\n sim_id = get_sim_id(subscriber_obj)\n if sim_id and type(sim_id) is int:\n frame_messenger(subscriber_obj)\n elif sim_id and sim_id == \"live\":\n live_messenger(subscriber_obj)", "def loop(self):\n # dump all incoming messages into a list and empty the string\n incoming_messages = self.receiver.getDataFromCallback()\n # empty the buffer\n self.receiver.emptyDataFromCallback()\n\n parsed_messages = []\n pingacks = []\n for message in incoming_messages:\n # Deal with ping requests\n\n if message.topic == self.PINGREQ:\n self.pingack(json.loads(message.payload.decode()))\n # Deal with acknowledgements to our own ping requests\n elif message.topic == self.PINGACK:\n pingacks.append(json.loads(message.payload.decode()))\n # Parse non-encrypted messages\n elif message.topic == self.PUBLIC:\n parsed_messages.append(json.loads(message.payload.decode()))\n\n return 
parsed_messages, pingacks", "def process_message(self, msg, src):", "def run(self):\n\n\t\t#Begin running the clientHandler\n\t\tself.running = True\n\t\tself.rxThread.start()\n\n\t\twhile self.running:\n\t\t\ttime.sleep(0.1)\n\t\n\t\t\t#Keep a count of the number of missing Hello requests, over 5 kill client\n\t\t\tif self.missingCount >= 5:\n\t\t\t\tself.running = False", "def start(self):\n while True:\n ident = self.reply_socket.recv()\n assert self.reply_socket.rcvmore(), \"Missing message part.\"\n msg = self.reply_socket.recv_json()\n omsg = Message(msg)\n print>>sys.__stdout__\n print>>sys.__stdout__, omsg\n handler = self.handlers.get(omsg.msg_type, None)\n if handler is None:\n print >> sys.__stderr__, \"UNKNOWN MESSAGE TYPE:\", omsg\n else:\n handler(ident, omsg)", "def handle_clients(self):\n done = False\n while not done:\n try:\n # starts threads\n receive_video_client_socket, address = \\\n self.receive_video_socket.accept()\n print(\"connected relay video: {}\"\n .format(receive_video_client_socket))\n video_thread = \\\n threading.Thread(target=self.start_video_relay,\n args=(receive_video_client_socket, ))\n audio_thread = threading.Thread(target=self.start_audio_relay)\n video_thread.start()\n audio_thread.start()\n\n except socket.error as msg:\n print(\"socket failure handle clients: \", msg)\n done = True\n except Exception as msg:\n print(\"exception handle clients: \", msg)\n done = True", "def _dispatch_messages(self):\n while True:\n select_obj = (yield)\n if select_obj == self._message_queue.selobj:\n msg = self._message_queue.get_nowait()\n if msg is not None:\n msg_type = msg.get('type', None)\n if msg_type is not None:\n msg_handler = self._message_handlers.get(msg_type, None)\n if msg_handler is not None:\n msg_handler(msg['data'])", "def process_inc(self):\n while True:\n # if connected\n if self.connected:\n # if socket is closed, reset\n if self.sock._closed:\n self.connack_rec = 0\n self.disconnect()\n # try to get a message from queue\n try:\n msg = self.recv_q.get_nowait()\n # convert from bytes to string\n msg = msg.decode(\"utf-8\")\n # analyze frame\n frame = Message.Frame(msg)\n\n # check frame type\n hd = frame.header.lower()\n\n # check if frame is good, otherwise disregard it\n if hd == \"error\" or hd == \"base\":\n pass\n # if ack frame\n elif hd == \"ack\":\n # process it\n self.process_ack(Message.AckFrame(msg))\n # if pub frame\n elif hd == \"pub\":\n # process it\n self.process_data(Message.PublishFrame(msg))\n\n except queue.Empty:\n # if no messages, do nothing\n pass", "def run(self):\n\n print('Listening for client connections...')\n\n while not self.shutdownEvent.is_set():\n readyToRead, readyToWrite, inputError = select.select(self._socketList, [], [], self._selectTimeout)\n\n # Iterate over input sockets\n for sock in readyToRead:\n # Received new connection request\n if sock is self._serverSocket:\n print('Received connection request. 
Establishing connection with client.')\n\n # Accept the connection and append it to the socket list\n clientSocket, address = self._serverSocket.accept()\n\n #TODO: Add this if there's a timeout blocking issue, or make the sockets non-blocking\n #clientSocket.settimeout(0.5)\n\n self._socketListMutex.acquire()\n\n try:\n self._socketList.append(clientSocket)\n finally:\n self._socketListMutex.release()\n # Received message from client\n else:\n # Read a message off of the socket\n msgData = MessageHandler.recvMsg(sock)\n\n # Process the message\n if msgData is not None:\n self.__processMsg(sock, msgData)\n # The client disconnected\n else:\n print('Client disconnected')\n\n self._socketListMutex.acquire()\n\n try:\n self._socketList.remove(sock)\n finally:\n self._socketListMutex.release()\n\n sock.close()\n\n # Cleanup\n self.__shutdown()", "def process(self, message=None):\n\n while self.running:\n message = self.channel.basic.get(self.queue)\n if message:\n content = message.body\n\n # log message\n if self.debug:\n self.log(\"Recieved: \" + str(content))\n\n # send to child nodes\n self.scatter(Message(**self.parse(content)))\n else:\n # yield to other greenlet\n # self.tick()\n self.sleep(1)", "def __receive_messages(self) -> [str]:\n while True:\n try:\n data = self.__socket.recv(4096)\n if data:\n msgs = self.__json_serializer.bytes_to_jsons(data)\n if RemotePlayerProxy.DEBUG:\n for msg in msgs:\n print(f'[RPP] [RECV] <- [{self.name}]: {msg}')\n return msgs\n except Exception as e:\n if RemotePlayerProxy.DEBUG:\n print(f'Lost client {self.name} because: ', e)\n return []", "def run(self):\n while True:\n socks = select.select(self.sockets.values(), [], [], 0.1)[0]\n for conn in socks:\n try:\n k = conn.recv(65535)\n except:\n # either died on a connection reset, or was SIGTERM's by parent\n return\n if k:\n for sock in self.sockets:\n if self.sockets[sock] == conn:\n srcif = sock\n msg = json.loads(k)\n\n # DEBUG STATEMENTS\n if False:\n print(\"<--------------------------------->\")\n print(\"[NEW MESSAGE INCOMING]\")\n print(\"SOURCE:\", srcif)\n print(\"MSG:\", msg)\n print(\"<--------------------------------->\")\n\n if not self.handle_packet(srcif, msg):\n self.send_error(conn, msg)\n else:\n print (\"ROUTES:\", self.routes)\n return", "def handle_process(self, connection):\n client_address = connection.getpeername()\n self.HandlerClass(connection, client_address)", "def run(self):\n\n try:\n while True:\n self.log.info(\"Waiting for a connection...\")\n self.mc.events.post('client_disconnected')\n self.connection, client_address = self.socket.accept()\n\n self.log.info(\"Received connection from: %s:%s\",\n client_address[0], client_address[1])\n self.mc.events.post('client_connected',\n address=client_address[0],\n port=client_address[1])\n\n # Receive the data in small chunks and retransmit it\n while True:\n try:\n data = self.connection.recv(4096)\n if data:\n commands = data.split(\"\\n\")\n for cmd in commands:\n if cmd:\n self.process_received_message(cmd)\n else:\n # no more data\n break\n\n except:\n if self.mc.config['mediacontroller']['exit_on_disconnect']:\n self.mc.shutdown()\n else:\n break\n\n except Exception:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n lines = traceback.format_exception(exc_type, exc_value, exc_traceback)\n msg = ''.join(line for line in lines)\n self.mc.crash_queue.put(msg)", "def _hear_message_from_server(self):\n while self.is_alive:\n data = self._socket.recv(1024)\n content = loads(data)\n self._current_list = content\n 
print(\"Servidor: {}\".format(content))", "def handle_clients_connection():\n global server_socket\n global players\n # setting client's queue length\n server_socket.listen(5)\n print \"server online. waiting for players\"\n\n while len(players) < 2:\n _accept_client(server_socket)\n\n print \"there are 2 players. waiting for more...?\"\n\n while len(players) < 4:\n send_msg_to_players(players, \"we are %d players. more players?\" % len(players))\n print \"we are %d players. more players?\" % len(players)\n if \"yes\" in receive_msg_from_players():\n _accept_client(server_socket)\n else:\n send_msg_to_players(players, \"the final number of players is %d\" % len(players))\n print \"the final number of players is %d\" % len(players)\n break\n\n send_msg_to_players(players,\"start game\")\n print \"start game\"", "def handle_client(client):\n\n msg = client.recv(BUFSIZ).decode(\"utf8\")\n msg_split = msg.split(\"@\")\n\n name = msg_split[1]\n vote = msg_split[2]\n\n #Enviar uma mensagem de agradecimento para o cliente\n thankYouMessage = \"Voto computado. Obrigado, \" + name + \"!\"\n client.send(bytes(thankYouMessage, \"utf8\"))\n\n clients[client] = name\n votes.append(vote)\n\n #Computar votos\n counts = np.bincount(votes)\n\n winnerCandidate = np.argmax(counts)\n winnerCandidateVotes = counts[winnerCandidate]\n\n msg = \"O candidato da legenda \" + str(winnerCandidate) + \" vence por \" + str(winnerCandidateVotes) + \" voto(s).\"\n\n broadcast(msg)", "def handle_client(client): # Takes client socket as argument.\n\n name = client.recv(BUFSIZ).decode(\"utf8\")\n welcome = 'Welcome %s! If you ever want to quit, type {quit} to exit.' % name\n client.send(bytes(welcome))\n msg = \"%s has joined the chat!\" % name\n broadcast(bytes(msg))\n clients[client] = name\n\n while True:\n msg = client.recv(BUFSIZ)\n if msg == bytes(\"{quit}\"):\n client.send(bytes(\"{quit}\"))\n client.close()\n del clients[client]\n broadcast(bytes(\"%s has left the chat.\" % name))\n break\n elif msg[0:7] == bytes(\"{emoji}\"):\n broadcast(msg, \"\")\n else:\n broadcast(msg, name + \": \")", "def read_requests(self, clients_for_reading, all_clients):\n messages = []\n for sock in clients_for_reading:\n try:\n message = self.get_message(sock)\n print(message)\n messages.append(message)\n except:\n print('Client {} {} has disconnected'.format(sock.fileno(), sock.getpeername()))\n all_clients.remove(sock)\n\n return messages", "def listen_clients(self, channel, address):\n print(\"New Connection established\")\n try:\n message = channel.recv(1024)\n # Write procedure to process data here\n print(\"Message received: \" + str(message.decode()))\n # Send again\n thesaurus = self.thesaurus(message.decode())\n channel.send(thesaurus.encode('utf-8'))\n print(\"Message response sent\")\n\n # Close channel after data transfer is done.\n channel.close()\n except ConnectionResetError:\n print(\"Threads limit reached\")\n print(\"Supports up to 5 threads currently\")\n sys.exit(0)\n except ConnectionRefusedError:\n print(\"Client is not running\")\n sys.exit(0)", "def tcp_incoming_connections():\n while True:\n client, client_address = SERVER.accept()\n print(\"%s:%s has connected.\" % client_address)\n client.send(bytes(\"Greetings from the cave! 
Now type your name and press enter!\"))\n addresses[client] = client_address\n Thread(target=handle_client, args=(client,)).start()", "def __processMsg(self, sock, msgData):\n\n pass", "def handle_msg(s: socket, inputs: list[socket], server: socket):\n buff = bytes()\n # Put the socket in a list to pass it to select with timeout\n s_list = [s]\n\n while True:\n readable, writable, exceptional = select(s_list, [], [], 10)\n if len(readable) == 0:\n print(\"[x] Invalid petition\")\n return\n\n data = s.recv(1024)\n\n if len(data) == 0:\n return\n\n buff += data\n slice_obj = slice(-1, -5, -1)\n\n last_chars = buff[slice_obj]\n\n if last_chars.decode() == \"\\n\\r\\n\\r\":\n break\n\n petition = Petition(data.decode())\n\n if len(petition.method) != 0:\n if petition.method == 'GET':\n handle_get(s, petition)\n\n elif petition.method == 'POST':\n handle_post(s, petition)\n\n elif petition.method == 'DELETE':\n handle_delete(s, petition)\n\n if not petition.keep_alive:\n addr, port = s.getpeername()\n print(f' closing {addr}:{port}', file=stderr)\n # Stop listening for input on the connection\n inputs.remove(s)\n s.close()", "def handle_message(self, message):", "def on_message(self, client, userdata, msg):\n\n # that was the big fault. calling client.publish under the on_message() create a conflict!!\n # I found the solution via using different ports. And so I don,'t need use this following line here.\n # self.publish(self.topic, msg.payload) # for directly return the incoming message\n msg.payload = msg.payload.decode(\"utf-8\")\n\n self.incoming_message['message'] = json.loads(msg.payload) # json converting cause of mqtt's data transfer limit.\n self.incoming_message['is_used'] = False", "def process_messages(self, messages):\n\n return messages", "def handle(self):\r\n # self.request is the TCP socket connected to the client\r\n # read the incoming command\r\n request = self.request.recv(1024).strip()\r\n # write to the queue waiting to be processed by the server\r\n INPUT_QUEUE.put(request)\r\n # wait for the server answer in the output queue\r\n response = OUTPUT_QUEUE.get(timeout=5.0)\r\n # send back the answer\r\n self.request.send(response)", "def handleIncoming(self):\r\n\t\trawQueue = list()\r\n\r\n\t\twhile True:\r\n\t\t\tif not self.activeConnection:\r\n\t\t\t\ttime.sleep(.1)\r\n\t\t\t\tcontinue\r\n\t\t\ttry:\r\n\t\t\t\trawQueue.append(self.serialPort.read(1).decode('ascii'))\r\n\t\t\texcept serial.serialutil.SerialException as e:\r\n\t\t\t\tcontinue\r\n\t\t\t# print(rawQueue[-1], int.from_bytes(rawQueue[-1], byteorder='big'))\r\n\t\t\t# if len(rawQueue) >= 1000:\r\n\t\t\t# \trawQueue.pop(0)\r\n\t\t\t# print(rawQueue)\r\n\t\t\tif rawQueue[0] != '$': # we pop items until the first one is a $ sign\r\n\t\t\t\t# print('popping the first character')\r\n\t\t\t\trawQueue.pop(0)\r\n\t\t\tif '\\n' in rawQueue: # we assume with the \\n we have a valid message\r\n\t\t\t\t# print('valid message')\r\n\t\t\t\trawQueue.pop(0) # remove the $\r\n\t\t\t\trawPayload = rawQueue[0:rawQueue.index(\"*\")]\r\n\t\t\t\tstringPayload = \"\".join(rawPayload)\r\n\t\t\t\tvalueList = stringPayload.split(\",\")\r\n\t\t\t\t# print(valueList)\r\n\t\t\t\tfor i in range(1, len(valueList)):\r\n\t\t\t\t\tvalueList[i] = int(valueList[i])\r\n\t\t\t\tvalueList[0] = messageTypes[valueList[0]]\r\n\r\n\t\t\t\tself.eventQueue.put(valueList)\r\n\t\t\t\trawQueue.clear()\r\n\t\t\t\t# print(valueList)\r\n\t\t\t\t# we are going to ignore checksums for now\r", "def _on_message(self, raw_msg):\n strmsg = raw_msg.decode()\n msg = 
json.loads(strmsg)\n\n print(msg)\n\n if self._handlers.get(msg['msgid']):\n for handler in self._handlers[msg['msgid']]:\n handler.handle(msg)", "def _process_incoming_queue_messages(self):\n while self._queue.qsize():\n msg = self._queue.get()\n if msg == MAP_UPDATE:\n self._clear_measurement_progress_label()\n self._presenter.update_map(self.chosen_value.get())", "def handle_msg(msg):\n if comm._msg_callback:\n comm._msg_callback(msg)", "def handle_reg_client(self, event):\n try:\n while True:\n client_req = self.receive_msg()\n self.choose_action(client_req[ZERO], client_req[ONE:], event)\n except socket.error as e:\n print(e)", "def handle(self, message):", "def flush(self):\n # TODO: use socketserver or something different.\n # We are very limited by select here\n \n r, w, x = select.select([self.messageDirector.sock, self.clientAgent.sock] + list(self.clients), [], [], 0)\n for sock in r:\n if sock == self.messageDirector.sock:\n sock, addr = sock.accept()\n self.clients[sock] = MDClient(self.messageDirector, sock, addr)\n self.messageDirector.clients.append(self.clients[sock])\n \n elif sock == self.clientAgent.sock:\n sock, addr = sock.accept()\n self.clients[sock] = Client(self.clientAgent, sock, addr)\n self.clientAgent.clients.append(self.clients[sock])\n \n else:\n client = self.clients[sock]\n try:\n data = sock.recv(2048)\n except socket.error:\n data = None\n \n if not data:\n del self.clients[sock]\n \n if type(client) == MDClient:\n self.messageDirector.clients.remove(client)\n \n elif type(client) == Client:\n self.clientAgent.clients.remove(client)\n \n client.onLost()\n \n else:\n client.onData(data)", "def serveClient(self, client):\r\n itrans = self.inputTransportFactory.getTransport(client)\r\n otrans = self.outputTransportFactory.getTransport(client)\r\n iprot = self.inputProtocolFactory.getProtocol(itrans)\r\n oprot = self.outputProtocolFactory.getProtocol(otrans)\r\n\r\n try:\r\n while True:\r\n self.processor.process(iprot, oprot)\r\n except TTransportException, tx:\r\n pass\r\n except Exception, x:\r\n logging.exception(x)\r\n\r\n itrans.close()\r\n otrans.close()", "def handle_client(client): # Takes client socket as argument.\n \n StepNum = 0\n \n\n ClientState = LOGIN_STATE\n ChatRoom = None\n name = client.recv(BUFSIZ).decode(\"utf8\")\n print(name)\n onlineUsers.append(name)\n\n while True:\n msg = client.recv(BUFSIZ).decode(\"utf8\") \n if msg == \"{quit}\":\n client.send(bytes(\"{quit}\", \"utf8\"))\n client.close()\n if ClientState != LOGIN_STATE : \n x = [ChatRooms[ChatRoom].remove((client,_tempname)) for (client,_tempname) in ChatRooms[ChatRoom] if _tempname == name ]\n onlineUsers.remove(name)\n print(msg)\n print(ChatRoom)\n if not ChatRoom == None : broadcast(bytes(\"%s has left the chat.\" % name, \"utf8\"),\"\",ChatRoom)\n break\n else :\n if(ClientState == LOGIN_STATE):\n if(StepNum == 0):\n #Receive ClientCertificate\n print(msg)\n #Verify Certificate, Generate Nonce and encrypt with Client Public Key\n #Sends Server Certificate and Nonce encrypted with Client Public Key\n client.send(bytes(\"Server Certificate and encrypted Server Nonce\",\"utf8\"))\n print(\"Sending Server Certificate and encrypted Server Nonce\")\n StepNum+=1\n elif(StepNum == 1):\n print(msg)\n #Receive Server Nonce Response, Client Nonce encrypted with Server Public Key\n #Check Server Nonce equality, Decrypt Client Nonce with Server Private Key\n #Sends Client Ticket encrypted with Client Public Key, and Client Nonce\n print(\"Sending Encrypted Client Ticket and Client 
Nonce\")\n client.send(bytes(\"Encrypted Client Ticket and Client Nonce\",\"utf8\"))\n ClientState = CHAT_ROOM_SELECT_STATE\n StepNum+=1\n else:\n #There is an error\n pass\n elif(ClientState == CHAT_ROOM_SELECT_STATE):\n if(StepNum == 2 ):\n print(msg)\n #Receives Client Ticket, timestamp, ChatroomName\n #Decrypt it with Server private key\n #Sends ChatRoom Key encrypted with Client Public Key\n print(\"Sending ChatRoom Key\")\n client.send(bytes(\"ChatRoom Key\",\"utf8\"))\n StepNum+=1\n ChatRoom = msg\n time.sleep(0.1)\n welcome = 'Welcome %s in Chat Room : %s! If you ever want to quit, type {quit} to exit.' % (name,ChatRoom)\n client.send(bytes(welcome, \"utf8\"))\n if ChatRoom not in ChatRooms.keys(): \n ChatRooms[ChatRoom] = []\n ChatRooms[ChatRoom].append((client,name))\n msg = \"%s has joined the chat!\" % name\n time.sleep(0.1)\n broadcast(bytes(msg, \"utf8\"),\"\",ChatRoom)\n ClientState = CHAT_STATE\n else:\n #There is an error\n pass\n \n elif ClientState == CHAT_STATE:\n if(StepNum == 3):\n print(msg)\n #Receives Client message encrypted with Chatroom key\n #Decrypt it with ChatRoom key\n #Calc checksum\n #Send Message and CheckSum with encrypted with Chat room Key\n broadcast(bytes(msg,\"utf8\"), name+\": \",ChatRoom)", "def run(self):\n alogger.info(\"Recieved message from %s, Message: (%d) %s\" % (self.client.getaddress(), self.action_type, self.message))\n \n #Try to call th function associated with this message type.\n #format = \"handle_<type>\" (eg: handle_100)\n fn = globals().get(\"handle_\" + str(self.action_type))\n if fn and callable(fn):\n fn(self.message, self.address, self.client)\n else:\n alogger.info(\"Received unknown message from %d, type: %d\" % (self.client.getaddress(), self.action_type))", "def process_messages(self):\r\n for p in self._platforms.values():\r\n if p.received_messages > 0:\r\n p.queue_received_messages()\r\n for p in self._platforms.values():\r\n if p.queued_messages > 0:\r\n p.process_queued_messages()", "def run(self):\n print('ClientThread[{}] is running!'.format(self.threadID))\n while True:\n request = self.receive()\n try:\n requestcode = request.split(',')[0]\n if requestcode == 'SYNCFROM':\n self.syncToClient()\n continue\n elif requestcode == 'SYNCTO':\n self.syncFromClient()\n continue\n elif requestcode == 'GETINDEX':\n self.sendIndex()\n continue\n elif requestcode == 'CLOSE':\n print('Connection to {}:{} closed'.format(self.ip,self.port))\n self.tcpsock.close()\n break\n elif not request:\n continue\n else:\n print(request, type(request))\n raise Exception('Unexpected bytes from client.')\n except KeyboardInterrupt:\n sys.exit()\n except Exception as err:\n traceback.print_exc()\n continue\n self.tcpsock.close()\n print('ClientThread[{}] exiting..'.format(self.threadID))", "def handle_message(self, msg):\n pass", "def run(self):\n\n try:\n if not self._connected:\n self.connect()\n\n while self._connected:\n msg = self._recvmsg()\n self.handle(msg)\n finally:\n if self._connected:\n self.disconnect()", "def process(self):\n\n try:\n self._read_buffer += self._socket.recv(4096)\n except socket.error as exc:\n if exc.errno not in [errno.EAGAIN,\n errno.EWOULDBLOCK,\n errno.WSAEWOULDBLOCK]:\n raise\n response, self._read_buffer = Message.decode(self._read_buffer)\n # Check if terminating RESPONSE_VALUE with body 00 01 00 00\n if (response.type == Message.SERVERDATA_RESPONSE_VALUE and\n response.body.encode(\"ascii\") == \"\\x00\\x01\\x00\\x00\"):\n response = Message(self._response[0].id,\n self._response[0].type,\n 
\"\".join([r.body for r in self._response]))\n self._active_requests[response.id].response = response\n self._response = []\n self._active_requests[response.id]\n elif response.type == Message.SERVERDATA_RESPONSE_VALUE:\n self._response.append(response)\n elif response.type == Message.SERVERDATA_AUTH_RESPONSE:\n self._active_requests[self._response[0].id].response = response\n # Clear empty SERVERDATA_RESPONSE_VALUE sent before\n # SERVERDATA_AUTH_RESPONSE\n self._response = []\n self._active_requests[response.id]", "def receive():\n while True:\n try:\n msg = client_socket.recv(BUFSIZ).decode(\"utf8\")\n msg_list.insert(tkinter.END, msg)\n \n except OSError: # Possibly client has left the chat.\n break", "def handle(self):\n global log_th\n sent = 1\n msg_body = ''\n get_recv = True\n get_data = True\n empty_check = 0\n # Looping session requests\n while 1:\n try:\n # If enabled sleep feauture\n if self.sleep_between != 0:\n time.sleep(self.sleep_between)\n # If no answer feauture\n if self.no_answer != 0:\n time.sleep(1)\n continue\n # Changing receive size if receiving data part\n if sent == 3 or sent == 4:\n data = self.request.recv(self.data_recv_size)\n else:\n data = self.request.recv(self.std_recv_size)\n if sent != 5:\n self.command_w_th_inc.write_commands(\n data=bytes(data).decode().encode('ascii', 'ignore')\n .decode().rstrip(), qid=self.message_id)\n # To many empty line received, closed thread\n if self.func_empty_check(data):\n if empty_check >= 3:\n break\n else:\n empty_check += 1\n continue\n # Logging session requests if steps not equal to data section\n if sent != 5:\n log_th.log_info('{} - {} client executed : \"{}\"'.format(\n self.message_id, self.client_ip, bytes(data).decode().rstrip()))\n # Break the loop\n if self.func_quit(data):\n break\n except Exception as ae:\n log_th.log_warning('{} encounter an error from {} thread : {}'.format(\n self.client_ip, threading.current_thread().name, str(ae)))\n break\n else:\n try:\n # Checking the all steps\n if self.func_rset(data):\n sent = 2\n continue\n if self.func_auth(data):\n continue\n if self.func_auth_plain(data):\n continue\n if self.func_starttls(data):\n continue\n # Starting the sent steps\n # Ehlo/hello\n if sent == 1:\n if self.func_ehlo(data) or self.func_helo(data):\n sent += 1\n else:\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('command not found'))\n # Mail from, rcpt to, data\n elif sent == 2:\n if bytes(data).decode().encode('ascii', 'ignore').decode().rstrip().splitlines().__len__() > 2:\n get_data = False\n get_recv = False\n elif bytes(data).decode().encode('ascii',\n 'ignore').decode().rstrip().splitlines().__len__() > 1:\n get_recv = False\n if self.func_from(data, get_recv):\n sent += 1\n else:\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('mail from'))\n if not get_recv:\n if self.func_to(data, get_recv, get_data):\n sent += 1\n get_recv = True\n else:\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('rcpt to'))\n if not get_data:\n if self.func_data(data, get_recv, get_data):\n sent += 1\n get_data = True\n else:\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('data'))\n # rcpt to and data\n elif sent == 3:\n if bytes(data).decode().encode('ascii', 'ignore').decode().rstrip().splitlines().__len__() > 1:\n get_data = False\n if self.func_to(data, get_recv, get_data):\n sent += 1\n else:\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('rcpt to'))\n if not get_data:\n if 
self.func_data(data, get_recv, get_data):\n sent += 1\n get_data = True\n else:\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('data'))\n # data\n elif sent == 4:\n if self.func_to(data, get_recv, get_data):\n continue\n if self.func_data(data, get_recv, get_data):\n sent += 1\n else:\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('data'))\n # content writing to file (if enabled) and quit statement\n elif sent == 5:\n data_list = bytes(data).decode().split('\\r\\n')\n for line in data_list:\n if str(line) == '.':\n if self.mail_save_enable != 0:\n out_file = open(self.mail_save_path + '/'\n + self.message_id + '.eml', 'w')\n out_file.write(msg_body)\n out_file.close()\n self.func_data_ok()\n sent = 1\n break\n else:\n msg_body += str(line) + '\\r\\n'\n except IndexError:\n if sent == 2:\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('mail from'))\n elif sent == 3:\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('rcpt to'))", "def handle(self, msg, peer_protocol):\n msg_id = msg[0]\n if msg_id == 0:\n self._handle_handshake(msg, peer_protocol)\n elif msg_id == 1: #update\n print(msg, len(msg))\n self._handle_update(msg)", "def __processmsg(self, msg, con, addr):\n\n msgid = msg[0]\n if msgid == message.NCONNECT:\n # Se conecta con un nodo y depsues con los demas\n print(\"\\n[P2P] Solicitud de conexion de\", addr)\n self.__process_connection(msg, con, addr)\n elif msgid == message.ADDTHIS:\n print(\"[P2P] Se agrego un nuevo nodo.\")\n self.__process_add(msg, con, addr)\n elif msgid == message.REQDIR:\n print(\"[SHARE] Se comparten elementos de la carpeta compartida.\")\n self.__process_reqdir(con, addr)\n elif msgid == message.GETFILE:\n self.__process_getfile(msg, con, addr)\n elif msgid == message.UPDIR:\n self.__process_updir(msg, con, addr)\n else:\n print(\"[PROCESSING] Mensaje no procesable de\", addr, \":\\n\", msg)\n\n con.close()", "def handle_message(self, message):\n print(f\"Got message {message}\")\n if message >> 7 == 1:\n # String\n self.receive_char_array(message)\n elif message >> 3 == 0b00000:\n # Servo position\n self.receive_servo_position(message)\n elif message == 0b00001000:\n # All servo positions\n self.receive_all_servo_positions()\n elif message == 0b00001001:\n # All servo limits\n self.receive_all_servo_limits()\n elif message == 0b00001010:\n # Raw force reading\n self.receive_raw_force()\n print(f\"Handled message {message}\")", "def handle_client(self,conn,addr):\n print(f\"[NEW CONNECTION] {addr} connected\")\n client_id = \"\"\n connected = True\n while connected:\n try:\n try:\n msg_length = conn.recv(PREFIX).decode(FORMAT)\n except:\n print(f\"[{addr}] DISCONNECTED\")\n self.handle_unexpected_disconnect(client_id,conn)\n return\n\n if msg_length:\n try:\n msg_length = int(msg_length)\n try:\n raw_msg = conn.recv(msg_length).decode(FORMAT)\n except:\n print(f\"[{addr}] DISCONNECTED\")\n self.handle_unexpected_disconnect(client_id,conn)\n return\n message = json.loads(raw_msg)\n except ValueError:\n message = FAILURE_MESSAGE\n\n if message[\"HEADER\"] == DISCONNECT_MESSAGE:\n connected = False\n self.handle_disconnect(message,conn)\n\n elif message[\"HEADER\"] == \"CREATE\":\n session_id = \"\".join(random.choices(string.ascii_uppercase + string.digits, k = 4))\n indentifer = json.loads(message[\"MESSAGE\"])\n tokenDict = json.loads(indentifer[\"spotify_token\"])\n client_id = message[\"ID\"]\n self.create_session(session_id, message[\"ID\"], 
indentifer[\"display_name\"], tokenDict)\n self.add_connection_entry(message[\"ID\"], indentifer[\"display_name\"], session_id, True, conn, addr)\n self.create_spotify_player(session_id)\n if not self.sessions[session_id][\"HOST\"][\"spotify_player\"].is_spotify_running():\n self.send(\"STC\", client_id, \"PLEASE START SPOTIFY\")\n\n self.send(\"SESSION_ID\", client_id, str(session_id))\n\n elif message[\"HEADER\"] == \"GET_CURRENT_SONG\":\n player = self.get_session_player(self.get_session_from_user(message[\"ID\"]))\n if not self.sessions[session_id][\"HOST\"][\"spotify_player\"].is_spotify_running():\n self.send(\"STC\", client_id, \"PLEASE START SPOTIFY\")\n else:\n current_track = {}\n current_track[\"name\"] = player.sp.currently_playing()['item']['name']\n current_track[\"artist\"] = player.sp.currently_playing()['item']['album']['artists'][0]['name']\n track_json = json.dumps(current_track)\n self.send(\"CURRENT_SONG\", message[\"ID\"],track_json)\n\n elif message[\"HEADER\"] == \"SKIP\":\n player = self.get_session_player(self.get_session_from_user(message[\"ID\"]))\n session_id = self.get_session_from_user(message[\"ID\"])\n session_queue = self.get_session_queue(session_id)\n if len(session_queue) > 0:\n player.add_to_queue(session_queue[0][1])\n session_queue.pop(0)\n self.send_queue_update(session_id)\n player.next_track()\n\n elif message[\"HEADER\"] == \"REWIND\":\n player = self.get_session_player(self.get_session_from_user(message[\"ID\"]))\n player.previous_track()\n\n elif message[\"HEADER\"] == \"PLAY\":\n session_id = self.get_session_from_user(message[\"ID\"])\n player = self.get_session_player(self.get_session_from_user(message[\"ID\"]))\n player.toggle_playback()\n\n elif message[\"HEADER\"] == \"SEARCH\":\n player = self.get_session_player(self.get_session_from_user(message[\"ID\"]))\n song = message[\"MESSAGE\"]\n self.send(\"SEARCH_RESULTS\", message[\"ID\"], json.dumps(player.search(song)))\n\n\n\n\n elif message[\"HEADER\"] == \"ADD_TO_QUEUE\":\n track_data = json.loads(message[\"MESSAGE\"])\n self.add_to_session_queue(message[\"ID\"], (track_data[\"name\"],track_data['uri']))\n session_id = self.get_session_from_user(message[\"ID\"])\n\n\n elif message[\"HEADER\"] == \"QUEUE_UPDATE\":\n options = json.loads(message[\"MESSAGE\"])\n self.update_queue(message[\"ID\"],options)\n\n elif message[\"HEADER\"] == \"GET_USERS\":\n session_id = self.get_session_from_user(message[\"ID\"])\n users = self.sessions[session_id][\"USERS\"]\n self.send(\"USERS\", message[\"ID\"], json.dumps(users))\n\n elif message[\"HEADER\"] == \"SET_PERMISSION\":\n msg = json.loads(message[\"MESSAGE\"])\n session_id = self.get_session_from_user(message[\"ID\"])\n self.change_user_permissions(session_id, msg[\"client_id\"], msg[\"permission\"])\n new_permissions = {}\n new_permissions[\"permission\"] = msg[\"permission\"]\n new_permissions[\"value\"] = self.sessions[session_id][\"USERS\"][msg[\"client_id\"]][\"permissions\"][msg[\"permission\"]]\n self.send(\"PERMISSION_UPDATE\",msg[\"client_id\"], json.dumps(new_permissions))\n\n elif message[\"HEADER\"] == \"JOIN\":\n msg = json.loads(message[\"MESSAGE\"])\n session_id = msg[\"session_id\"]\n if session_id in self.sessions.keys():\n self.add_user_to_session(session_id,message[\"ID\"],msg[\"display_name\"])\n self.add_connection_entry(message[\"ID\"],msg[\"display_name\"],session_id, False, conn, addr)\n client_id = message[\"ID\"]\n\n session_info = {}\n session_info[\"session_id\"] = session_id\n session_info[\"host\"] = 
self.sessions[session_id][\"HOST\"][\"NAME\"]\n\n self.send(\"SESSION_INFO\", message[\"ID\"], json.dumps(session_info))\n self.send(\"QUEUE_UPDATE\", message[\"ID\"], json.dumps(self.get_session_queue(session_id)))\n self.broadcast_to_session(session_id,\"USERS\", json.dumps(self.sessions[session_id][\"USERS\"]))\n else:\n self.add_connection_entry(message[\"ID\"],msg[\"display_name\"],session_id, False, conn, addr)\n self.send(\"FAILURE\", message[\"ID\"], \"Session does not exist\")\n self.send(DISCONNECT_MESSAGE,message[\"ID\"],DISCONNECT_MESSAGE)\n self.delete_connection_entry(message[\"ID\"])\n break\n elif message[\"HEADER\"] == \"SET_PERMISSIONS\":\n msg = json.loads(message[\"MESSAGE\"])\n user_id = msg[\"client_id\"]\n permissions = json.loads(msg[\"permissions\"])\n for key in permissions.keys():\n self.set_permissions(user_id,key,permissions[key])\n self.print_sessions()\n\n elif message[\"HEADER\"] == \"BROADCAST_S\":\n session_id = self.connections[message[\"ID\"]][\"session_id\"]\n self.broadcast_to_session(session_id,\"BROADCAST_S\", message[\"MESSAGE\"])\n elif message[\"HEADER\"] == \"BROADCAST\":\n self.broadcast_to_all(\"BROADCAST\", message[\"MESSAGE\"])\n\n elif message[\"HEADER\"] == \"PLAYBACK\":\n session_id = self.connections[message[\"ID\"]][\"session_id\"]\n sp = self.sessions[session_id][\"HOST\"][\"spotify_player\"]\n if not sp.toggle_playback():\n self.broadcast_to_session(self.get_session_from_user(client_id), \"FAILURE\", \"Please Start Spotify\")\n\n else:\n print(message[\"MESSAGE\"])\n except Exception as ex:\n print(str(ex))\n\n print(\"Thread Closing\")", "def handleClient(self, connection, address):\r\n # time.sleep(5) #server Action\r\n while True:\r\n try:\r\n data = connection.recv(1024).decode(\"utf-8\")\r\n except:\r\n print('client disconnect: ', address, 'at', self.now())\r\n data = \"\"\r\n\r\n if not data: break\r\n\r\n data = self.change_host(data, address)\r\n result = self.manag_bd.dispatcher(data)\r\n\r\n mutex = thread.allocate_lock()\r\n\r\n\r\n if type(result)==type(list()):\r\n mutex.acquire() #Lock interrupt\r\n l = len(result)\r\n reply = str(l)\r\n connection.send(reply.encode(\"utf-8\"))\r\n for line in result:\r\n time.sleep(0.0025)\r\n reply = line\r\n connection.send(reply.encode(\"utf-8\"))\r\n mutex.release()# permission to interrupt\r\n else:\r\n reply = str(self.now())\r\n connection.send(reply.encode(\"utf-8\"))\r\n\r\n\r\n\r\n connection.close()" ]
[ "0.7975024", "0.74111116", "0.74002844", "0.7238532", "0.72266316", "0.7161251", "0.70975065", "0.7079745", "0.69220155", "0.68791217", "0.68509686", "0.6820404", "0.6802297", "0.6784278", "0.67002666", "0.66915977", "0.668222", "0.6679292", "0.66399705", "0.6637506", "0.66354305", "0.66234875", "0.6561295", "0.6552059", "0.65418494", "0.6541516", "0.6536615", "0.6508889", "0.64990884", "0.6492148", "0.6486354", "0.6455199", "0.64550304", "0.6451233", "0.6446244", "0.64289325", "0.6409321", "0.63966393", "0.6392813", "0.63876355", "0.63625306", "0.6357403", "0.6350529", "0.63426787", "0.633477", "0.6315116", "0.6303391", "0.6296119", "0.62939566", "0.6288499", "0.62849396", "0.6284461", "0.6281832", "0.6281054", "0.6278749", "0.62739915", "0.627343", "0.62593603", "0.6254838", "0.6253665", "0.62501204", "0.6239785", "0.6231207", "0.6219772", "0.62111545", "0.6207561", "0.6206401", "0.6192997", "0.61868477", "0.6180187", "0.61798805", "0.6175677", "0.61734444", "0.6169791", "0.61678207", "0.61643565", "0.61633027", "0.6160326", "0.6148841", "0.6137755", "0.61361885", "0.61252767", "0.61233944", "0.61226034", "0.6107487", "0.61056954", "0.6105152", "0.61047375", "0.61030734", "0.60918933", "0.6082171", "0.6081073", "0.6075366", "0.60701007", "0.60661346", "0.60654837", "0.605859", "0.6055668", "0.6055644", "0.6054432", "0.6051455" ]
0.0
-1
Finds the point G1, from the elements alpha (a) and beta (b), which expresses the degree of reliability of the set of classes.
def assurance(a, b): return a - b
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_vb_class(self, a_feats, a_toks1, a_toks2):\n # find intersecting verb classes\n vb_classes = Counter()\n vb_cls1 = vb_cls2 = None\n for w1, p1 in a_toks1:\n if w1 not in LCSI or p1 not in VB_TAGS:\n continue\n vb_cls1 = LCSI[w1]\n for w2, p2 in a_toks2:\n if w2 not in LCSI or p2 not in VB_TAGS:\n continue\n vb_cls2 = LCSI[w2]\n vb_classes.update(vb_cls1 & vb_cls2)\n for vbc, cnt in vb_classes.iteritems():\n a_feats[\"LCSI-\" + vbc] = cnt\n # obtain VB tag vectors\n a_feats[\"VBTags1-\" + self._get_arg_vb_class(a_toks1)] = 1.\n a_feats[\"VBTags2-\" + self._get_arg_vb_class(a_toks2)] = 1.", "def b_class_a(self):\n return self._b_class_a", "def classes(self):\n #print \"making classes again!\"\n l = []\n for p in self.marks:\n l.append(psi_class(self,p))\n for d in range(1, self.dimension + 1):\n l.append(kappa_class(self,d))\n for i in range(1, self.genus+1):\n l.append(chern_char(self, 2*i-1))\n if True:#self.genus != 0:\n l.append(irreducible_boundary(self))\n marks = set(self.marks)\n reducible_boundaries = []\n if self.n != 0:\n first_mark_list = [marks.pop()] \n for g1 in range(0, self.genus + 1):\n for p in subsets(marks):\n r_marks = set(first_mark_list + p)\n if 3*g1 - 3 + len(r_marks) + 1 >= 0 and 3*(self.genus-g1) - 3 + self.n - len(r_marks) + 1 >= 0:\n reducible_boundaries.append( reducible_boundary(self, Mgn(g1, r_marks)) )\n \n reducible_boundaries.sort(key = lambda b: sorted(list(b.component1.marks)))\n reducible_boundaries.sort(key = lambda b: len(b.component1.marks))\n reducible_boundaries.sort(key = lambda b: b.component1.genus)\n \n else: #self.n == 0\n for g1 in range(1, floor(self.genus/2.0)+1):\n reducible_boundaries.append(reducible_boundary(self, Mgn(g1, []))) \n \n \n l += reducible_boundaries \n \n for i in range(1,self.genus+1):\n l.append(lambda_class(self,i))\n return l", "def translate_all_poslists_to_ourclass_numb(motifs_dict,gnum_classes_rel,cons_pos_dict,current_class,other_classes_ok):\n current_poslists=cons_pos_dict[current_class]\n current_motif = motifs_dict[current_class]\n show_class={\"A\":True,\"B\":True,\"C\":True,\"F\":True}\n for gpcr_class in other_classes_ok:\n for cons_pos_li in cons_pos_dict[gpcr_class]:\n for el in cons_pos_li:\n pos_nm=el[0]\n s=re.search(\"([A-Z]?)([\\d\\.]+)\",pos_nm)\n AA=s.group(1)\n bw_pos_ok=s.group(2)\n try:\n current_bw_pos=gnum_classes_rel[gpcr_class][bw_pos_ok]\n except Exception:\n current_bw_pos=\"Position not found\"\n el[2]=\"None\"\n el[0]=AA + bw_pos_ok + gpcr_class.lower()\n el[1]=current_bw_pos\n motif_info = motifs_dict[gpcr_class]\n if motif_info:\n for el in motif_info[0]:\n bw_pos=el[1][1:]\n if bw_pos in gnum_classes_rel[gpcr_class]:\n current_bw_pos=gnum_classes_rel[gpcr_class][bw_pos]\n el[4]=current_bw_pos\n none_classes=list({\"A\",\"B\",\"C\",\"F\"} - set(other_classes_ok + list(current_class)))\n for n_class in none_classes:\n show_class[n_class]=False\n i=0\n while i < len(cons_pos_dict[n_class]):\n cons_pos_dict[n_class][i]=None\n i+=1\n motifs_dict[n_class]=[]\n return (show_class,current_poslists,current_motif,other_classes_ok)", "def __init__(self, p1_proba=0.5):\n self.p1_proba = p1_proba", "def test_class():\n riskfree = .03\n maturity = 30/365\n moneyness = np.linspace(-.04, .04, 10)\n premium = np.ones_like(moneyness) * .05\n call = True\n data = {'riskfree': riskfree, 'maturity': maturity,\n 'moneyness': moneyness, 'call': call, 'premium': premium}\n\n sigma = .13\n bsm = BSmodel(sigma, data)\n\n weights = [.63]\n means = [-.01, .09]\n stds = [.16, .05]\n param = weights + 
means + stds\n mbs = MBSmodel(param, data)\n\n param_a, param_p, param_c = 4, 1.5, -.05\n gb2 = GB2model([param_a, param_p, param_c], data)\n print(gb2.get_pnames())\n\n plt.figure()\n for model in [bsm, mbs, gb2]:\n plt.plot(moneyness, model.density(moneyness), label=model.get_name())\n plt.legend()\n plt.show()\n\n plt.figure()\n for model in [bsm, mbs, gb2]:\n plt.plot(moneyness, model.premium(), label=model.get_name())\n plt.legend()\n plt.show()\n\n plt.figure()\n for model in [bsm, mbs, gb2]:\n plt.plot(moneyness, model.impvol(), label=model.get_name())\n plt.legend()\n plt.show()\n\n print('BS objective function = %.4f' % bsm.objective(sigma))\n print('GB2 objective function = %.4f'\n % gb2.objective([param_a, param_p, param_c]))", "def getCombination(mu1, mu2, sig1, sig2, confidence1, confidence2):\n\tglobal alpha, beta, gamma\n\n\t#Standard Bayesian\n\t# sigNew = math.sqrt(math.pow(sig1, 2) + math.pow(sig2, 2))\n\t# muNew = u1 + u2\n\t# return muNew, sigNew \n\n\t##In accordance with the nature papers:\n\tsigNew = (math.pow(sig1,2) * math.pow(sig2, 2)) \\\n\t/ float((math.pow(sig1,2) + math.pow(sig2, 2)))\n\tinv1 = 1 / float((math.pow(sig1, 2)))\n\tinv2 = 1 / float((math.pow(sig2, 2)))\n\tsumInverses = inv1 + inv2\n\n\t##inverse standard deviations squared\n\t# w1 = inv1 / float(sumInverses)\n\t# w2 = inv2 / float(sumInverses)\n\n\t## equal weighting\n\t# w1 = .5\n\t# w2 = .5\n\n\t## weightings based off of confidence\n\t# summation = confidence1 + confidence2\n\t# w1 = confidence1 / float(summation)\n\t# w2 = confidence2 / float(summation)\n\n\t##weightings with exponentials\n\t# w1 = w1**.001\n\t# w2 = w2**.001\n\t# newSummation = w1 + w2\n\t# w1 = w1 / float(newSummation)\n\t# w2 = w2 / float(newSummation)\n\n\t##weightings with polynomial factors\n\tw1 = (beta * confidence1 + alpha)**gamma \n\tw2 = (beta * confidence2 + alpha)**gamma \n\tnewSummation = w1 + w2\n\tw1 = w1 / float(newSummation)\n\tw2 = w2 / float(newSummation)\n\n\tmuNew = w1 * mu1 + w2 * mu2\n\treturn muNew, sigNew", "def class_combinations(self):\n classes_from = self.classes_from\n\n # Split the AND from the OR text eg. ['(HU or SB) ', 'G']\n gs_split = [ '' if pd.isnull(x) else x.split('& ') for x in classes_from['DESCR.y'].as_matrix() ]\n\n # lambda map through two levels of list to remove ',' and '(' and ')' from text\n # split all of the values that contain or again for expanding the frame\n regex = re.compile('[,\\)\\(]')\n or_split = map(lambda x: map(lambda y: regex.sub('', y ) ,x) if len(x) > 0 else x , gs_split)\n or_split = [ g[0].split(\"or\") if len(g) > 0 else ' ' for g in or_split]\n\n # number of times to repeat based on length of values in list eg. 
each OR needs a new row\n or_rep = [ len(g) for g in or_split]\n\n # repeat class names the right amount of times\n classname = np.repeat(classes_from['FULL'].as_matrix(), or_rep )\n\n # unlist the or split to have the correct values per row\n or_reqs = list(chain.from_iterable(or_split))\n\n # extract all AND req text\n and_reqs = [ g[1:] if len(g) > 1 else ' ' for g in gs_split]\n\n # if theres a list we concat them together\n concat_reqs = [ ''.join(map(str, g)) if isinstance(g, list) else '' for g in and_reqs]\n\n # repeat correct number of AND reqs\n and_reqs = np.repeat(concat_reqs, or_rep)\n\n # make dataframe, concat columns of interest\n # rename columns \n # HEAVY ---------\n expanded = pd.DataFrame([classname, or_reqs, and_reqs]).T\n expanded.columns = ['CLS','OR','AND']\n as_strings = expanded['OR'] + expanded['AND'] \n expanded = pd.DataFrame([classname, as_strings]).T\n expanded.columns = ['CLS','GS']\n expanded = expanded.drop_duplicates()\n\n return expanded", "def testExpected_a_and_self_distr_byRef_classifiers(self):\n\t\t#Get first filter opts obj + set the classifier objects specifically\n\t\tfilterObjA = self.filterOptObj #First set\n\t\tclassifiersA = filteredAtomComboObjMaps.getClassifiersFromOptsObj(self.classifierOpts)\n\t\tfilterObjA.classificationObjs = classifiersA\n\n\t\t#Get second filter opts obj; use byReference classifiers\n\t\tself.useGroups = [ [0,0] ]\n\t\tself.createTestObjs()\n\t\tfilterObjB = self.filterOptObj\n\t\tfilterObjB.classificationOpts = None #Force to use the objects\n\t\tclassifiersB = classifierObjsHelp.getByReferenceClassifiers(classifiersA)\n\t\tfilterObjB.classificationObjs = classifiersB\n\n\t\t#Run the functions - binValGetterA must always be run first\n\t\tbinValGetterA = optsObjMapHelp.getMultiDimBinValGetterFromOptsObjs([filterObjA])\n\t\tbinValGetterB = optsObjMapHelp.getMultiDimBinValGetterFromOptsObjs([filterObjB])\n\n\t\tactValsA = binValGetterA.getValsToBin(self.sparseMatrixCalculator)\n\t\tactValsB = binValGetterB.getValsToBin(self.sparseMatrixCalculator)\n\n\t\t#Compare actual and expected\n\t\tdistAA, distBB, distCC = 0,0,0\n\t\tdistAB, distAC, distBC = 1,2,1\n\t\tdistBA, distCA, distCB = distAB, distAC, distBC\n\n\t\texpValsA = [ (3,), (2,), (1,) ]\n\t\texpValsB = [ (distAA,), (distAB,), (distAC,), (distBA,), (distBB,), (distBC,),\n\t\t (distCA,), (distCB,), (distCC,) ]\n\n\t\tfor expIter,actIter in it.zip_longest(expValsA, actValsA):\n\t\t\t[self.assertAlmostEqual(exp,act) for exp,act in it.zip_longest(expIter,actIter)]\n\n\t\tfor expIter,actIter in it.zip_longest(expValsB, actValsB):\n\t\t\t[self.assertAlmostEqual(exp,act) for exp,act in it.zip_longest(expIter,actIter)]", "def classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):\n #print vec2Classify\n # [0 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1]\n \n #print p0Vec\n \n #print p1Vec\n \"\"\"[-3.04452244 -3.04452244 -3.04452244 -2.35137526 -2.35137526 -3.04452244\n -3.04452244 -3.04452244 -2.35137526 -2.35137526 -3.04452244 -3.04452244\n -3.04452244 -2.35137526 -2.35137526 -2.35137526 -2.35137526 -2.35137526\n -3.04452244 -1.94591015 -3.04452244 -2.35137526 -2.35137526 -3.04452244\n -1.94591015 -3.04452244 -1.65822808 -3.04452244 -2.35137526 -3.04452244\n -3.04452244 -3.04452244]\"\"\" \n \n #print vec2Classify * p1Vec\n \"\"\"\n [-0. -3.04452244 -0. -0. -0. -0.\n -0. -0. -0. -0. -0. -3.04452244\n -0. -0. -0. -0. -0. -0.\n -0. -0. -0. -0. -0. -0.\n -0. -0. -0. -0. -0. -0.\n -0. 
-3.04452244]\n \"\"\"\n \n #print sum(vec2Classify * p1Vec)\n # -9.13356731317\n \n p1 = sum(vec2Classify * p1Vec) + log(pClass1) #element-wise mult\n p0 = sum(vec2Classify * p0Vec) + log(1.0 - pClass1)\n \n if p1 > p0:\n return 1\n else: \n return 0", "def create_conserved_pos_list_otherclass(gpcr_pdb,gpcr_aa, i,my_pos, cons_pos_li, multiple_chains,chain_name,gnum_classes_rel,dict_class,current_class):\n my_pos_bw=my_pos.split(\"x\")[0]\n add_chain_name=\"\"\n if multiple_chains:\n add_chain_name=\":\"+chain_name\n while i < len(cons_pos_li):\n cons_pos_bw_ourclass = cons_pos_li[i][1]\n if my_pos_bw==cons_pos_bw_ourclass:\n pos_range=find_range_from_cons_pos(my_pos, gpcr_pdb)\n if pos_range:\n cons_pos_li[i][2]=pos_range + add_chain_name\n cons_pos_li[i][1]=\"Correspods to \"+cons_pos_bw_ourclass + current_class.lower()\n i+=1", "def part1a_2():\n mediumCRF = submission.LinearChainCRF( [\"-FEAT-\", \"-SIZE-\"],\n submission.binaryFeatureFunction,\n Counter({\n (\"-FEAT-\", \"-SIZE-\") : 0.8,\n (\"-SIZE-\", \"-FEAT-\") : 0.5,\n (\"-SIZE-\", \"-SIZE-\") : 1.,\n (\"-FEAT-\", \"-FEAT-\") : 1.,\n (\"-FEAT-\", \"Beautiful\") : 1.,\n (\"-SIZE-\", \"Beautiful\") : 0.5,\n (\"-FEAT-\", \"house\") : 1.,\n (\"-SIZE-\", \"house\") : 0.5,\n (\"-FEAT-\", \"2\") : 0.5,\n (\"-SIZE-\", \"2\") : 1.0,\n (\"-FEAT-\", \"bedroom\") : 0.5,\n (\"-SIZE-\", \"bedroom\") : 1.0,}) )\n moreExampleInputs = [\n \"This is a Beautiful 2 bedroom\".split(),\n \"2 bedroom Beautiful house\".split(),\n ]\n moreExampleTags = [\n ['-FEAT-', '-FEAT-', '-FEAT-', '-FEAT-', '-SIZE-', '-SIZE-'],\n ['-SIZE-', '-SIZE-', '-FEAT-', '-FEAT-']\n ]\n for xs, ys in zip(moreExampleInputs, moreExampleTags):\n ys_ = submission.computeViterbi(mediumCRF, xs)\n grader.requireIsEqual( ys, ys_ )", "def _get_gen_classes(self, bgc_like, gcf_as_cutoff=0.5):\n # assess if bgc or gcf\n is_bgc = isinstance(bgc_like, BGC)\n if is_bgc:\n # get parent gcf for bgc\n bgc_like_gcf = [\n gcf for gcf in self.npl.gcfs\n if bgc_like.bgc_id in [b.bgc_id for b in gcf.bgcs]\n ][0]\n # gather AS classes and convert to names in scoring dict\n as_classes = self.npl.class_matches.convert_as_classes(\n bgc_like.product_prediction.split(\".\"))\n bgc_like_classes_dict = {\n \"bigscape_class\": bgc_like_gcf.bigscape_class,\n # str - always one bigscape class right?\n \"as_classes\": as_classes\n } # list(str)\n else:\n as_classes = self.npl.class_matches.convert_as_classes(\n self.npl.class_matches.get_gcf_as_classes(\n bgc_like, gcf_as_cutoff))\n bgc_like_classes_dict = {\n \"bigscape_class\": bgc_like.bigscape_class,\n # str - always one bigscape class right?\n \"as_classes\": as_classes\n } # list(str)\n return bgc_like_classes_dict", "def build_opt(self):\n student_taken = self.student.student_hist[['FULL','DESCR.y']]\n\n requiments = self.major_map.cleaned_major_data[['REQID','L','MA','CS','HU','SB','SQ','SG','C','G','H','Honor']]\n\n requiments.is_copy = False\n requiments['REQID'] = requiments['REQID'].astype('str')\n\n requirements_to = pd.merge(self.graph, requiments, how='inner', left_on='REQ', right_on='REQID')\n \n # requirements_to = pd.merge(self.graph, requiments, how='left', left_on='REQ', right_on='REQID')\n classes_from = pd.merge(self.graph, student_taken, how='left', left_on='CLS', right_on='FULL')\n\n return classes_from, requirements_to", "def __init__(self, classes, alpha=1.0):\n self.alpha_ = alpha\n self.classes_ = classes.tolist()\n self.class_counts_ = self.alpha_*np.ones(len(self.classes_))\n self.features_ = []", "def probinit(self, aaa, 
n_obj):\n # Set algorithm...\n if aaa == 'nsga':\n algo = nsga_II(m=0.05)\n else:\n algo = jde(memory=True)\n #algo = mde_pbx()\n #algo = de_1220()\n\n # ...and initialize problem with instance atributes\n prob = mga_1dsm(seq = self.FBseq,\n multi_objective = n_obj,\n dsm_dv_barrier = self.MAX_DV)\n\n prob.set_vinf((self.C3)**0.5)\n prob.set_tof(self.TOF[0], self.TOF[1])\n prob.set_entry_barrier(self.entry_barrier)\n prob.set_launch_window(self.EPOCHSTART, self.EPOCHEND)\n return prob, algo", "def mezclar_bolsa(self):", "def __init__(self, classes, data_size):\r\n self.classes = classes\r\n self.data_size = data_size\r\n self.conditional_prob = {class_:{} for class_ in classes} # Conditional Probability Table for storing parameters useful to compute P(feat|class_)\r\n self.class_prob = {} # Stores the priors\r", "def naive_bn(data, attributes):\n bn = []\n attr = attributes['attr'].tolist()\n # each attribute is only dependent on the class node\n i = 0\n while (i < len(attr)-1):\n row = [attr[i], attr[-1]]\n bn.append(row)\n i= i + 1\n # frequency table \n freq = counts_table(data, attributes)\n # conditional probabilities and prior probabilities\n cond_probs, prior0, prior1 = conditional_probability(data, attributes, freq)\n\n return bn, cond_probs, prior0, prior1", "def optg_xtb(self, acc='normal', nproc=1):\n for k in ['OMP_NUM_THREADS', 'MKL_NUM_THREADS']:\n os.environ[k] = '%d'%nproc\n uc = io2.Units()\n const = uc.h2e #/uc.b2a\n fmax = {'normal':1e-3, 'tight':8.e-4, 'vtight':2e-4}[acc] * const\n # 0.025 eV/A (ase force unit) ~ 0.001 Hartree/A (xtb force unit)\n print('acc = ', acc, 'fmax=', fmax)\n for cid in range(self.nconf):\n print('cid=', cid)\n c1 = self.mol.GetConformer(cid)\n zs = [ ai.GetAtomicNum() for ai in c1.GetAtoms() ]\n coords = []\n for ia in range(self.na):\n o = c1.GetAtomPosition(ia)\n coords.append( [o.x, o.y, o.z] )\n m1 = ase.Atoms(zs, coords)\n # create the calculator for GFN0-xTB under periodic boundary conditions\n calc = GFN2(print_level=2) # setting to 1 or 2 (the default) is ok, not so for 3\n m1.set_calculator(calc)\n e = m1.get_potential_energy()\n print(\"Initial energy: eV, Eh\", e, e/Hartree)\n relax = PreconFIRE(m1, precon=None, trajectory=None) # = 'tmp.traj')\n relax.run(fmax = fmax)\n for i in range(self.na):\n pt = Point3D()\n pt.x, pt.y, pt.z = coordsU[i]\n c1.SetAtomPosition(i, pt)", "def equivalence_classes(self):\n\n # Two states `a` and `b` are j-equivalent if and only if there\n # is a bijection `\\varphi` between paths of length <= j\n # starting at `a` and paths starting at `b` with the following\n # properties: Let `p_a` be a path from `a` to `a'` and `p_b` a\n # path from `b` to `b'` such that `\\varphi(p_a)=p_b`, then\n #\n # - `p_a.\\mathit{word}_{in}=p_b.\\mathit{word}_{in}`,\n # - `p_a.\\mathit{word}_{out}=p_b.\\mathit{word}_{out}`,\n # - `a'` and `b'` have the same output label, and\n # - `a'` and `b'` are both final or both non-final.\n\n # If for some j the relations j-1 equivalent and j-equivalent\n # coincide, then they are equal to the equivalence relation\n # described in the docstring.\n\n # classes_current holds the equivalence classes of\n # j-equivalence, classes_previous holds the equivalence\n # classes of j-1 equivalence.\n\n # initialize with 0-equivalence\n classes_previous = []\n key_0 = lambda state: (state.is_final, state.color, state.word_out,\n state.final_word_out)\n states_grouped = full_group_by(self.states(), key=key_0)\n classes_current = [equivalence_class for\n (key,equivalence_class) in 
states_grouped]\n\n while len(classes_current) != len(classes_previous):\n class_of = {}\n classes_previous = classes_current\n classes_current = []\n\n for k in range(len(classes_previous)):\n for state in classes_previous[k]:\n class_of[state] = k\n\n key_current = lambda state: sorted(\n [(transition.word_in,\n transition.word_out,\n class_of[transition.to_state])\n for transition in state.transitions])\n\n for class_previous in classes_previous:\n states_grouped = full_group_by(class_previous, key=key_current)\n classes_current.extend([equivalence_class for\n (key,equivalence_class) in states_grouped])\n\n return classes_current", "def analyze_belief_strength_with_bias(self, G):\r\n n = []\r\n nbs_list = []\r\n for node in G.nodes: #cycles through the nodes of the graph to mine the attributes\r\n n.append(node) #appends each node to a list that will be put into a dictionary\r\n pbs_list = []\r\n og_bs = G.nodes[node]['belief_strength'] #mines the numerical value for a nodes belief strength, from a pre-set node attribute\r\n unc = G.nodes[node]['uncertainty'] #mines the numerical value for a nodes belief uncertainty, from a pre-set node attribute\r\n prob = G.nodes[node]['probability']\r\n for pre in G.predecessors(node):\r\n ew = G.edges[pre, node]['weight'] #mines the numerical value of an edge's weight, from a pre-set edge attribute\r\n pre_bs = G.nodes[pre]['belief_strength'] #mines the numerical value for a predecessors belief strength, from a pre-set node attribute\r\n x = ew * pre_bs #determines how much a node values its neighbor's opinion.\r\n pbs_list.append(x) #puts all values for predecessor belief strangths in a list\r\n if len(pbs_list) == 0:\r\n nbs = og_bs\r\n nbs = int(nbs)\r\n else:\r\n apbs = sum(pbs_list)/len(pbs_list) #calculates the average predecessor belief strength value for a node\r\n if apbs*og_bs > 0:\r\n if apbs > 0:\r\n nbs = min(og_bs + (0.1*prob*unc*apbs), 100)\r\n else:\r\n nbs = max(og_bs + (0.1*prob*unc*apbs), -100)\r\n nbs = int(nbs)\r\n else:\r\n nbs = og_bs\r\n nbs = int(nbs)\r\n nbs_list.append(nbs) #the new belief strengths are appended to a list that will be put into adictionary\r\n change = dict(zip(n, nbs_list)) #creates a dictionary from two lists which stores the nodes as keys and their new belief strengths as values\r\n print(change)\r\n return change #this will be used to update the list in a different function\r", "def ACBF_Bparam(trajA,trajB):\n lenCBFs = len(trajA[\"CBFCons\"][-1])\n CBFs = CBFSets[CBFlens.index(lenCBFs)]\n Balpha, Kp, Kd = trajB[\"param\"]\n return SimuTest(CBFs,Balpha,Kp,Kd)", "def create_conserved_motif_list_otherclass(gpcr_pdb,gpcr_aa,j,my_pos,motifs,multiple_chains,chain_name):\n my_pos_bw=my_pos.split(\"x\")[0]\n (my_aa,chain)=gpcr_aa[my_pos]\n add_chain_name=\"\"\n if multiple_chains:\n add_chain_name=\":\"+chain_name \n while j < len(motifs):\n cons_pos_bw = motifs[j][4]\n if my_pos_bw==cons_pos_bw:\n pos_range=find_range_from_cons_pos(my_pos, gpcr_pdb)\n if pos_range:\n motifs[j][2]=True\n motifs[j][3]=pos_range + add_chain_name\n j+=1", "def __init__(self, nfeat, nhid, nclass, dropout, alpha):\n super(GCN, self).__init__()\n self.dropout = dropout\n\n self.conv1 = GraphConvolutionLayer(nfeat, nhid, dropout=dropout, alpha=alpha, not_final=True)\n \n self.add_module('conv1', self.conv1)\n\n self.conv2 = GraphConvolutionLayer(nhid, nclass, dropout=dropout, alpha=alpha, not_final=False)", "def _add_class_assignments(self, roidb):\n for entry in roidb:\n gt_overlaps = entry['gt_overlaps'].toarray()\n # max overlap 
with gt over classes (columns)\n max_overlaps = gt_overlaps.max(axis=1)\n # gt class that had the max overlap\n max_classes = gt_overlaps.argmax(axis=1)\n entry['max_classes'] = max_classes\n entry['max_overlaps'] = max_overlaps\n # sanity checks\n # if max overlap is 0, the class must be background (class 0)\n zero_inds = np.where(max_overlaps == 0)[0]\n assert all(max_classes[zero_inds] == 0)\n # if max overlap > 0, the class must be a fg class (not class 0)\n nonzero_inds = np.where(max_overlaps > 0)[0]\n assert all(max_classes[nonzero_inds] != 0)", "def test_RecurrentNeuralNetwork_probas_to_classes():\n arr1 = np.asarray([0.1, 0.2, 0.7], dtype=np.float32)\n arr2 = np.asarray([0.1], dtype=np.float32)\n assert RecurrentNeuralNetwork.probas_to_classes(arr1) == 2\n assert RecurrentNeuralNetwork.probas_to_classes(arr2) == 0", "def find_joint_extension(self, modelb, modelc, mace_time=10, prover_time=60):\n n = modelb.cardinality\n ne = ['b'+str(x)+'!=b'+str(y) for x in range(n) for y in range(x+1,n)]\n n = modelc.cardinality\n ne += ['c'+str(x)+'!=c'+str(y) for x in range(n) for y in range(x+1,n)]\n return prover9(self.axioms+ne+modelb.positive_diagram('b') + \n modelc.positive_diagram('c'), [], mace_time, prover_time)", "def test_multiclass_balance(self):\n dataset = make_fixture(binary=False, split=False)\n\n oz = ClassBalance()\n assert oz.fit(dataset.y) is oz\n assert oz._mode == BALANCE\n\n # oz.finalize()\n self.assert_images_similar(oz)", "def __init__(self,Capas: list=[],n_fret: float=0,pext: float=0,gamma_h20: float=9.81):\r\n self.capas=Capas\r\n self.n_fret=n_fret\r\n self.pext = pext\r\n self.gamma_h20=gamma_h20\r\n pass", "def weight_by_class_balance(truth, classes=None):\n\n if classes is None:\n # Include all classes\n classes = np.unique(truth)\n\n weight_map = np.zeros_like(truth, dtype=np.float32)\n total_amount = np.product(truth.shape)\n\n min_weight = sys.maxint\n for c in classes:\n class_mask = np.where(truth==c,1,0)\n class_weight = 1/((np.sum(class_mask)+1e-8)/total_amount)\n if class_weight < min_weight:\n min_weight = class_weight\n weight_map += (class_mask*class_weight)#/total_amount\n weight_map /= min_weight\n return weight_map", "def _classifier(self, classes):\n # Initialize key variables\n pseudo = np.linalg.pinv(self.data)\n result = np.dot(pseudo, classes)\n return result", "def jarque_bera(self,alpha=0.05):\n self._finalize()\n JB = self.vcount/6*(self.vskewness**2 + 1/4*((self.vkurtosis-3)**2))\n if chi2 is None:\n p = \"scipy missing\"\n else:\n p = 1 - chi2.cdf(JB,2)\n return JB,p", "def __init__(self, low=0.0, alpha=0.0, beta=1.0):\n super().__init__()\n self.low = low\n self.alpha = alpha\n self.beta = beta\n self.type = 'Gamma'\n self.distType = 'Continuous'\n self.hasInfiniteBound = True\n self.compatibleQuadrature.append('Laguerre')\n self.compatibleQuadrature.append('CDF')\n self.preferredQuadrature = 'Laguerre'\n self.preferredPolynomials = 'Laguerre'", "def __init__(self, input, n_in, n_out,\r\n W=None, b=None, prob_constraint_on=None):\r\n\r\n # initialize weight matrix W\r\n if W is None:\r\n self.W = theano.shared(\r\n value=np.zeros((n_in, n_out), dtype=theano.config.floatX),\r\n name='W')\r\n else:\r\n self.W = W\r\n\r\n # initialize bias b\r\n if b is None:\r\n self.b = theano.shared(\r\n value=np.zeros((n_out,), dtype=theano.config.floatX),\r\n name='b')\r\n else:\r\n self.b = b\r\n\r\n # compute prediction\r\n # the linear output\r\n lin_output = T.dot(input, self.W) + self.b\r\n \r\n if prob_constraint_on == None:\r\n #### we do not 
use those probability constraints\r\n self.y_pred = Sigmoid(lin_output)\r\n\r\n elif prob_constraint_on == \"top\":\r\n #### We first predict the probability of each class using softmax.\r\n # We then weight those probabilities by multiplying them by the\r\n # probability of their parent in the Galaxy Zoo Decision Tree.\r\n \r\n # class 1\r\n prob_Class1 = SoftMax(lin_output[:,0:3])\r\n \r\n # class 2\r\n prob_Class2 = SoftMax(lin_output[:,3:5])\r\n # weight these probabilities using the probability of class 1.2\r\n prob_Class2 *= T.shape_padright(prob_Class1[:,1])\r\n \r\n # class 3\r\n prob_Class3 = SoftMax(lin_output[:,5:7])\r\n # weight these probabilities using the probability of class 2.2\r\n prob_Class3 *= T.shape_padright(prob_Class2[:,1])\r\n \r\n # class 4\r\n prob_Class4 = SoftMax(lin_output[:,7:9])\r\n # weight these probabilities using the probability of class 2.2\r\n prob_Class4 *= T.shape_padright(prob_Class2[:,1])\r\n \r\n # class 5\r\n prob_Class5 = SoftMax(lin_output[:,9:13])\r\n # weight these probabilities using the probability of class 2.2\r\n prob_Class5 *= T.shape_padright(prob_Class2[:,1])\r\n \r\n # class 6\r\n prob_Class6 = SoftMax(lin_output[:,13:15])\r\n \r\n # class 7\r\n prob_Class7 = SoftMax(lin_output[:,15:18])\r\n # weight these probabilities using the probability of class 1.1\r\n prob_Class7 *= T.shape_padright(prob_Class1[:,0])\r\n \r\n # class 8\r\n prob_Class8 = SoftMax(lin_output[:,18:25])\r\n # weight these probabilities using the probability of class 6.1\r\n prob_Class8 *= T.shape_padright(prob_Class6[:,0])\r\n \r\n # class 9\r\n prob_Class9 = SoftMax(lin_output[:,25:28])\r\n # weight these probabilities using the probability of class 2.1\r\n prob_Class9 *= T.shape_padright(prob_Class2[:,0])\r\n \r\n # class 10\r\n prob_Class10 = SoftMax(lin_output[:,28:31])\r\n # weight these probabilities using the probability of class 4.1\r\n prob_Class10 *= T.shape_padright(prob_Class4[:,0])\r\n \r\n # class 11\r\n prob_Class11 = SoftMax(lin_output[:,31:37])\r\n # weight these probabilities using the probability of class 4.1\r\n prob_Class11 *= T.shape_padright(prob_Class4[:,0])\r\n \r\n # concatenate all the probabilities into a single tensor variable\r\n self.y_pred = T.concatenate(\r\n [prob_Class1, prob_Class2, prob_Class3, prob_Class4,\r\n prob_Class5, prob_Class6, prob_Class7, prob_Class8,\r\n prob_Class9, prob_Class10, prob_Class11], axis=1)\r\n elif prob_constraint_on == \"down\":\r\n #### we use those probability constraints\r\n \r\n # the following probabilities should sum up to 1, so we use SoftMax\r\n # to predict all of them\r\n ind1 = [2, 8, 15, 16, 17, 25, 26, 27, 31, 32, 33, 34, 35, 36]\r\n p1 = SoftMax(lin_output[:,ind1])\r\n prob_Class1_3 = p1[:,0]\r\n prob_Class4_2 = p1[:,1]\r\n prob_Class7 = p1[:,2:5]\r\n prob_Class9 = p1[:,5:8]\r\n prob_Class11 = p1[:,8:14]\r\n \r\n prob_Class4_1 = T.sum(prob_Class11, axis=1)\r\n prob_Class2_1 = T.sum(prob_Class9, axis=1)\r\n prob_Class2_2 = prob_Class4_1 + prob_Class4_2\r\n prob_Class1_1 = T.sum(prob_Class7, axis=1)\r\n prob_Class1_2 = prob_Class2_1 + prob_Class2_2\r\n prob_Class1 = T.concatenate(\r\n [T.shape_padright(prob_Class1_1),\r\n T.shape_padright(prob_Class1_2),\r\n T.shape_padright(prob_Class1_3)], axis=1)\r\n prob_Class2 = T.concatenate(\r\n [T.shape_padright(prob_Class2_1),\r\n T.shape_padright(prob_Class2_2)], axis=1)\r\n prob_Class4 = T.concatenate(\r\n [T.shape_padright(prob_Class4_1),\r\n T.shape_padright(prob_Class4_2)], axis=1)\r\n \r\n # the following probabilities should sum up to 1, so 
we use SoftMax\r\n # to predict all of them\r\n ind2 = [14, 18, 19, 20, 21, 24, 23, 24] \r\n p2 = SoftMax(lin_output[:,ind2])\r\n prob_Class6_2 = p2[:,0]\r\n prob_Class8 = p2[:,1:8]\r\n prob_Class6_1 = T.sum(prob_Class8, axis=1)\r\n prob_Class6 = T.concatenate(\r\n [T.shape_padright(prob_Class6_1),\r\n T.shape_padright(prob_Class6_2)], axis=1)\r\n \r\n # for the following probabilities, we resort to the same strategy in\r\n # the \"top\" option\r\n # class 3\r\n prob_Class3 = SoftMax(lin_output[:,5:7])\r\n # weight these probabilities using the probability of class 2.2\r\n prob_Class3 *= T.shape_padright(prob_Class2[:,1])\r\n \r\n # class 5\r\n prob_Class5 = SoftMax(lin_output[:,9:13])\r\n # weight these probabilities using the probability of class 2.2\r\n prob_Class5 *= T.shape_padright(prob_Class2[:,1])\r\n \r\n # class 10\r\n prob_Class10 = SoftMax(lin_output[:,28:31])\r\n # weight these probabilities using the probability of class 4.1\r\n prob_Class10 *= T.shape_padright(prob_Class4[:,0])\r\n \r\n # concatenate all the probabilities into a single tensor variable\r\n self.y_pred = T.concatenate(\r\n [prob_Class1, prob_Class2, prob_Class3, prob_Class4,\r\n prob_Class5, prob_Class6, prob_Class7, prob_Class8,\r\n prob_Class9, prob_Class10, prob_Class11], axis=1)\r\n \r\n \r\n # parameters of the model\r\n self.params = [self.W, self.b]", "def num_classes_b(self):\r\n return self._num_classes_b", "def make_bprod(self):\n rhs1 = random.choice(self.nonterminals)\n rhs2 = random.choice(self.nonterminals)\n lhs = random.choice(self.nonterminals)\n return (lhs, (rhs1, rhs2))", "def classified_weighted_assess(jid,DfA,gbs,wg,DfB,vl):\n\n #Some notes:\n ##simple count\n #yyy1=DfA[jid].groupby(DfA['adid']).count()\n\n ##connect classes to subclasses\n #yyy2=DfA[jid].groupby(DfA['adid']).unique()\n\n ##below is it: count subclasses kinds numbers\n #yyy3=DfA[jid].groupby(DfA['adid']).unique().apply(len)\n\n ##count subclasses instances numbers\n #yyy4=DfA[jid].groupby(DfA['adid']).value_counts()\n\n #yyy5=DfA.groupby(gbs).size()\n ##yyy6=DfA[jid].groupby(DfA[gbs]).size() #wrong!\n\n #Define some names for new columns.\n jid_num=brcadd(jid,'s_num')\n tot_wg=brcadd('tot_',wg,'_',jid)\n wg_pared_jid=brcadd(jid,'_pared_'+wg)\n vl_jid=brcadd(vl,'_',jid)\n vl_pared_jid=brcadd(jid,'_pared_',vl)\n tot_vl_pared_jid=brcadd('tot_',vl_pared_jid)\n\n #A groupby gbs to get jid counts by gbs.\n A_jidno_gbs=DfA[jid].groupby([DfA[cl] for cl in gbs]).count().rename(jid_num)\n\n #A weights groupby uid to get total weights of uids.\n xxx=DfA[wg].groupby(DfA[jid]).sum()\n\n #Put weights in B\n #Those jid which are not in DfA may result in some jids in DfB don't have weight wg.\n DfBB=DfB.join(xxx,on=jid)\n DfBB=DfBB.rename(columns={wg:tot_wg})\n\n #Merge A and B. 
This may cause problems if the jids of the two are not the same.\n A_B=DfA.merge(DfBB,on=jid,how='left')\n A_B=A_B.rename(columns=dict(zip(vl,vl_jid)))\n\n #Compute the components ratios of row for the user.\n A_B[wg_pared_jid]=A_B[wg]/A_B[tot_wg]\n #Compute the corresponding value for the user.\n A_B[vl_pared_jid]=A_B[vl_jid].multiply(A_B[wg_pared_jid], axis=\"index\")\n\n #The combination of A and B group sum by gbs to get the total wg and values by gbs.\n A_B_gbs=A_B.groupby(gbs).sum()\n A_B_gbs=A_B_gbs.rename(columns=dict(zip(vl_pared_jid,tot_vl_pared_jid)))\n\n #Join the weights of uids and pick the required columns.\n A_B_gbs=A_B_gbs.join(A_jidno_gbs)\n A_gbs_vl=A_B_gbs[list(flatten_one([wg,jid_num,tot_vl_pared_jid]))]\n# yyy=DfB.loc[:, [jid]].join(xxx,on=jid)\n yyy=DfBB[[jid,tot_wg]]\n\n return A_gbs_vl,yyy", "def abberationType(self, abbs):\n # Super slow and broken! May not be worth the extra work to fix...\n results = []\n abbs_proc = [] # For tracking processed abbs\n query = \"SELECT f.uniquename AS fbid, db.name AS db,\" \\\n \"dbx.accession AS acc \" \\\n \"FROM feature f \" \\\n \"JOIN cvterm gross_type ON gross_type.cvterm_id=f.type_id \" \\\n \"JOIN feature_cvterm fc ON fc.feature_id = f.feature_id \" \\\n \"JOIN cvterm fine_type ON fine_type.cvterm_id = fc.cvterm_id \" \\\n \"JOIN feature_cvtermprop fctp ON fctp.feature_cvterm_id = fc.feature_cvterm_id \" \\\n \"JOIN cvterm meta ON meta.cvterm_id = fctp.type_id \" \\\n \"JOIN cvterm gtyp ON gtyp.cvterm_id = f.type_id \" \\\n \"JOIN dbxref dbx ON fine_type.dbxref_id = dbx.dbxref_id \" \\\n \"JOIN db ON dbx.db_id = db.db_id \" \\\n \"WHERE gross_type.name = 'chromosome_structure_variation' -- double checks input gross type\" \\\n \"AND meta.name = 'wt_class'\" \\\n \"AND f.uniquename in (%s)\" % (\"'\" + \"'.'\".join(abbs))\n dc = self.query_fb(query)\n for d in dc:\n results.append((d['fbid'], d['db'] + '_' + d['acc']))\n abbs_proc.append(d['fbid'])\n [results.append((a, 'SO_0000110')) for a in abbs if\n a not in abbs_proc] # Defaulting to generic feature id not abb\n return results", "def bayes_binomial_ratio_err(k1,n1, k2,n2, prior1=[0.5,0.5], prior2=[0.5,0.5],\n a = None, sigma_a = None, b = None, sigma_b = None, ab_prior_type=['Normal', 'Normal'],\n nd=1000, nd_interp=2000, rmax = None, rval = None, CL=[0.025, 0.975],\n nd_y=1500, nd_nuisance=20, int_nncut=5, int_prec=0.1, numerics='numerical', renorm=True,\n gEPS = 0.1):\n\n # --------------------------------------------------------------------\n # Numerical protection\n if a is not None:\n if (sigma_a / a) < gEPS:\n cprint(f'Forcing normal prior(a) pdf for numerical protection','yellow')\n ab_prior_type[0] = 'Normal'\n\n if b is not None:\n if (sigma_b / b) < gEPS:\n cprint(f'Forcing normal prior(b) pdf for numerical protection','yellow')\n ab_prior_type[1] = 'Normal'\n # --------------------------------------------------------------------\n\n if prior1 == 'Flat':\n prior1 = [1, 1]\n if prior1 == 'Jeffrey':\n prior1 = [0.5, 0.5]\n if prior1 == 'Haldane':\n prior1 = [0, 0]\n\n if prior2 == 'Flat':\n prior2 = [1, 1]\n if prior2 == 'Jeffrey':\n prior2 = [0.5, 0.5]\n if prior2 == 'Haldane':\n prior2 = [0, 0]\n\n print(__name__ + f'.bayes_binomial_ratio: prior1 = {prior1}, prior2 = {prior2}')\n\n # Beta prior parameters\n alpha1,beta1 = prior1[0],prior1[1]\n alpha2,beta2 = prior2[0],prior2[1]\n\n # --------------------------------------------------------------------\n # y-integral samples for each pdf(r) point\n def integrand(r, y, k1_new, k2_new):\n return 
np.abs(y)*binom_post_2D(p1=r*y, p2=y, \\\n k1=k1_new,n1=n1, k2=k2_new,n2=n2, alpha1=alpha1,beta1=beta1, alpha2=alpha2,beta2=beta2)\n\n # --------------------------------------------------------------------\n # Return scale prior pdf values\n def get_ab_prior_pdf(x,mu,sigma, mode):\n\n if mode == 'Gamma':\n gamma_k, gamma_theta = gamma_param_estimate(mu=mu, sigma=sigma)\n print(f'Gamma pdf param k={gamma_k:0.5f}, theta={gamma_theta:0.5f}')\n\n return functions.gamma_pdf(x=x, k=gamma_k, theta=gamma_theta)\n\n elif mode == 'Normal':\n return functions.normpdf(x=x, mu=mu, std=sigma)\n\n else:\n raise Except(f'.bayes_binomial_ratio_err: Unknown scale prior type = {ab_prior_type}')\n\n # --------------------------------------------------------------------\n # Integration range\n def genrange(u, sigma_u, k, n):\n\n MIN = u - int_nncut*sigma_u\n MAX = u + int_nncut*sigma_u\n \n # Boundary control\n if MIN*k < 1: MIN = 1/k \n if MAX*k > n: MAX = n/k\n\n return np.linspace(MIN, MAX, nd_nuisance)\n\n # --------------------------------------------------------------------\n\n # Set maximum ratio to the upper tail\n if rmax is None:\n rmax = 6 * (k1/n1) / (k2/n2)\n\n # Random variable p discretized on a reasonably large interval (loop checks the discretization)\n trials = 1\n while True:\n if rval is None or trials > 1:\n rval = np.linspace(0, rmax, trials * nd)\n pdf = np.zeros(len(rval))\n\n # Via arbitrary precision library (can be very slow for large numbers)\n if numerics == 'mpmath':\n \n pdf = [bayes_posterior_ratio(rval[i], k1,n1, k2,n2, alpha1,beta1, alpha2,beta2) for i in tqdm(range(len(rval)))]\n\n # Via numerical integration\n elif numerics == 'numerical':\n\n pdf = np.zeros(len(rval))\n yval = np.linspace(0,1, nd_y)\n\n # ============================================================\n # Nuisance scale parameters\n\n k1_new = None\n k2_new = None\n\n if a is not None:\n aval = genrange(u=a, sigma_u=sigma_a, k=k1, n=n1)\n a_prior = get_ab_prior_pdf(x=aval, mu=a, sigma=sigma_a, mode=ab_prior_type[0])\n k1_new = aval*k1\n\n # Compute re-normalization (can be crucial near zero, when the left tail is truncated)\n Z = simps(x=aval, y=a_prior); print(f'Prior scale param [a] {ab_prior_type[0]} pdf norm. integral: {Z}')\n a_prior /= Z\n\n if b is not None:\n bval = genrange(u=b, sigma_u=sigma_b, k=k2, n=n2)\n b_prior = get_ab_prior_pdf(x=bval, mu=b, sigma=sigma_b, mode=ab_prior_type[1])\n k2_new = bval*k2\n\n # Compute re-normalization (can be crucial near zero, when the left tail is truncated)\n Z = simps(x=bval, y=b_prior); print(f'Prior scale param [b] {ab_prior_type[1]} pdf norm. integral: {Z}')\n b_prior /= Z\n\n # ============================================================\n # Construct PDF(r) numerically. 
Bayes denominator (normalization) already handled.\n\n # Apply prior scales a (b) to k1 (k2) and the binomial boundary condition.\n # [Note: cannot apply to p1 (p2) => would result formally\n # in an unidentifiable model (singular Fisher information), at least if a (b)\n # would be floating parameters.\n\n # Only a\n if a is not None and b is None:\n print(__name__ + f'.bayes_binomial_ratio_err: Numerator prior scale param a = ({a}, {sigma_a})')\n \n for i in tqdm(range(len(rval))):\n Ia = np.zeros(len(aval))\n\n for j in range(len(aval)):\n I = integrand(r=rval[i], y=yval, k1_new=k1_new[j], k2_new=k2)\n Ia[j] = simps(x=yval, y=I)\n\n # ***\n pdf[i] = simps(x=aval, y=Ia*a_prior)\n\n # Only b\n elif a is None and b is not None:\n print(__name__ + f'.bayes_binomial_ratio_err: Denominator prior scale param b = ({b}, {sigma_b})')\n \n for i in tqdm(range(len(rval))):\n Ib = np.zeros(len(bval))\n\n for j in range(len(bval)):\n I = integrand(r=rval[i], y=yval, k1_new=k1, k2_new=k2_new[j])\n Ib[j] = simps(x=yval, y=I)\n\n # ***\n pdf[i] = simps(x=bval, y=Ib*b_prior)\n\n # Both a and b\n elif a is not None and b is not None:\n print(__name__ + f'.bayes_binomial_ratio_err: Num. and denom. prior scale param a = ({a}, {sigma_a}) and b = ({b}, {sigma_b})')\n\n for i in tqdm(range(len(rval))):\n\n Ia = np.zeros(len(aval))\n for j in range(len(aval)):\n\n Ib = np.zeros(len(bval))\n for k in range(len(bval)):\n I = integrand(r=rval[i], y=yval, k1_new=k1_new[j], k2_new=k2_new[k])\n Ib[k] = simps(x=yval, y=I)\n\n Ia[j] = simps(x=bval, y=Ib*b_prior)\n\n # ***\n pdf[i] = simps(x=aval, y=Ia*a_prior)\n\n # The no nuisance parameters case\n else:\n print(__name__ + f'.bayes_binomial_ratio_err: No prior (scale) parameters.')\n\n for i in tqdm(range(len(rval))):\n I = np.abs(yval)*binom_post_2D(p1=rval[i]*yval, \\\n p2=yval, k1=k1,n1=n1, k2=k2,n2=n2, alpha1=alpha1,beta1=beta1, alpha2=alpha2,beta2=beta2)\n pdf[i] = simps(x=yval, y=I)\n else:\n raise Exception(__name__ + f'.bayes_binomial_ratio_err: Unknown numerics method {numerics}')\n\n # Interpolate\n f2 = interp1d(rval, pdf, kind='quadratic', fill_value='extrapolate')\n r_dense = np.linspace(0, rmax, nd_interp)\n pdf_dense = f2(r_dense)\n \n # Check normalization\n I = simps(y=pdf_dense, x=r_dense)\n if np.abs(I-1) > int_prec:\n trials += 1\n if numerics == 'numerical':\n nd_y *= 2\n nd_nuisance *= 2\n print(__name__ + f'.bayes_binomial_ratio_err: Posterior integral {I:.6f} => increasing discretization')\n if trials > 10:\n raise Exception(__name__ + f'bayes_binomial_ratio_err: PDF(r) normalization I={I} error (set tech-parameters manually)') \n else:\n break\n \n # Normalization of the posterior PDF to unit integral\n if renorm:\n pdf_dense /= simps(x=r_dense, y=pdf_dense)\n\n print(__name__ + f' >> Posterior integral before: {I:.6f} | after: {simps(x=r_dense, y=pdf_dense)}')\n\n discrete_pdf = pdf_dense / np.sum(pdf_dense) # Normalize to discrete PDF\n discrete_cdf = np.cumsum(discrete_pdf) # Discrete CDF\n CR_val,CR_ind = tools.cdf_percentile(discrete_cdf, r_dense, CL)\n \n output = {\n 'val' : r_dense,\n 'pdf' : pdf_dense,\n 'discrete_pdf': discrete_pdf,\n 'discrete_cdf': discrete_cdf,\n 'CR_value' : CR_val,\n 'CR_index' : CR_ind\n }\n return output", "def __init__(self, x: Bn, X1: G1Element, y: Dict[str, Bn]):\n self.x = x\n self.X1 = X1\n self.y = y", "def second_class_tp(p,n):\n c = np.zeros(n)\n d = np.zeros(p)\n ucon = np.zeros(n)\n lcon = np.zeros(n)\n \n #uvar = np.ones(n)*1\n uvar = np.ones(n)*5\n lvar = np.ones(n)*0.5\n name = 
str(p)+'_'+str(n)+'_'+str(n)+'_l1_tp'+'.txt'\n #name = str(n)+'_'+str(p)+'_'+'_second_tp'+'.txt'\n Q = rog.hilb(p,n)\n # d=(di), di=sum qij for i= 1,...,p\n for i in range(p): \n d[i]= Q[i,:].sum()\n B = np.zeros((n,n))\n return Q,B,d,c,lcon,ucon,lvar,uvar,name", "def getMetricsClass(pred_bboxes, gt_bboxes, nclasses):\r\n aps = []\r\n iou = []\r\n for cls in range(nclasses):\r\n if bool(pred_bboxes):\r\n if len(pred_bboxes[0]) == 4: \r\n avg_precision_class, iou_class = getMetrics(pred_bboxes, gt_bboxes)\r\n if len(pred_bboxes[0]) == 5:\r\n avg_precision_class, iou_class = getMetrics(pred_bboxes, gt_bboxes, confidence = True)\r\n else:\r\n avg_precision_class = 0\r\n iou_class = 0\r\n\r\n aps.append(avg_precision_class)\r\n iou.append(iou_class)\r\n \r\n return np.mean(aps), np.mean(iou)", "def test_naive_bayes_soy(test_set, classes, class_probabilities, class_feature_probs):\n\n print('[ INFO ]: Testing soy data with Naive Bayes Classifier...')\n\n class_results = {}\n scores = {}\n\n for soy_class in classes:\n\n # Create new column for class predictions\n feature_set = test_set.drop(classes, axis=1)\n feature_set['pred_class'] = 0\n true_class = test_set[soy_class]\n\n for row in range(len(feature_set)):\n\n # Initialize probability sums for each class\n true_probs_sum = 1\n false_probs_sum = 1\n true_conditional_prob_sum = 1\n false_conditional_prob_sum = 1\n\n for col in feature_set.columns:\n\n if col != 'pred_class':\n\n # Calculate probabilities assuming the class is present or 1\n if feature_set[col].iloc[row] == 1:\n\n # Compute conditional feature probabilities based on\n # wether or not the feature is present (1 or 0)\n true_prob = class_feature_probs[soy_class][0].get(col)\n false_prob = 1 - class_feature_probs[soy_class][1].get(col)\n\n else:\n\n # Calculate probabilities assuming the class is not present or 0\n true_prob = 1 - class_feature_probs[soy_class][0].get(col)\n false_prob = class_feature_probs[soy_class][1].get(col)\n\n # Multiply all feature probabilities together for each record\n true_probs_sum = true_probs_sum * true_prob\n false_probs_sum = false_probs_sum * false_prob\n\n # Multiply class conditional probabilities by conditional feature probabilities\n true_conditional_prob_sum = class_probabilities[soy_class] * true_probs_sum\n false_conditional_prob_sum = (1 - class_probabilities[soy_class]) * false_probs_sum\n\n # Determine which probability is highest - highest one is selected as the prediction value\n if true_conditional_prob_sum > false_conditional_prob_sum:\n feature_set['pred_class'].iloc[row] = 1\n\n # Place the results into a data frame for comparison\n results = pd.concat([feature_set['pred_class'], true_class], axis=1)\n results.columns = ['pred_class', 'true_class']\n class_results[soy_class] = results\n\n # Calculate the number of TP, TN, FP, FN\n true_positives = len(results.loc[(results['true_class'] == 1) & (results['pred_class'] == 1)])\n true_negatives = len(results.loc[(results['true_class'] == 0) & (results['pred_class'] == 0)])\n false_positives = len(results.loc[(results['true_class'] == 0) & (results['pred_class'] == 1)])\n false_negatives = len(results.loc[(results['true_class'] == 1) & (results['pred_class'] == 0)])\n\n scores[soy_class] = {\n 'TP' : true_positives,\n 'TN' : true_negatives,\n 'FP' : false_positives,\n 'FN' : false_negatives\n }\n\n return class_results, scores", "def __init__(self, attributes: List[AttributeName], g1: G1Element, Y1: Dict[str, G1Element], g2: G2Element, X2: G2Element, Y2: Dict[AttributeName, 
G2Element]):\n self.attributes = attributes\n self.g1 = g1\n self.Y1 = Y1\n self.g2 = g2\n self.X2 = X2\n self.Y2 = Y2", "def _get_arg_vb_class(self, a_toks):\n ret = [0] * 7\n for _, p in a_toks:\n if p in VB_TAG2POS:\n ret[VB_TAG2POS[p]] = 1.\n return ''.join(str(t) for t in ret)", "def elemental_descriptor(A1_ion, A2_ion, B_ion):\n ele_A1 = mg.Element(A1_ion)\n ele_A2 = mg.Element(A2_ion)\n ele_B = mg.Element(B_ion)\n ele_O = mg.Element('O') \n # A/B ion oxidation state \n common_oxidation_states_A1 = ele_A1.common_oxidation_states[0]\n common_oxidation_states_A2 = ele_A2.common_oxidation_states[0]\n common_oxidation_states_A = np.mean(common_oxidation_states_A1 + common_oxidation_states_A2)\n common_oxidation_states_B = ele_B.common_oxidation_states[0]\n # ionic radius property\n ionic_radius_A1 = float(str(ele_A1.average_ionic_radius)[:-4])\n ionic_radius_A2 = float(str(ele_A2.average_ionic_radius)[:-4])\n ionic_radius_A = (ionic_radius_A1+ ionic_radius_A2)/2\n ionic_radius_B = float(str(ele_B.average_ionic_radius)[:-4])\n ionic_radius_O = float(str(ele_O.average_ionic_radius)[:-4])\n # Tolerance factor \n TF = (ionic_radius_A + ionic_radius_O)/(np.sqrt(2)*(ionic_radius_B + ionic_radius_O))\n # Octahedral factor\n OF = ionic_radius_B/ionic_radius_O \n # ionic_radius ratios\n ionic_ration_AO = ionic_radius_A / ionic_radius_O\n ionic_ration_BO = ionic_radius_B / ionic_radius_O\n # averaged electronegativity for A and B atoms\n Pauling_electronegativity_A1 = ele_A1.X\n Pauling_electronegativity_A2 = ele_A2.X\n Pauling_electronegativity_A = (Pauling_electronegativity_A1 + Pauling_electronegativity_A2)/2\n Pauling_electronegativity_B = ele_B.X\n Pauling_electronegativity_O = ele_O.X\n # Difference in the electronegativity for A-O and B-O\n Diff_A_O = Pauling_electronegativity_A - Pauling_electronegativity_O\n Diff_B_O = Pauling_electronegativity_B - Pauling_electronegativity_O\n return [common_oxidation_states_A, common_oxidation_states_B, Pauling_electronegativity_A, Pauling_electronegativity_B, TF, OF, ionic_ration_AO, ionic_ration_BO, Diff_A_O, Diff_B_O]", "def bernoulli(train_data, train_labels, test_data, test_labels, data_set1=True, combined=None):\n\n DECISION_TREE_ACCURACIES = {\n 'Accuracy_train': 0,\n 'Accuracy_test': 0\n }\n ALPHA = [0, .01, .025, .05, .075, 0.1, 0.2, 0.3, .5, .75, 1, 1.5, 2.5]\n #ALPHA = [0, 0.175, 0.190, 0.195, 0.2, 0.205, 0.21, 0.225]\n\n FIT_PRIOR = [True, False]\n\n for alpha, fit_prior in itertools.product(ALPHA, FIT_PRIOR):\n bern = BernoulliNB(alpha=alpha, fit_prior=fit_prior)\n bern.fit(train_data, train_labels)\n\n pred_test = bern.predict(test_data)\n acc = accuracy_score(test_labels, pred_test)\n print(\"Alpha: {} Fit Prior: {} Accuracy: {}\".format(alpha, fit_prior, acc))\n\n if acc > DECISION_TREE_ACCURACIES['Accuracy_test']:\n DECISION_TREE_ACCURACIES['Accuracy_test'] = acc # todo this line is new, test\n DECISION_TREE_ACCURACIES['Alpha'] = alpha\n DECISION_TREE_ACCURACIES['Fit_prior'] = fit_prior\n pred_train = bern.predict(train_data)\n acc_ = accuracy_score(train_labels, pred_train)\n DECISION_TREE_ACCURACIES['Accuracy_train'] = acc_\n\n bern = BernoulliNB(alpha=DECISION_TREE_ACCURACIES['Alpha'],\n fit_prior=DECISION_TREE_ACCURACIES['Fit_prior'])\n\n if combined is not None:\n bern.fit(combined[0], combined[1]) # both first sets given, extra data == extra training\n else:\n bern.fit(train_data, train_labels)\n\n # save the trained model\n file_name = 'ds1TEST-nb.pkl' if data_set1 else 'ds2TEST-nb.pkl'\n with open(file_name, 'wb') as file:\n 
pickle.dump(bern, file)\n\n return bern, DECISION_TREE_ACCURACIES", "def classified_weighted_assess(jid,DfA,gbs,wg,DfB,vl):\n \n #Some notes:\n ##simple count\n #yyy1=DfA[jid].groupby(DfA['adid']).count()\n \n ##connect classes to subclasses\n #yyy2=DfA[jid].groupby(DfA['adid']).unique()\n \n ##below is it: count subclasses kinds numbers\n #yyy3=DfA[jid].groupby(DfA['adid']).unique().apply(len)\n \n ##count subclasses instances numbers\n #yyy4=DfA[jid].groupby(DfA['adid']).value_counts()\n \n #yyy5=DfA.groupby(gbs).size()\n ##yyy6=DfA[jid].groupby(DfA[gbs]).size() #wrong!\n \n #Define some names for new columns.\n jid_num=brcadd(jid,'s_num')\n tot_wg=brcadd('tot_',wg,'_',jid) \n wg_pared_jid=brcadd(jid,'_pared_'+wg) \n vl_jid=brcadd(vl,'_',jid)\n vl_pared_jid=brcadd(jid,'_pared_',vl)\n tot_vl_pared_jid=brcadd('tot_',vl_pared_jid) \n \n #A groupby gbs to get jid counts by gbs.\n A_jidno_gbs=DfA[jid].groupby([DfA[cl] for cl in gbs]).count().rename(jid_num)\n \n #A weights groupby uid to get total weights of uids.\n xxx=DfA[wg].groupby(DfA[jid]).sum()\n \n #Put weights in B\n #Those jid which are not in DfA may result some jid in DfB don't have weight wg.\n DfB=DfB.join(xxx,on=jid)\n DfB.rename(columns={wg:tot_wg},inplace=True) \n \n #Merge A and B. This may cause problems if the jids of the two are not the same.\n A_B=DfA.merge(DfB,on=jid,how='left')\n A_B.rename(columns=dict(zip(vl,vl_jid)),inplace=True)\n \n #Compute the components ratios of row for the user.\n A_B[wg_pared_jid]=A_B[wg]/A_B[tot_wg]\n #Compute the corresponding value for the user.\n A_B[vl_pared_jid]=A_B[vl_jid].multiply(A_B[wg_pared_jid], axis=\"index\")\n \n #The combination of A and B group sum by gbs to get the total wg and values by gbs.\n A_B_gbs=A_B.groupby(gbs).sum()\n A_B_gbs.rename(columns=dict(zip(vl_pared_jid,tot_vl_pared_jid)),inplace=True)\n \n #Join the weights of uids and pick the required columns.\n A_B_gbs=A_B_gbs.join(A_jidno_gbs)\n A_gbs_vl=A_B_gbs[list(flatten_one([wg,jid_num,tot_vl_pared_jid]))]\n \n return A_gbs_vl,DfB", "def test_bayes_factor_b(self):\n model_1 = ufloat(2, 1)\n model_2 = ufloat(4, 1)\n expected_result = ufloat(-4, 2.82842712474619032)\n actual_result = utils.bayes_factor(model_1, model_2)\n assert_almost_equal(actual_result.n, expected_result.n)\n assert_almost_equal(actual_result.s, expected_result.s)", "def classify_comm(bf, t1, t2):\n # Per Special --> all 3 targets OpI\n if is_opi(bf, t1) and is_opi(t1, t2) and is_opi(t2, bf):\n return \"Per Special\"\n # Cyclic Shift --> all 3 targets coplanar and AnI\n if (not (is_inter(bf, t1) or is_inter(t1, t2) or is_inter(t2, bf))) and \\\n is_coplanar(bf, t1 ,t2):\n return \"Cyclic Shift\"\n # Orthogonal --> all 3 targets non-interchangeable, 2 targets opposite to buffer\n if (not (is_inter(bf, t1) or is_inter(t1, t2) or is_inter(t2, bf))) and \\\n is_oni(bf, t1) and is_oni(bf, t2) and is_oni(t1, t2):\n return \"Orthogonal\"\n # Columns --> 2 targets OpI, 3rd not interchangeable to first 2\n if has_column(bf, t1, t2):\n return \"Columns\"\n # Else, is Pure/A9\n return \"Pure/A9\"", "def b12(self,k1,k2,c):\n return 2.0/3.0*(1-self.mu)*c.pkInterp(k1)*c.pkInterp(k2)", "def dbgain(self, pt_1, pt_2):\n raise NotImplementedError", "def generate_cons_pos_all_info(cons_pos_all,all_gpcrs_info):\n for prot_info in all_gpcrs_info:\n cons_pos_prot = prot_info[4]\n for gpcr_class, cons_class_lists in cons_pos_prot.items():\n if cons_class_lists:\n list_num=0 # list 0 or 1\n while list_num < len(cons_class_lists):\n 
cons_pos_li=cons_class_lists[list_num]\n cons_pos_num = 0\n while cons_pos_num < len(cons_pos_li):\n cons_pos_info=cons_pos_li[cons_pos_num]\n if cons_pos_info[2] != \"None\":\n cons_pos_all[gpcr_class][list_num][cons_pos_num][2]+=(cons_pos_info[2]+\",\")\n cons_pos_num +=1\n list_num+=1\n show_class={}\n for gpcr_class, cons_pos_class in cons_pos_all.items():\n for cons_pos_li in cons_pos_class:\n for cons_pos in cons_pos_li:\n if cons_pos[2]:\n cons_pos[2]=cons_pos[2].rstrip(\",\")\n else:\n cons_pos[1]=\"Position not found.\"\n cons_pos[2]=\"None\"\n show_class[gpcr_class]=True\n active_class_all= {'A': ['', ''], 'C': ['', ''], 'F': ['', ''], 'B': ['', '']}\n classes=sorted(cons_pos_all)\n active_class_all[classes[0]]=['active', 'in active']\n return (cons_pos_all,show_class,active_class_all)", "def __init__(self, alpha=80, beta=13, gamma=3, spatial_ker_weight=3, bilateral_ker_weight=10):\n self.alpha = alpha\n self.beta = beta\n self.gamma = gamma\n self.spatial_ker_weight = spatial_ker_weight\n self.bilateral_ker_weight = bilateral_ker_weight", "def B(alpha, beta):\n return math.gamma(apha) * math.gamma(beta) / math.gamma(alpha + beta)", "def part3c_2():\n xs = \"Werner & Co entered court today . Werner maintained that they were not guilty .\".split()\n N = 10000\n\n submission.computeGibbsProbabilities( englishCRF,\n submission.getCRFBlocks,\n submission.chooseGibbsCRF,\n xs, N )\n grader.requireIsTrue(True)", "def __init__(self, n, sents, corpus='', beta=None, addone=True):\n self.n = n\n self.beta = beta\n self.corpus = corpus\n self.beta_flag = True\n self.addone = addone\n self.smoothingtechnique = 'Back Off (Katz) with Discounting Smoothing'\n self.counts = counts = defaultdict(int)\n self.A_set = defaultdict(set)\n voc = ['</s>']\n for s in sents:\n voc += s\n self.voc = set(voc)\n if beta is None:\n self.beta_flag = False\n\n # if no beta given, we compute it\n if not self.beta_flag:\n total_sents = len(sents)\n aux = int(total_sents * 90 / 100)\n # 90 per cent por training\n train_sents = sents[:aux]\n # 10 per cent for perplexity (held out data)\n held_out_sents = sents[-total_sents+aux:]\n\n train_sents = list(map((lambda x: ['<s>']*(n-1) + x), train_sents))\n train_sents = list(map((lambda x: x + ['</s>']), train_sents))\n for sent in train_sents:\n for j in range(n+1):\n for i in range(n-j, len(sent) - j + 1):\n ngram = tuple(sent[i: i + j])\n counts[ngram] += 1\n # for efficiency, we save the A set as a dict of sets\n if j:\n self.A_set[ngram[:-1]].add(ngram[-1])\n for i in range(1, n):\n counts[('<s>',)*i] += len(train_sents)\n counts[('</s>',)] = len(train_sents)\n\n self.tocounts = counts\n # search for the beta that gives lower perplexity\n beta_candidates = [i*0.1 for i in range(1, 10)]\n # xs is a list with (beta, perplexity)\n xs = []\n self.sents = train_sents\n for aux_beta in beta_candidates:\n self.beta = aux_beta\n aux_perx = self.perplexity(held_out_sents)\n xs.append((aux_beta, aux_perx))\n xs.sort(key=lambda x: x[1])\n self.beta = xs[0][0]\n with open('old-stuff/backoff_'+str(n)+'_parameters_'+corpus, 'a') as f:\n f.write('Order: {}\\n'.format(self.n))\n f.write('Beta: {}\\n'.format(self.beta))\n f.write('AddOne: {}\\n'.format(self.addone))\n f.write('Perplexity observed: {}\\n'.format(xs[0][1]))\n f.write('-------------------------------\\n')\n f.close()\n else:\n sents = list(map((lambda x: x + ['</s>']), sents))\n sents = list(map((lambda x: ['<s>']*(n-1) + x), sents))\n\n for sent in sents:\n for j in range(n+1):\n for i in range(n-j, len(sent) - j + 
1):\n ngram = tuple(sent[i: i + j])\n counts[ngram] += 1\n # for efficiency, we save the A set as a dict of sets\n if j:\n self.A_set[ngram[:-1]].add(ngram[-1])\n for i in range(1, n):\n counts[('<s>',)*i] += len(sents)\n counts[('</s>',)] = len(sents)", "def __init__(self, alpha=1.0, epsilon=0.05, gamma=0.8, numTraining=10):\n self.alpha = float(alpha)\n self.epsilon = float(epsilon)\n self.discount = float(gamma)\n self.numTraining = int(numTraining)", "def addBanClass(x:ResidueDict)->ResidueDict:\n banClass:str = run(matchStrandToClass(x.struct,x.strand_id))\n x.banClass = banClass\n return x", "def __init__(self, alpha=1.0, epsilon=0.05, gamma=0.8, numTraining = 10):\n self.alpha = float(alpha)\n self.epsilon = float(epsilon)\n self.discount = float(gamma)\n self.numTraining = int(numTraining)", "def reconcile(self):\r\n\t\tclass_A = np.empty((self.basis.nK,self.basis.nK),float)\r\n\t\tclass_G = np.empty((self.basis.nK,self.basis.nK),float)\r\n\t\tclass_koop = np.empty((self.basis.nK,self.basis.nK),float)\r\n\t\tclass_num = len(self.koop_cluster_list)\r\n\t\tfor i in range(class_num):\r\n\t\t\tclass_mem_num = len(self.koop_cluster_list[i])\r\n\t\t\tclass_counter = 0.0\r\n\t\t\tfor j in range(class_mem_num):\r\n\t\t\t\tclass_A += (self.koop_cluster_list[i][j]._A*self.koop_cluster_memb_prob_list[i][j])/float(class_mem_num)\r\n\t\t\t\tclass_G += (self.koop_cluster_list[i][j]._G*self.koop_cluster_memb_prob_list[i][j])/float(class_mem_num)\r\n\t\t\t\tclass_counter += self.koop_cluster_list[i][j].counter\r\n\t\t\tclass_koop = np.dot(np.linalg.pinv(class_G),class_A)\r\n\t\t\tself.koopman_hybrid_modes.append(KoopmanOperator(self.basis, class_koop, class_A, class_G, class_counter))", "def part1b_2():\n xs = exampleInput\n z = 5.881\n forward = [\n Counter({'-FEAT-': 0.622, '-SIZE-': 0.377}), \n Counter({'-SIZE-': 0.761, '-FEAT-': 0.238}), \n Counter({'-SIZE-': 0.741, '-FEAT-': 0.258})]\n \n z_, forward_ = submission.computeForward(simpleCRF, xs)\n for vec, vec_ in zip( forward, forward_):\n grader.requireIsTrue( Counters.approximateEquals( vec, vec_ ) )\n grader.requireIsEqual( z, z_, 1e-2)", "def naiveBayesMixture(train_set, train_labels, dev_set, bigram_lambda,unigram_smoothing_parameter, bigram_smoothing_parameter, pos_prior):\n\n # TODO: Write your code here\n # return predicted labels of development set\n\n # counters for Training Phase\n ham = Counter()\n ham_bi = Counter()\n spam = Counter()\n spam_bi = Counter()\n\n for string, label in zip(train_set, train_labels):\n for i in range(len(string)):\n word = string[i]\n if i != len(string)-1:\n word_bi = string[i] + ' ' + string[i+1]\n if label == 1:\n ham_bi.update({word_bi:1})\n else:\n spam_bi.update({word_bi:1})\n if label == 1:\n ham.update({word:1})\n else:\n spam.update({word:1})\n\n ham_len = 0\n for w in ham:\n ham_len += ham[w]\n spam_len = 0\n for w in spam:\n spam_len += spam[w]\n \n hambi_len = 0\n for w in ham_bi:\n hambi_len += ham_bi[w]\n spambi_len = 0\n for w in spam_bi:\n spambi_len += spam_bi[w]\n\n # labels for Development Phase\n dev_labels = []\n # dicts for P(word|ham) and P(word|spam)\n p_ham = {}\n p_spam = {}\n p_hambi = {}\n p_spambi = {}\n\n # develop likelihoods based on dev_set\n for word in ham:\n numerator = ham[word] + unigram_smoothing_parameter\n denominator = ham_len + unigram_smoothing_parameter*(len(ham))\n p_ham[word] = numerator / denominator\n for word in spam:\n numerator = spam[word] + unigram_smoothing_parameter\n denominator = spam_len + unigram_smoothing_parameter*(len(spam))\n p_spam[word] = 
numerator / denominator\n\n for word_bi in ham_bi:\n numerator = ham_bi[word_bi] + bigram_smoothing_parameter\n denominator = hambi_len + bigram_smoothing_parameter*(len(ham_bi))\n p_hambi[word_bi] = numerator / denominator\n for word_bi in spam_bi:\n numerator = spam_bi[word_bi] + bigram_smoothing_parameter\n denominator = spambi_len + bigram_smoothing_parameter*(len(spam_bi))\n p_spambi[word_bi] = numerator / denominator\n \n numerator = unigram_smoothing_parameter\n denominator = ham_len + unigram_smoothing_parameter*(len(ham))\n p_ham_zero = numerator / denominator\n denominator = spam_len + unigram_smoothing_parameter*(len(spam))\n p_spam_zero = numerator / denominator\n\n numerator = bigram_smoothing_parameter\n denominator = hambi_len + bigram_smoothing_parameter*(len(ham_bi))\n p_hambi_zero = numerator / denominator\n denominator = spambi_len + bigram_smoothing_parameter*(len(spam_bi))\n p_spambi_zero = numerator / denominator\n\n for string in dev_set:\n p_words_ham = math.log(pos_prior)\n p_words_spam = math.log(1 - pos_prior)\n\n p_words_hambi = math.log(pos_prior)\n p_words_spambi = math.log(1 - pos_prior)\n \n for i in range(len(string)):\n word = string[i]\n if word in p_ham:\n p_words_ham += math.log(p_ham[word])\n else:\n p_words_ham += math.log(p_ham_zero)\n if word in p_spam:\n p_words_spam += math.log(p_spam[word])\n else:\n p_words_spam += math.log(p_spam_zero)\n\n if i != len(string)-1:\n word_bi = string[i] + ' ' + string[i+1]\n if word_bi in p_hambi:\n p_words_hambi += math.log(p_hambi[word_bi])\n else:\n p_words_hambi += math.log(p_hambi_zero)\n if word_bi in p_spambi:\n p_words_spambi += math.log(p_spambi[word_bi])\n else:\n p_words_spambi += math.log(p_spambi_zero)\n\n p_ham_mix = p_words_ham*(1-bigram_lambda) + p_words_hambi*bigram_lambda\n p_spam_mix = p_words_spam*(1-bigram_lambda) + p_words_spambi*bigram_lambda\n\n dev_labels.append(p_ham_mix >= p_spam_mix)\n\n return dev_labels", "def RestrictionOneToManyDependency(self, alphaCompId, betaCompId, noInstances):\n if self.solverTypeOptimize:\n\n bvars1 = [(self.a[alphaCompId * self.nrVM + j], noInstances) for j in range(self.nrVM)]\n bvars2 = [(self.a[betaCompId * self.nrVM + j], -1) for j in range(self.nrVM)]\n bvars = bvars1 + bvars2\n self.solver.add(PbGe(bvars, 0))\n else:\n self.solver.assert_and_track(\n PbGe(noInstances * sum([self.a[alphaCompId * self.nrVM + j] for j in range(self.nrVM)]) -\n sum([self.a[betaCompId * self.nrVM + j] for j in range(self.nrVM)]), 0), \"LabelOneToMany: \" + str(self.labelIdx))\n self.labelIdx += 1\n\n if self.solverTypeOptimize:\n bvars1 = [(self.a[alphaCompId * self.nrVM + j], noInstances) for j in range(self.nrVM)]\n bvars2 = [(self.a[betaCompId * self.nrVM + j], -1) for j in range(self.nrVM)]\n bvars = bvars1 + bvars2\n self.solver.add(PbLe(bvars, 1 + noInstances))\n\n\n\n else:\n self.solver.assert_and_track(\n PbLe(noInstances *\n sum([self.a[alphaCompId * self.nrVM + j] for j in range(self.nrVM)]) -\n sum([self.a[betaCompId * self.nrVM + j] for j in range(self.nrVM)])-1, noInstances),\n \"LabelOneToMany: \" + str(self.labelIdx))\n self.labelIdx += 1", "def b(self,k1,k2,cosTheta,c):\n return self.b1(k1, k2, cosTheta,c) + \\\n self.b1(k1, self.k3Length(k1, k2, cosTheta), \\\n self.cos1(k1, k2, cosTheta),c) +\\\n self.b1(k2, self.k3Length(k2, k1, cosTheta), \\\n self.cos1(k2, k1, cosTheta),c)", "def __call__(self, f1, f2):\n r = len(set(f1.features) ^ set(f2.features))\n\n return exp(-self.gamma * r)", "def train_naive_bayes_soy(train_set, classes):\n\n print('[ INFO ]: 
Training soy data with Naive Bayes Classifier...')\n\n class_probabilities = {}\n class_feature_probs = {}\n\n for soy_class in classes:\n\n feature_true_probs = {}\n feature_false_probs = {}\n\n # Find the probability that each class is in the training set\n class_probabilities[soy_class] = len(train_set[(train_set[soy_class] == 1)]) / len(train_set)\n\n # Compute the conditional feature probabilities based on the class probabilities\n # where the class is present\n class_true = train_set[(train_set[soy_class] == 1)]\n for col in class_true.columns:\n if col not in classes:\n try:\n true_true = len(class_true[(class_true[col] == 1)]) / len(class_true)\n except:\n true_true = 0\n feature_true_probs[col] = true_true\n\n # Compute the conditional feature probabilities based on the class probabilities\n # where the class is not present\n class_false = train_set[(train_set[soy_class] == 0)]\n for col in class_false.columns:\n if col not in classes:\n try:\n false_false = len(class_false[(class_false[col] == 0)]) / len(class_false)\n except:\n false_false = 0\n feature_false_probs[col] = false_false\n\n class_feature_probs[soy_class] = [feature_true_probs, feature_false_probs]\n\n return class_probabilities, class_feature_probs", "def __init__(self, data, class_column):\n print(\"Naive Bayes Model created!\")\n\n # create report\n self.predict_summary = {}\n self.fit_report = {}\n\n # self.data=data\n self.data = data\n self.class_column = class_column\n\n # get the class column and get classes\n col_data = self.data[class_column]\n self.class_list = unique_list(col_data)\n\n # get numeric columns and categorical columns\n self.num_cols, self.cat_cols = get_both_columns(self.data, class_column)\n\n # Build the pro\n self.prob_hub = {}", "def bvlprms():\n # cretate modifrs \"BEVEL\"\n bpy.ops.transform.edge_bevelweight(value=1)\n bpy.ops.object.modifier_add(type='BEVEL')\n bnm = bpy.context.object.modifiers[- 1].name\n # my paramtrs a \"BEVEL\"\n bpy.context.object.modifiers[bnm].use_clamp_overlap = self.bclmp\n bpy.context.object.modifiers[bnm].limit_method = 'WEIGHT'\n bpy.context.object.modifiers[bnm].width = self.lst2\n bpy.context.object.modifiers[bnm].segments = 6\n bpy.context.object.modifiers[bnm].show_in_editmode = self.bedt\n bpy.context.scene.objects.active = bpy.context.scene.objects.active", "def class_and_replace(org_label, classes):\r\n # Classify and replace\r\n keys = np.unique(org_label)\r\n values = classes #0 for non veg, 1 for veg\r\n dictionary = dict(zip(keys, values))\r\n classified = replace(org_label, dictionary)\r\n \r\n # Label merged regions with unique ID\r\n labeld = label(classified)\r\n number_of_labels = np.unique(labeld)\r\n newvals = np.asarray(list(range(1,(len(number_of_labels) + 1))))\r\n keys_ID = number_of_labels\r\n values_ID = newvals\r\n dictionary_ID = dict(zip(keys_ID, values_ID))\r\n classified_ID = replace(labeld, dictionary_ID)\r\n \r\n del(labeld)\r\n \r\n return classified, classified_ID", "def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads):\n super(GAT, self).__init__()\n self.dropout = dropout\n self.outc=nclass\n self.FC=nn.Parameter(torch.zeros(size=(nhid*nheads, self.outc)))\n nn.init.xavier_uniform_(self.FC.data, gain=1.414)\n\n self.attentions = [GraphAttentionLayer(nfeat, nhid, dropout=dropout, alpha=alpha, concat=True) for _ in range(nheads)]\n for i, attention in enumerate(self.attentions):\n self.add_module('attention_{}'.format(i), attention)\n\n # self.out_att = GraphAttentionLayer(nhid * nheads, nclass, 
dropout=dropout, alpha=alpha, concat=False)", "def naiveBayes(train_set, train_labels, dev_set, smoothing_parameter, pos_prior):\n # TODO: Write your code here\n # return predicted labels of development set\n #\n ### len(train_set) 8000, len(dev) = 5000 P(pos) = 0.8 \n #### 0.55, 4.0, 0.30 ----------- 0.766\n #### 0.25 3.5 0.3 -------------- 0.766\n print(pos_prior)\n smoothing_parameter = 3.5\n pos_total_word = 0\n neg_total_word = 0\n pos_word_dict = {}\n neg_word_dict = {}\n dicts = [neg_word_dict, pos_word_dict]\n for i, sentence in enumerate(train_set):\n\n if train_labels[i] == 1: # positive reviews\n for word in sentence:\n pos_total_word += 1 \n if word in stop_words:\n continue\n if word in pos_word_dict:\n pos_word_dict[word] += 1\n else :\n pos_word_dict[word] = 1\n\n else: # negative reviews\n for word in sentence:\n neg_total_word += 1 \n if word in stop_words:\n continue\n if word in neg_word_dict:\n neg_word_dict[word] += 1\n else :\n neg_word_dict[word] = 1\n\n\n prob = {}\n denominator_pos = pos_total_word + smoothing_parameter * (len(pos_word_dict) + 1)\n denominator_neg = neg_total_word + smoothing_parameter * (len(neg_word_dict) + 1)\n de = [denominator_neg, denominator_pos]\n\n for t, dictionary in enumerate(dicts):\n for key, value in dictionary.items():\n if key not in prob:\n prob[key] = {0 : 0, 1 : 0}\n if smoothing_parameter != 0:\n prob[key][1 - t] = -1 * np.log(smoothing_parameter / de[t]) \n # print(prob[key][1 - t])\n\n prob[key][t] = -1 * np.log((value + smoothing_parameter) / de[t]) \n \n\n revised_prob = {}\n for key, value in prob.items():\n if np.abs(value[0] - value[1]) >= 0.25:\n revised_prob[key] = value \n\n print(len(revised_prob))\n\n dev_labels = []\n num_0 = 0\n for i, sentence in enumerate(dev_set):\n pos_odd = -1 * np.log(pos_prior)\n neg_odd = -1 * np.log(1.0 - pos_prior)\n for word in sentence:\n if word in revised_prob:\n pos_odd += revised_prob[word][1]\n neg_odd += revised_prob[word][0]\n \n if pos_odd > neg_odd:\n num_0 += 1\n dev_labels.append(1 if pos_odd <= neg_odd else 0)\n print(num_0)\n\n \n #### bigram model \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n return dev_labels", "def makeB2TwoDetachedDimuons(name,config,inputSel) :\n # define cuts on B object\n wm = ['in_range(%s,AM,%s)' % (config['MASS_MIN']['B'],\n config['MASS_MAX']['B'])]\n wm = '('+('|'.join(wm))+')'\n comboCuts = [LoKiCuts(['SUMPT'],config).code(),wm]\n comboCuts = LoKiCuts.combine(comboCuts)\n momCuts = LoKiCuts(['VCHI2DOF','BPVVDCHI2','BPVIPCHI2','BPVDIRA'], \n config).code()\n B2KSKS = CombineParticles(\"Combine\"+name)\n B2KSKS.DecayDescriptor = 'B0 -> KS0 KS0'\n B2KSKS.CombinationCut = comboCuts\n B2KSKS.MotherCut = momCuts\n \n return Selection(name,\n Algorithm = B2KSKS,\n RequiredSelections = inputSel)", "def naiveBayes(train_set, train_labels, dev_set, smoothing_parameter, org_dev_labels):\r\n\r\n # set to false to use bigram implementation instead\r\n # isUnigram = True\r\n isUnigram = True\r\n\r\n # return predicted labels of development set\r\n spam_words, spam_wordcount = parseIntoWordList(train_set, train_labels, 1)\r\n ham_words, ham_wordcount = parseIntoWordList(train_set, train_labels, 0)\r\n\r\n spamWords, spamProbs, spamUNK = createProbabilitiesList(spam_words, spam_wordcount, smoothing_parameter)\r\n hamWords, hamProbs, hamUNK = createProbabilitiesList(ham_words, ham_wordcount, smoothing_parameter)\r\n\r\n loggedSpam = np.log(spamProbs)\r\n loggedSpamUNK = np.log(spamUNK)\r\n loggedHam = np.log(hamProbs)\r\n loggedHamUNK = np.log(hamUNK)\r\n\r\n # 
Unigram\r\n dev_spam = []\r\n dev_ham = []\r\n\r\n dev_labels = []\r\n\r\n if isUnigram:\r\n for i in range(len(dev_set)):\r\n probSpam = 0\r\n probHam = 0\r\n\r\n for word in dev_set[i]:\r\n if word in spamWords:\r\n index = spamWords.index(word)\r\n probSpam += loggedSpam[index]\r\n else:\r\n probSpam += loggedSpamUNK\r\n\r\n if word in hamWords:\r\n index = hamWords.index(word)\r\n probHam += loggedHam[index]\r\n else:\r\n probHam += loggedHamUNK\r\n\r\n if (probSpam > probHam):\r\n dev_labels.append(1)\r\n else:\r\n dev_labels.append(0)\r\n\r\n else:\r\n for i in range(len(dev_set)):\r\n probSpam = 0\r\n probHam = 0\r\n\r\n for word in dev_set[i]:\r\n if word in spamWords:\r\n index = spamWords.index(word)\r\n probSpam += loggedSpam[index]\r\n else:\r\n probSpam += loggedSpamUNK\r\n\r\n if word in hamWords:\r\n index = hamWords.index(word)\r\n probHam += loggedHam[index]\r\n else:\r\n probHam += loggedHamUNK\r\n dev_spam.append(probSpam)\r\n dev_ham.append(probHam)\r\n # BiGram\r\n bi_spam_words, bi_spam_count = parseIntoBigramList(train_set, train_labels, 1)\r\n bi_ham_words, bi_ham_count = parseIntoBigramList(train_set, train_labels, 0)\r\n\r\n biSpamWords, biSpamProbs, biSpamUNK = createProbabilitiesList(bi_spam_words, bi_spam_count, smoothing_parameter)\r\n biHamWords, biHamProbs, biHamUNK = createProbabilitiesList(bi_ham_words, bi_ham_count, smoothing_parameter)\r\n\r\n biLoggedSpam = np.log(biSpamProbs)\r\n biLoggedSpamUNK = np.log(biSpamUNK)\r\n biLoggedHam = np.log(biHamProbs)\r\n biLoggedHamUNK = np.log(biHamUNK)\r\n\r\n # Bigram\r\n bi_dev_spam = []\r\n bi_dev_ham = []\r\n\r\n for i in range(len(dev_set)):\r\n biProbSpam = 0\r\n biProbHam = 0\r\n curr_email = dev_set[i]\r\n\r\n for j in range(len(curr_email) - 1):\r\n if (j % 2 == 1):\r\n continue\r\n curr_bigram = curr_email[j] + ' ' + curr_email[j + 1]\r\n\r\n if curr_bigram in biSpamWords:\r\n index = biSpamWords.index(curr_bigram)\r\n biProbSpam += biLoggedSpam[index]\r\n else:\r\n biProbSpam += biLoggedSpamUNK\r\n\r\n if curr_bigram in biHamWords:\r\n index = biHamWords.index(curr_bigram)\r\n biProbHam += biLoggedHam[index]\r\n else:\r\n probHam += biLoggedHamUNK\r\n bi_dev_spam.append(probSpam)\r\n bi_dev_ham.append(probHam)\r\n\r\n # Weights the models (1-lambda) multiplier for unigram and lamba multiplier for bigram\r\n dev_labels = getBigram(bi_dev_ham, bi_dev_spam, dev_ham, dev_set, dev_spam, org_dev_labels)\r\n\r\n return dev_labels", "def test_joint_parameter(self):\n assert_allclose(self.jf.fitparams[0], self.g1.parameters[0])\n assert_allclose(self.jf.fitparams[0], self.g2.parameters[0])", "def classify():\n yes_dataset = df[df[\"_class\"] == 1] # 470588\n no_dataset = df[df[\"_class\"] == 0] # 1971\n\n parameter_analysis = list()\n for criterion in np.arange(0.05, 0.91, 0.05):\n print(\"doing experiment at criterion = %s ...\" % criterion)\n rate_list = list()\n for i in range(10):\n # shuffle yes_dataset and no_dataset, so we can randomly choose 90% yes_dataset\n # + 90% no_dataset as train dataset, 10% yes_dataset + 10% no_dataset as test dataset\n yes_index = yes_dataset.index.tolist()\n random.shuffle(yes_index)\n no_index = no_dataset.index.tolist()\n random.shuffle(no_index)\n \n # concatenate 90%yes + 90%no, 10%yes + 10%no\n train = pd.concat([\n yes_dataset.loc[yes_index[:1774], :],\n no_dataset.loc[no_index[:423530], :]\n ])\n test = pd.concat([\n yes_dataset.loc[yes_index[1774:], :],\n no_dataset.loc[no_index[423530:], :]\n ]) \n \n # split data and label\n train_data, train_label = 
(train[[\"Revenue.Code\", \n \"Service.Code\", \n \"Procedure.Code\", \n \"Diagnosis.Code\", \n \"Subscriber.Index\"]], \n train[\"_class\"])\n test_data, test_label = (test[[\"Revenue.Code\", \n \"Service.Code\", \n \"Procedure.Code\", \n \"Diagnosis.Code\", \n \"Subscriber.Index\"]], \n test[\"_class\"])\n \n # apply classifier\n clf = GaussianNB()\n clf.fit(train_data, train_label)\n probability = clf.predict_proba(test_data).T[1]\n \n result = pd.DataFrame()\n result[\"_class\"] = test_label\n result[\"_predict\"] = probability >= criterion\n \n result_yes = result[result[\"_class\"] == 1]\n yes_yes_rate = sum(result_yes[\"_class\"] == result_yes[\"_predict\"])/len(result_yes[\"_predict\"])\n \n result_no = result[result[\"_class\"] == 0]\n no_no_rate = sum(result_no[\"_class\"] == result_no[\"_predict\"])/len(result_no[\"_predict\"])\n \n rate_list.append((yes_yes_rate, no_no_rate))\n\n rate_list = pd.DataFrame(rate_list)\n yes_yes_rate, no_no_rate = rate_list.mean()[0], rate_list.mean()[1]\n parameter_analysis.append((criterion, yes_yes_rate, no_no_rate))\n \n # save data to excel spreadsheet\n parameter_analysis = pd.DataFrame(parameter_analysis, columns=[\"criterion\", \"yes_yes_rate\", \"no_no_rate\"])\n writer = pd.ExcelWriter(\"parameter_analysis.xlsx\")\n parameter_analysis.to_excel(writer, \"parameter_analysis\", index=False)\n writer.save()", "def test_vector_class():\n points = 10\n riskfree = .03\n maturity = 30/365\n moneyness = np.linspace(-.04, .04, points)\n premium = np.ones_like(moneyness) * .05\n call = True\n data = {'riskfree': riskfree, 'maturity': maturity,\n 'moneyness': moneyness, 'call': call, 'premium': premium}\n\n sigma = np.ones(points) * .13\n bsm = BSmodel(sigma, data)\n\n print(bsm.premium())\n\n weights = np.ones(points) * .63\n means = np.vstack([np.ones(points) * -.01, np.ones(points) * .09])\n stds = np.vstack([np.ones(points) * .16, np.ones(points) * .05])\n param = np.vstack([weights, means, stds])\n mbs = MBSmodel(param, data)\n\n print(mbs.premium())\n\n param_a, param_p = np.ones(points) * 4.5, np.ones(points) * 2\n param_c = -.05 * np.ones(points)\n gb2 = GB2model([param_a, param_p, param_c], data)\n\n print(gb2.premium())", "def findGPCRclass(num_scheme):\n if num_scheme == \"gpcrdba\" or num_scheme == \"gpcrdb\":\n current_class =\"A\"\n #active_class[\"A\"]=[\"active gpcrbold\",\"in active\"]\n elif num_scheme == \"gpcrdbb\":\n current_class =\"B\"\n #active_class[\"B\"]=[\"active gpcrbold\",\"in active\"]\n elif num_scheme == \"gpcrdbc\":\n current_class =\"C\"\n #active_class[\"C\"]=[\"active gpcrbold\",\"in active\"]\n elif num_scheme == \"gpcrdbf\":\n current_class =\"F\"\n #active_class[\"F\"]=[\"active gpcrbold\",\"in active\"]\n return current_class", "def _complement(self, k, p):\n assert self._.d == 2, \"the complement is only defined for two classes\"\n if checkPos(self._.b[0] - self._.c[2]):\n return self._get_class()((k[2], p[2, 2, 1]),\n (Integer(1), p[1, 2, 2]),\n complement=self)\n else:\n return ASParameters._complement(self)", "def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads):\n super(GAT, self).__init__()\n self.dropout = dropout\n\n self.attentions = [GraphAttentionLayer(nfeat, nhid, dropout=dropout, alpha=alpha, concat=True) for _ in range(nheads)]\n for i, attention in enumerate(self.attentions):\n self.add_module('attention_{}'.format(i), attention)\n\n self.out_att = GraphAttentionLayer(nhid * nheads, nclass, dropout=dropout, alpha=alpha, concat=False)", "def class AlphaTheta(object):\n pass", "def 
__convert_prob_into_class(self, probs):\n probs = T.set_subtensor(probs[probs > 0.5], 1)\n return T.set_subtensor(probs[probs <= 0.5], 0)", "def cross(g1, g2):\n\n g1_haploid = product(*g1.split(\",\"))\n g2_haploid = product(*g2.split(\",\"))\n\n probs = defaultdict(float)\n\n count = 0\n for h1, h2 in product(g1_haploid, g2_haploid):\n offspring_g = \",\".join(\"\".join(sorted(i)) for i in zip(h1, h2))\n count += 1\n probs[offspring_g] += 1.0\n\n for gtype in probs.keys():\n probs[gtype] /= count\n\n return probs", "def CalculateProbabilities(self, beta_0, beta_1):\n denom = self.zero_zero + self.zero_one + self.one_zero + self.one_one + beta_0 + beta_1 - 2\n if denom != 0:\n self.prob_zero = min( max( (self.zero_zero + self.zero_one + beta_0 - 1) / denom, 0.0 ), 1.0 )\n self.prob_one = min( max( (self.one_zero + self.one_one + beta_1 - 1) / denom, 0.0 ), 1.0 )\n \n denom = self.zero_zero + self.one_zero + beta_0 + beta_1 - 2\n if denom != 0:\n self.prob_zero_given_zero = min( max( (self.zero_zero + beta_0 - 1) / denom, 0.0 ), 1.0 )\n self.prob_one_given_zero = min( max( (self.one_zero + beta_1 - 1) / denom, 0.0 ), 1.0 )\n \n denom = self.zero_one + self.one_one + beta_0 + beta_1 - 2\n if denom != 0:\n self.prob_zero_given_one = min( max( (self.zero_one + beta_0 - 1) / denom, 0.0 ), 1.0 )\n self.prob_one_given_one = min( max( (self.one_one + beta_1 - 1) / denom, 0.0 ), 1.0 )", "def __init__(self, coor1, coor2):\n self.coor1 = coor1\n self.coor2 = coor2", "def balence_classes(df, btol):\r\n #Find the least supported class and muliply by the tolerance coefficient to get max_count:\r\n ccounts = df['classification'].value_counts()\r\n max_count = np.min(ccounts.values) * btol\r\n #Create a new dataframe with balenced support:\r\n newdf = pd.DataFrame(columns=df.columns.values)\r\n for x in df.groupby('classification'):\r\n if x[1].shape[0] > max_count:\r\n newdf = newdf.append(x[1].sample(max_count).reset_index(drop=True))\r\n else:\r\n newdf = newdf.append(x[1].reset_index(drop=True))\r\n return newdf.reset_index(drop=True)", "def jointly_equally_distributed(\n class1: Av, class2: Av, n: int = 6, dim: int = 2\n ) -> Iterator[Tuple[str, ...]]:\n return (\n tuple(stat[0] for stat in stats)\n for stats in combinations(PermutationStatistic._STATISTICS, dim)\n if all(\n Counter(\n tuple(stat[1](p) for stat in stats) for p in class1.of_length(i)\n )\n == Counter(\n tuple(stat[1](p) for stat in stats) for p in class2.of_length(i)\n )\n for i in range(n + 1)\n )\n )", "def evaluate_dep_type_sets():\n strategies = {\n 'defensive': ['agent', 'advcl', 'parataxis'],\n 'aggressive': ['agent', 'advcl', 'parataxis', 'dep', 'aux', 'ccomp', 'xcomp', 'dobj', 'pobj', 'nsubj', 'nsubjpass', 'cc', 'abbrev', 'purpcl', 'predet', 'preconj', 'advmod', 'neg', 'rcmod', 'tmod', 'poss', 'prepc'],\n 'compromise_1': ['agent', 'advcl', 'parataxis', 'aux', 'xcomp', 'pobj', 'nsubjpass', 'cc', 'abbrev', 'purpcl', 'predet', 'neg', 'tmod', 'poss', 'prepc'],\n 'compromise_2': ['agent', 'advcl', 'parataxis', 'aux', 'xcomp', 'pobj', 'nsubjpass', 'cc', 'abbrev', 'purpcl', 'predet', 'neg', 'tmod', 'poss', 'prepc', 'attr', 'csubj', 'csubjpass', 'number', 'possessive', 'punct', 'ref']\n }\n results = {'classification':{}, 'retrieval':{}}\n\n print '------ CLASSIFICATION EVALUATION --------'\n print '> Reading cases..'\n descriptions_path = '../data/tasa/TASA900_dependencies'\n texts, labels = data.read_files(descriptions_path)\n print '> Creating representations..'\n rep = {}\n for strategy in strategies:\n rep[strategy] = []\n 
metric = graph.GraphMetrics.CLOSENESS\n for i, text in enumerate(texts):\n if i%10==0: print ' ',str(i)+'/'+str(len(texts))\n for strategy in strategies:\n g = graph_representation.construct_dependency_network(text, exclude=strategies[strategy])\n d = graph_representation.graph_to_dict(g, metric)\n rep[strategy].append(d)\n g = None # just to make sure. I don't trust this damn garbage collector...\n for strategy in strategies:\n rep[strategy] = graph_representation.dicts_to_vectors(rep[strategy])\n print '> Evaluating..'\n for strategy in strategies:\n score = evaluation.evaluate_classification(rep[strategy], labels)\n print ' ', strategy, score\n results['classification'][strategy] = score\n\n data.pickle_to_file(results, 'output/dependencies/types_set_eval_tmp')\n\n print '------ RETRIEVAL EVALUATION --------'\n print '> Reading cases..'\n descriptions_path = '../data/air/problem_descriptions_dependencies'\n description_texts, labels = data.read_files(descriptions_path)\n solutions_path = '../data/air/solutions_preprocessed'\n solution_texts, labels = data.read_files(solutions_path)\n solution_vectors = freq_representation.text_to_vector(solution_texts, freq_representation.FrequencyMetrics.TF_IDF)\n print '> Creating representations..'\n rep = {}\n for strategy in strategies:\n rep[strategy] = []\n metric = graph.GraphMetrics.EIGENVECTOR\n for i, text in enumerate(description_texts):\n if i%1==0: print ' ',str(i)+'/'+str(len(description_texts))\n full_graph = graph_representation.construct_dependency_network(text)\n for strategy in strategies:\n g = graph_representation.construct_dependency_network(text, exclude=strategies[strategy])\n d = graph_representation.graph_to_dict(g, metric)\n rep[strategy].append(d)\n g = None # just to make sure..\n full_graph = None\n #~ if i%100==0: data.pickle_to_file(rep, 'output/dependencies/types_eval_rep_'+str(i))\n for strategy in strategies:\n rep[strategy] = graph_representation.dicts_to_vectors(rep[strategy])\n print '> Evaluating..'\n for strategy in strategies:\n score = evaluation.evaluate_retrieval(rep[strategy], solution_vectors)\n print ' ', strategy, score\n results['retrieval'][strategy] = score\n\n pp.pprint(results)\n data.pickle_to_file(results, 'output/dependencies/types_set_eval')\n\n return results", "def assemble_g_and_B(self, residuals, jacobians, dofs, args, g, B):\n # Assemble B\n self._build_B(jacobians, dofs, args, B)\n\n # Assemble holonomic constraint function\n self._build_g(residuals, dofs, args, g)\n\n return g, B", "def V_checkers_ablation(s_grid, s_n, g_n, s_others,\n f1=4, k1=[3,5], n_h1=128, n_h2=32):\n with tf.variable_scope(\"stage-2\"):\n conv = convnet_1(s_grid, f1=f1, k1=k1, s1=[1,1], scope='conv')\n concated = tf.concat( [conv, s_n, g_n, s_others], axis=1 )\n h1 = tf.layers.dense(inputs=concated, units=n_h1, activation=tf.nn.relu, use_bias=True, name='V_h1')\n h2 = tf.layers.dense(inputs=h1, units=n_h2, activation=tf.nn.relu, use_bias=True, name='V_h2')\n out = tf.layers.dense(inputs=h2, units=1, activation=None, use_bias=False, name='V_out')\n return out", "def __call__(self, class_logits, box_regression, proposals):\n\n class_logits = cat(class_logits, dim=0)\n box_regression = cat(box_regression, dim=0)\n device = class_logits.device\n\n labels = cat([proposal.get_field(\"labels\") for proposal in proposals], dim=0)\n regression_targets = cat([proposal.get_field(\"regression_targets\") for proposal in proposals], dim=0)\n\n classification_loss = F.cross_entropy(class_logits, labels.long())\n\n # get indices 
that correspond to the regression targets for\n # the corresponding ground truth labels, to be used with\n # advanced indexing\n sampled_pos_inds_subset = torch.nonzero(labels > 0).squeeze(1)\n labels_pos = labels[sampled_pos_inds_subset]\n if self.cls_agnostic_bbox_reg:\n map_inds = torch.tensor([4, 5, 6, 7], device=device)\n else:\n map_inds = 4 * labels_pos[:, None] + torch.tensor([0, 1, 2, 3], device=device)\n\n box_loss = smooth_l1_loss(\n box_regression[sampled_pos_inds_subset[:, None], map_inds],\n regression_targets[sampled_pos_inds_subset],\n size_average=False,\n beta=1,\n )\n box_loss = box_loss / labels.numel()\n\n return classification_loss, box_loss", "def __init__(self, alpha0, alpha1, beta, EPS=1E-8, discretization=100):\n\n LoiAPriori.__init__(self, EPS=EPS, discretization=discretization)\n\n self.__alpha0 = alpha0\n self.__alpha1 = alpha1\n self.__beta = beta\n self.__eta = 3./8.*(1. - self.__alpha0 - self.__alpha1 - 2.*self.__beta)\n if self.__eta<0.: self.__eta=0. # erreur d'arrondi\n #print('self.__eta=', self.__eta)\n\n self.update()", "def GED(g1, g2, lib='gedlibpy', cost='CHEM_1', method='IPFP',\n edit_cost_constant=[], stabilizer='min', repeat=50):\n if lib == 'gedlibpy':\n def convertGraph(G):\n \"\"\"Convert a graph to the proper NetworkX format that can be\n recognized by library gedlibpy.\n \"\"\"\n G_new = nx.Graph()\n for nd, attrs in G.nodes(data=True):\n G_new.add_node(str(nd), chem=attrs['atom_symbol'])\n# G_new.add_node(str(nd), x=str(attrs['attributes'][0]),\n# y=str(attrs['attributes'][1]))\n for nd1, nd2, attrs in G.edges(data=True):\n G_new.add_edge(str(nd1), str(nd2), valence=attrs['bond_type'])\n# G_new.add_edge(str(nd1), str(nd2))\n\n return G_new\n\n gedlibpy.restart_env()\n gedlibpy.add_nx_graph(convertGraph(g1), \"\")\n gedlibpy.add_nx_graph(convertGraph(g2), \"\")\n\n listID = gedlibpy.get_all_graph_ids()\n gedlibpy.set_edit_cost(cost, edit_cost_constant=edit_cost_constant)\n gedlibpy.init()\n gedlibpy.set_method(method, \"\")\n gedlibpy.init_method()\n\n g = listID[0]\n h = listID[1]\n if stabilizer == None:\n gedlibpy.run_method(g, h)\n pi_forward = gedlibpy.get_forward_map(g, h)\n pi_backward = gedlibpy.get_backward_map(g, h)\n upper = gedlibpy.get_upper_bound(g, h)\n lower = gedlibpy.get_lower_bound(g, h)\n elif stabilizer == 'mean':\n # @todo: to be finished...\n upper_list = [np.inf] * repeat\n for itr in range(repeat):\n gedlibpy.run_method(g, h)\n upper_list[itr] = gedlibpy.get_upper_bound(g, h)\n pi_forward = gedlibpy.get_forward_map(g, h)\n pi_backward = gedlibpy.get_backward_map(g, h)\n lower = gedlibpy.get_lower_bound(g, h)\n upper = np.mean(upper_list)\n elif stabilizer == 'median':\n if repeat % 2 == 0:\n repeat += 1\n upper_list = [np.inf] * repeat\n pi_forward_list = [0] * repeat\n pi_backward_list = [0] * repeat\n for itr in range(repeat):\n gedlibpy.run_method(g, h)\n upper_list[itr] = gedlibpy.get_upper_bound(g, h)\n pi_forward_list[itr] = gedlibpy.get_forward_map(g, h)\n pi_backward_list[itr] = gedlibpy.get_backward_map(g, h)\n lower = gedlibpy.get_lower_bound(g, h)\n upper = np.median(upper_list)\n idx_median = upper_list.index(upper)\n pi_forward = pi_forward_list[idx_median]\n pi_backward = pi_backward_list[idx_median]\n elif stabilizer == 'min':\n upper = np.inf\n for itr in range(repeat):\n gedlibpy.run_method(g, h)\n upper_tmp = gedlibpy.get_upper_bound(g, h)\n if upper_tmp < upper:\n upper = upper_tmp\n pi_forward = gedlibpy.get_forward_map(g, h)\n pi_backward = gedlibpy.get_backward_map(g, h)\n lower = 
gedlibpy.get_lower_bound(g, h)\n if upper == 0:\n break\n elif stabilizer == 'max':\n upper = 0\n for itr in range(repeat):\n gedlibpy.run_method(g, h)\n upper_tmp = gedlibpy.get_upper_bound(g, h)\n if upper_tmp > upper:\n upper = upper_tmp\n pi_forward = gedlibpy.get_forward_map(g, h)\n pi_backward = gedlibpy.get_backward_map(g, h)\n lower = gedlibpy.get_lower_bound(g, h)\n elif stabilizer == 'gaussian':\n pass\n\n dis = upper\n\n # make the map label correct (label remove map as np.inf)\n nodes1 = [n for n in g1.nodes()]\n nodes2 = [n for n in g2.nodes()]\n nb1 = nx.number_of_nodes(g1)\n nb2 = nx.number_of_nodes(g2)\n pi_forward = [nodes2[pi] if pi < nb2 else np.inf for pi in pi_forward]\n pi_backward = [nodes1[pi] if pi <\n nb1 else np.inf for pi in pi_backward]\n\n return dis, pi_forward, pi_backward", "def attribute_interactions(self, a, b, total_rel_ig_ab=None):\n var_a = self.data.domain.variables[a]\n var_b = self.data.domain.variables[b]\n ig_a = self.info_gains[var_a.name]\n ig_b = self.info_gains[var_b.name]\n if not total_rel_ig_ab:\n ig_ab = ig_a + ig_b - (self.class_entropy + self.h(self.get_probs(var_a, var_b))) + \\\n self.h(self.get_probs(var_a, var_b, self.data.domain.variables[-1]))\n else:\n ig_ab = ig_a + ig_b - total_rel_ig_ab * self.class_entropy\n inter = Interaction(var_a, var_b, ig_a, ig_b, ig_ab, self.class_entropy)\n return inter", "def classProbs(observation, tree, classes):\n res = classify(observation, tree) #res = results\n total = sum(res.values())\n probs = []\n for c in classes:\n if c in res.keys():\n probs.append(float(res[c])/total)\n else:\n probs.append(0)\n return probs", "def test_GNB():\n gnb = GNB()\n n = 10\n nclasses = 2\n nfeatures = 4\n means = (np.tile(10**np.arange(nfeatures), (nclasses, 1))\n * np.arange(1, nclasses+1)[:,None])\n sds = (np.tile(.1**(np.arange(nfeatures)), (nclasses, 1))\n * np.arange(1, nclasses+1)[:, None])\n\n x = np.concatenate(np.random.normal(means, sds, (n // nclasses, *means.shape)))\n y = np.tile(np.arange(nclasses), n // nclasses)\n\n gnb.fit(x, y)\n pred = gnb.predict(x)\n\n print(gnb.means.shape, means.shape)\n print(gnb.sds.shape, sds.shape)\n\n print('comparing mean estimates')\n print('========================')\n print(gnb.means - means)\n print(f'largest difference = {np.max(np.abs(gnb.means - means)):.3f}')\n\n print('\\ncomparing sd estimates')\n print('======================')\n print(gnb.sds - sds)\n print(f'largest difference = {np.max(np.abs(gnb.sds - sds)):.3f}')\n\n print('\\ncomparing predictions')\n print('=====================')\n print(y - pred)\n print(f'largest difference = {np.max(np.abs(pred - y)):.3f}')\n\n\n preds = modeltest(gnb, x, y, x)\n assert np.all(preds == y)", "def merge_bowl(self):\n self.B += self.Bowl_bS\n self.Bc += self.Bowl_bC\n self.W += self.Bowl_WS\n self.Wc += self.Bowl_WS", "def get_balancing_probabilities(instances):\n count_continue = sum(classification == Action.CONTINUE \n for _, classification in instances)\n count_turn_peak = sum(classification == Action.TURN_PEAK\n for _, classification in instances)\n count_backtrack = sum(classification == Action.BACKTRACK\n for _, classification in instances)\n\n min_count = min(count_continue, count_turn_peak, count_backtrack)\n probabilities = { Action.CONTINUE : multiplier_continue * \n float(min_count) / count_continue,\n Action.TURN_PEAK : multiplier_turn_peak * \n float(min_count) / count_turn_peak,\n Action.BACKTRACK : multiplier_backtrack *\n float(min_count) / count_backtrack }\n return probabilities", "def 
ent_reg_cost(geom: geometry.Geometry,\n a: jnp.ndarray,\n b: jnp.ndarray,\n tau_a: float,\n tau_b: float,\n f: jnp.ndarray,\n g: jnp.ndarray) -> jnp.ndarray:\n\n if tau_a == 1.0:\n div_a = jnp.sum(\n jnp.where(a > 0, (f - geom.potential_from_scaling(a)) * a, 0.0))\n else:\n rho_a = geom.epsilon * (tau_a / (1 - tau_a))\n div_a = jnp.sum(\n jnp.where(\n a > 0,\n a * (rho_a - (rho_a + geom.epsilon / 2) *\n jnp.exp(-(f - geom.potential_from_scaling(a)) / rho_a)), 0.0))\n\n if tau_b == 1.0:\n div_b = jnp.sum(\n jnp.where(b > 0, (g - geom.potential_from_scaling(b)) * b, 0.0))\n else:\n rho_b = geom.epsilon * (tau_b / (1 - tau_b))\n div_b = jnp.sum(\n jnp.where(\n b > 0,\n b * (rho_b - (rho_b + geom.epsilon / 2) *\n jnp.exp(-(g - geom.potential_from_scaling(b)) / rho_b)), 0.0))\n\n # Using https://arxiv.org/pdf/1910.12958.pdf (30), corrected with (15)\n # The total mass of the coupling is computed in scaling space. This avoids\n # differentiation issues linked with the automatic differention of\n # jnp.exp(jnp.logsumexp(...)) when some of those logs appear as -inf.\n # Because we are computing total mass it is irrelevant to have underflow since\n # this would simply result in near 0 contributions, which, unlike Sinkhorn\n # iterations, do not appear next in a numerator.\n total_sum = jnp.sum(geom.marginal_from_scalings(\n geom.scaling_from_potential(f), geom.scaling_from_potential(g)))\n return div_a + div_b + geom.epsilon * (jnp.sum(a) * jnp.sum(b) - total_sum)", "def _get_bclass(mal):\n gen = (len(list(group)) for _, group in itertools.groupby(mal))\n return tuple(sorted(gen, reverse=True))" ]
[ "0.6031749", "0.5755874", "0.5419031", "0.53809613", "0.5378334", "0.5378009", "0.53204846", "0.52991605", "0.52701527", "0.52547026", "0.52520907", "0.523579", "0.51886666", "0.5179942", "0.51621455", "0.5144527", "0.5130142", "0.5116478", "0.51110935", "0.51048064", "0.510119", "0.5093525", "0.50917774", "0.5085855", "0.5072545", "0.50439686", "0.50119615", "0.50037694", "0.50000757", "0.49944207", "0.4983827", "0.4981201", "0.49455357", "0.49445978", "0.4944527", "0.49444902", "0.49371514", "0.4918503", "0.491368", "0.49128228", "0.49128208", "0.49048844", "0.49025142", "0.489963", "0.48973057", "0.4895106", "0.48946717", "0.4889278", "0.48866662", "0.48831087", "0.48825732", "0.48802608", "0.4877677", "0.48741603", "0.48701835", "0.48658907", "0.48635915", "0.4863387", "0.4862543", "0.48602626", "0.48600075", "0.48515204", "0.48450032", "0.48362306", "0.483488", "0.48337194", "0.48313847", "0.48288035", "0.48253334", "0.4823979", "0.48237303", "0.4813115", "0.48054293", "0.48039043", "0.47993296", "0.47968233", "0.47924185", "0.47840667", "0.47837254", "0.47816837", "0.47758242", "0.47734648", "0.47684819", "0.47633877", "0.4761424", "0.47606456", "0.4758191", "0.4755365", "0.47552273", "0.47545525", "0.47517458", "0.47507325", "0.47493088", "0.47481048", "0.4745554", "0.47440812", "0.47435424", "0.4743402", "0.47431922", "0.4742164", "0.47410825" ]
0.0
-1
EscrowTransactionResponse - a model defined in Swagger
def __init__(self, id=None, payee_wallet_id=None, payer_wallet_id=None, amount=None, withdrawn=None, escrow_address=None, record_status=None, create_date=None, update_date=None):  # noqa: E501
    self._id = None
    self._payee_wallet_id = None
    self._payer_wallet_id = None
    self._amount = None
    self._withdrawn = None
    self._escrow_address = None
    self._record_status = None
    self._create_date = None
    self._update_date = None
    self.discriminator = None

    if id is not None:
        self.id = id
    if payee_wallet_id is not None:
        self.payee_wallet_id = payee_wallet_id
    if payer_wallet_id is not None:
        self.payer_wallet_id = payer_wallet_id
    if amount is not None:
        self.amount = amount
    if withdrawn is not None:
        self.withdrawn = withdrawn
    if escrow_address is not None:
        self.escrow_address = escrow_address
    if record_status is not None:
        self.record_status = record_status
    if create_date is not None:
        self.create_date = create_date
    if update_date is not None:
        self.update_date = update_date
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def model_to_rest_resource(self, model, verbose=False):\n return Resource(model, TRANSACTION_FIELDS).to_dict(verbose)", "def _create_response_model(self, data):\n pass", "def to_dict(self):\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(EscrowTransactionResponse, dict):\n for key, value in self.items():\n result[key] = value\n\n return result", "def error(transaction, code): # pragma: no cover\n transaction.block_transfer = True\n transaction.response = Response()\n transaction.response.destination = transaction.request.source\n transaction.response.type = defines.Types[\"RST\"]\n transaction.response.token = transaction.request.token\n transaction.response.code = code\n return transaction", "def serialize_response(self, response):\n raise NotImplementedError()", "def to_payload(self, model):\n return model", "def normalize_transaction_result(cls, result: JSON) -> JSON:\n ...", "def get_response(self):\n res = ARBlockRes()\n for field in [\"ARType\", \"ARUUID\", \"SessionKey\"]:\n res.setfieldval(field, self.getfieldval(field))\n return res", "def serializer_class(self):", "def to_primitive(self):\n data = dict(\n template=sha256(self.template.template.encode('utf-8')).hexdigest(),\n rights_modules=list(self.rights_modules.values_list('ident', flat=True)),\n transaction_models=[self.transaction_model.ident],\n )\n wrapped = {'json': data}\n return wrapped", "def post(self, request):\n serializer = UnitComplexSerializer(request.data)\n serializer.save()\n return Response(serializer.data)", "def __init__(self):\n self.swagger_types = {\n 'id_conta': 'int',\n 'id_pessoa': 'int',\n 'id_cartao': 'int',\n 'id_bandeira': 'int',\n 'id_tipo_cartao': 'int',\n 'numero_cartao': 'str',\n 'nome_plastico': 'str',\n 'cvv2': 'str',\n 'data_geracao': 'str',\n 'data_validade': 'str',\n 'cpf': 'str',\n 'tipo_portador': 'str',\n 'trilha1': 'str',\n 'trilha2': 'str',\n 'trilha_cvv1': 'str',\n 'trilha_cvv2': 'str',\n 'flag_virtual': 'int',\n 'nome_bandeira': 'str',\n 'flag_titular': 'int',\n 'sequencial_cartao': 'int',\n 'id_status': 'int',\n 'descricao_status_cartao': 'str',\n 'data_status': 'str',\n 'id_estagio': 'int',\n 'descricao_estagio': 'str',\n 'data_estagio': 'str',\n 'numero_bin': 'str',\n 'id_produto': 'int',\n 'descricao_produto': 'str',\n 'id_status_conta': 'int',\n 'descricao_status_conta': 'int',\n 'data_embossing': 'str',\n 'codigo_desbloqueio': 'str',\n 'nome_pessoa': 'str',\n 'tipo_pessoa': 'str',\n 'data_nascimento': 'str',\n 'id_endereco': 'int',\n 'id_tipo_endereco': 'int',\n 'descricao_tipo_endereco': 'str',\n 'cep': 'str',\n 'logradouro': 'str',\n 'numero_endereco': 'str',\n 'complemento_endereco': 'str',\n 'bairro': 'str',\n 'cidade': 'str',\n 'uf': 'str',\n 'pais': 'str',\n 'senha_criptografada': 'str',\n 'icvv': 'str',\n 'id_status_impressao': 'int'\n }\n\n self.attribute_map = {\n 'id_conta': 'idConta',\n 'id_pessoa': 'idPessoa',\n 'id_cartao': 'idCartao',\n 'id_bandeira': 'idBandeira',\n 'id_tipo_cartao': 'idTipoCartao',\n 'numero_cartao': 'numeroCartao',\n 'nome_plastico': 'nomePlastico',\n 'cvv2': 'cvv2',\n 
'data_geracao': 'dataGeracao',\n 'data_validade': 'dataValidade',\n 'cpf': 'cpf',\n 'tipo_portador': 'tipoPortador',\n 'trilha1': 'trilha1',\n 'trilha2': 'trilha2',\n 'trilha_cvv1': 'trilhaCVV1',\n 'trilha_cvv2': 'trilhaCVV2',\n 'flag_virtual': 'flagVirtual',\n 'nome_bandeira': 'nomeBandeira',\n 'flag_titular': 'flagTitular',\n 'sequencial_cartao': 'sequencialCartao',\n 'id_status': 'idStatus',\n 'descricao_status_cartao': 'descricaoStatusCartao',\n 'data_status': 'dataStatus',\n 'id_estagio': 'idEstagio',\n 'descricao_estagio': 'descricaoEstagio',\n 'data_estagio': 'dataEstagio',\n 'numero_bin': 'numeroBin',\n 'id_produto': 'idProduto',\n 'descricao_produto': 'descricaoProduto',\n 'id_status_conta': 'idStatusConta',\n 'descricao_status_conta': 'descricaoStatusConta',\n 'data_embossing': 'dataEmbossing',\n 'codigo_desbloqueio': 'codigoDesbloqueio',\n 'nome_pessoa': 'nomePessoa',\n 'tipo_pessoa': 'tipoPessoa',\n 'data_nascimento': 'dataNascimento',\n 'id_endereco': 'idEndereco',\n 'id_tipo_endereco': 'idTipoEndereco',\n 'descricao_tipo_endereco': 'descricaoTipoEndereco',\n 'cep': 'cep',\n 'logradouro': 'logradouro',\n 'numero_endereco': 'numeroEndereco',\n 'complemento_endereco': 'complementoEndereco',\n 'bairro': 'bairro',\n 'cidade': 'cidade',\n 'uf': 'uf',\n 'pais': 'pais',\n 'senha_criptografada': 'senhaCriptografada',\n 'icvv': 'icvv',\n 'id_status_impressao': 'idStatusImpressao'\n }\n\n self._id_conta = None\n self._id_pessoa = None\n self._id_cartao = None\n self._id_bandeira = None\n self._id_tipo_cartao = None\n self._numero_cartao = None\n self._nome_plastico = None\n self._cvv2 = None\n self._data_geracao = None\n self._data_validade = None\n self._cpf = None\n self._tipo_portador = None\n self._trilha1 = None\n self._trilha2 = None\n self._trilha_cvv1 = None\n self._trilha_cvv2 = None\n self._flag_virtual = None\n self._nome_bandeira = None\n self._flag_titular = None\n self._sequencial_cartao = None\n self._id_status = None\n self._descricao_status_cartao = None\n self._data_status = None\n self._id_estagio = None\n self._descricao_estagio = None\n self._data_estagio = None\n self._numero_bin = None\n self._id_produto = None\n self._descricao_produto = None\n self._id_status_conta = None\n self._descricao_status_conta = None\n self._data_embossing = None\n self._codigo_desbloqueio = None\n self._nome_pessoa = None\n self._tipo_pessoa = None\n self._data_nascimento = None\n self._id_endereco = None\n self._id_tipo_endereco = None\n self._descricao_tipo_endereco = None\n self._cep = None\n self._logradouro = None\n self._numero_endereco = None\n self._complemento_endereco = None\n self._bairro = None\n self._cidade = None\n self._uf = None\n self._pais = None\n self._senha_criptografada = None\n self._icvv = None\n self._id_status_impressao = None", "def get_response_model_ctor(self):\n return self._response_model_ctor", "def new_get_transactions(self, cb_account_id):\n if cb_account_id == \"wallet_id_ltc\":\n return MockAPIObject(data=[{\n \"id\": \"12234-6666-8888-0000-1111111111\",\n \"type\": \"send\",\n \"status\": \"completed\",\n \"amount\": {\n \"amount\": \"-0.2\",\n \"currency\": \"LTC\"\n },\n \"native_amount\": {\n \"amount\": \"-46.00\",\n \"currency\": \"EUR\"\n },\n \"description\": None,\n \"created_at\": \"2017-12-15T15:00:00Z\",\n \"updated_at\": \"2017-12-15T15:00:00Z\",\n \"resource\": \"transaction\",\n \"network\": {\n \"status\": \"confirmed\",\n \"hash\": \"123456789\",\n \"transaction_fee\": {\n \"amount\": \"0.001\",\n \"currency\": \"LTC\"\n },\n 
\"transaction_amount\": {\n \"amount\": \"0.199\",\n \"currency\": \"LTC\"\n },\n \"confirmations\": 54000\n },\n \"to\": {\n \"resource\": \"litecoin_address\",\n \"address\": \"LcnAddress1\",\n \"currency\": \"LTC\"\n },\n \"details\": {\n \"title\": \"Sent Litecoin\",\n \"subtitle\": \"To Litecoin address\"\n }\n }, \n {\n \"id\": \"aaaaaaaaa-aaaa-aaaaaa-eeee-aaaaaa\",\n \"type\": \"send\",\n \"status\": \"completed\",\n \"amount\": {\n \"amount\": \"-0.4\",\n \"currency\": \"LTC\"\n },\n \"native_amount\": {\n \"amount\": \"-90.00\",\n \"currency\": \"EUR\"\n },\n \"description\": None,\n \"created_at\": \"2017-12-11T19:00:00Z\",\n \"updated_at\": \"2017-12-11T19:00:00Z\",\n \"resource\": \"transaction\",\n \"instant_exchange\": False,\n \"network\": {\n \"status\": \"confirmed\",\n \"hash\": \"123456789\",\n \"transaction_fee\": {\n \"amount\": \"0.001\",\n \"currency\": \"LTC\"\n },\n \"transaction_amount\": {\n \"amount\": \"0.399\",\n \"currency\": \"LTC\"\n },\n \"confirmations\": 15387\n },\n \"to\": {\n \"resource\": \"litecoin_address\",\n \"address\": \"LcnAddress2\",\n \"currency\": \"LTC\"\n },\n \"details\": {\n \"title\": \"Sent Litecoin\",\n \"subtitle\": \"To Litecoin address\"\n }\n }, \n {\n \"id\": \"aaaaaaaaa-aaaa-aaaaaa-eeee-aaaaaa\",\n \"type\": \"send\",\n \"status\": \"completed\",\n \"amount\": {\n \"amount\": \"1.0\",\n \"currency\": \"LTC\"\n },\n \"native_amount\": {\n \"amount\": \"90.00\",\n \"currency\": \"EUR\"\n },\n \"description\": None,\n \"created_at\": \"2017-12-11T19:00:00Z\",\n \"updated_at\": \"2017-12-11T19:00:00Z\",\n \"resource\": \"transaction\",\n \"instant_exchange\": False,\n \"network\": {\n \"status\": \"off_blockchain\",\n },\n }])\n else:\n return MockAPIObject()", "def serialize(self):\n return {\n \"id\": self.id,\n \"sid\": self.sid,\n \"sku\": self.sku,\n \"name\": self.name,\n \"price\": self.price,\n \"amount\": self.amount,\n \"create_time\": self.create_time,\n \"update_time\": self.update_time\n }", "def make_response(self):\n params = {\n 'tweet.fields': 'created_at,public_metrics,entities',\n 'expansions': 'author_id',\n 'user.fields': 'description'\n }\n return self.response_limit(params)", "def to_response(self):\n raise NotImplementedError(\"Must define to_response on `%s`\" % self.__class__.__name__)", "def __call__(self, r):\n # modify and return the request\n nonce = ExchBitmexRestApiConnector.generate_nonce()\n r.headers['api-nonce'] = str(nonce)\n r.headers['api-key'] = self.apiKey\n r.headers['api-signature'] = ExchBitmexRestApiConnector.generate_signature(\n self.apiSecret, r.method, r.url, nonce, r.body or '')\n return r", "def to_response_data(self) -> typing.Any:\n return None", "def post(self, request):\n serializer = UnitSerializer(request.data)\n serializer.save()\n return Response(\n serializer.data\n )", "def __init__(self, response, accountId=False):\n self.result = response \n self.accountId = accountId", "def _openapi_json(self):\n # We don't use Flask.jsonify here as it would sort the keys\n # alphabetically while we want to preserve the order.\n from pprint import pprint\n pprint(self.to_dict())\n return current_app.response_class(json.dumps(self.to_dict(), indent=4),\n mimetype='application/json')", "def get(self, request, *args, **kwargs):\n lookup = {\n \"property__name\":self.kwargs[\"property\"],\n \"property__exhibit\": self.get_parent_object()\n }\n transaction = get_object_or_404(AugmentTransaction, **lookup)\n\n still_running_statuses = (models.TX_STATUS[\"pending\"],\n 
models.TX_STATUS[\"scheduled\"],\n models.TX_STATUS[\"running\"])\n if transaction.status == models.TX_STATUS[\"success\"]:\n body = json.dumps(transaction.result)\n response = HttpResponse(body, status=201)\n data_url = reverse('draft_exhibit_property_data',\n kwargs={\n 'owner': self.kwargs[\"owner\"],\n 'slug': self.kwargs[\"slug\"],\n 'property': self.kwargs[\"property\"]\n })\n response[\"Location\"] = data_url\n response[\"Content-Type\"] = \"application/json\"\n response[\"Expires\"] = 0\n elif transaction.status in still_running_statuses:\n response = HttpResponse(\"{}\")\n response[\"Content-Type\"] = \"application/json\"\n else:\n # transction has failed or been cancelled\n body = json.dumps(transaction.result)\n response = HttpResponse(body)\n response[\"Content-Type\"] = \"application/json\"\n return response", "def to_json(self):\n\n output = {\n \"typeName\":self.typeName,\n \"entityStatus\":self.entityStatus,\n \"propagate\":self.propagate,\n \"removePropagationsOnEntityDelete\": self.removePropagationsOnEntityDelete,\n \"validityPeriods\":self.validityPeriods,\n \"attributes\":self.attributes\n }\n return output", "def __call__(self, rv):\n raise NotImplementedError(\"You must subclass from ApiResponse.\")", "def to_api_repr(self):\n raise NotImplementedError", "def adapt_response(self, response):\n return response", "def adapt_response(self, response):\n return response", "def get_response(self):\n res = IODWriteRes()\n for field in [\"seqNum\", \"ARUUID\", \"API\", \"slotNumber\",\n \"subslotNumber\", \"index\"]:\n res.setfieldval(field, self.getfieldval(field))\n return res", "def serialize(self):\n return {'id': self.id,\n 'rowId': self.id,\n 'organization': self.organization.name,\n 'name': self.name,\n }", "def self(self, request):\n serializer_class = self.get_serializer_class()\n serializer = serializer_class(request.user, context={ \"request\": request })\n return Response(serializer.data)", "def serialize(self):\n return {\n 'id' : self.id,\n 'created' : self.created.isoformat(),\n 'newCar' : self.newCar,\n 'type' : self.type,\n 'make' : self.make,\n 'model' : self.model,\n 'trim' : self.trim,\n 'year' : self.year,\n 'mileage' : self.mileage,\n 'price' : self.price,\n 'description' : self.description,\n 'dealer_id' : self.dealer_id,\n }", "def __init__(self):\n self.swagger_types = {\n 'id_conta': 'int',\n 'id_produto': 'int',\n 'id_pessoa': 'int',\n 'id_parentesco': 'int',\n 'tipo_portador': 'str',\n 'nome_impresso': 'str',\n 'id_tipo_cartao': 'int',\n 'flag_ativo': 'int',\n 'data_cadastro_portador': 'str',\n 'data_cancelamento_portador': 'str'\n }\n\n self.attribute_map = {\n 'id_conta': 'idConta',\n 'id_produto': 'idProduto',\n 'id_pessoa': 'idPessoa',\n 'id_parentesco': 'idParentesco',\n 'tipo_portador': 'tipoPortador',\n 'nome_impresso': 'nomeImpresso',\n 'id_tipo_cartao': 'idTipoCartao',\n 'flag_ativo': 'flagAtivo',\n 'data_cadastro_portador': 'dataCadastroPortador',\n 'data_cancelamento_portador': 'dataCancelamentoPortador'\n }\n\n self._id_conta = None\n self._id_produto = None\n self._id_pessoa = None\n self._id_parentesco = None\n self._tipo_portador = None\n self._nome_impresso = None\n self._id_tipo_cartao = None\n self._flag_ativo = None\n self._data_cadastro_portador = None\n self._data_cancelamento_portador = None", "def serialize(self):\n return {\n 'name': self.name,\n 'description': self.description,\n 'id': self.id,\n 'price': self.price,\n 'catch_phrase': self.catch_phrase,\n }", "def serialize(self):\n return {\n 'name': self.name,\n 
'description': self.description,\n 'id': self.id,\n 'price': self.price,\n }", "def as_json(self):", "def _update_response_detail(self, request, serializer):\n if self._short_response(request):\n data = serializer.data\n detail = dict(id=data['id'],\n update_time=data['update_time'],\n uuid=data['uuid'],\n version=data['version'])\n else:\n detail = serializer.data\n return detail", "def _get(self, request_obj):\n return ResponseData(ActionsSerializer(request_obj).serialize())", "def serialize(self):\n return {\n \"id\": self.id,\n \"name\": self.name,\n \"detail\": self.detail,\n \"date_on\": self.date_on,\n }", "def to_model(self, payload):\n return payload", "def json(self) -> dict:\n return {\n 'id': self.id,\n 'requestType': self.request_type.name,\n 'isProcessed': self.is_processed,\n 'serviceName': self.service_name.name,\n 'isAdmin': self.is_admin,\n 'creationDate': LegislationDatetime.as_legislation_timezone(self.creation_date).isoformat()\n }", "def serialize(self):\n return {\n 'time_stamp' : self.time_stamp,\n 'email' : self.email,\n 'amount_deposit' : self.amount_deposit,\n 'amount_withdraw' : self.amount_withdraw,\n }", "def get(self) -> Response:\n\n query = Opportunity.objects(organization=get_jwt_identity())\n\n authorized: bool = Users.objects.get(id=get_jwt_identity()).roles.organization or \\\n Users.objects.get(id=get_jwt_identity()).roles.admin\n\n if authorized:\n fields = {\n 'paid',\n 'description',\n 'published',\n 'id',\n }\n converted = convert_query(query, fields)\n return jsonify(converted)\n else:\n return forbidden()", "def to_api_data(self):\n raise NotImplementedError()", "def __init__(self):\n self.swagger_types = {\n 'id': 'str',\n 'ticket_id': 'str',\n 'type': 'str',\n 'from_number': 'str',\n 'from_name': 'str',\n 'to_number': 'str',\n 'to_name': 'str',\n 'via_number': 'str',\n 'date_created': 'datetime',\n 'date_answered': 'datetime',\n 'date_finished': 'datetime'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'ticket_id': 'ticketId',\n 'type': 'type',\n 'from_number': 'fromNumber',\n 'from_name': 'fromName',\n 'to_number': 'toNumber',\n 'to_name': 'toName',\n 'via_number': 'viaNumber',\n 'date_created': 'dateCreated',\n 'date_answered': 'dateAnswered',\n 'date_finished': 'dateFinished'\n }\n\n self._id = None\n self._ticket_id = None\n self._type = None\n self._from_number = None\n self._from_name = None\n self._to_number = None\n self._to_name = None\n self._via_number = None\n self._date_created = None\n self._date_answered = None\n self._date_finished = None", "def get_transaction_detail(payload):\n response = requests.post(url, data=payload)\n return response.json()", "def serialize(self):\n return {\n \"id\": self.id,\n \"name\": self.name,\n \"currency\": self.currency,\n \"old_price\": self.old_price,\n \"price\": self.price,\n \"availability\": self.availability,\n \"url\": self.url,\n \"img_url\": self.img_url\n }", "def endpoint_response(self) -> Response:\n return self._resp", "def json(self):\n # Response legacy data: allow for any column to be null.\n document = {\n 'mhrNumber': self.mhr_number,\n 'documentType': self.document_type,\n 'documentRegistrationNumber': self.document_reg_id,\n 'interimed': self.interimed,\n 'ownerCrossReference': self.owner_cross_reference,\n 'interestDenominator': self.interest_denominator,\n 'declaredValue': self.declared_value,\n 'ownLand': self.own_land,\n 'routingSlipNumber': self.routing_slip_number,\n 'lastService': self.last_service,\n 'bcolAccount': self.bcol_account,\n 'datNumber': self.dat_number,\n 
'examinerId': self.examiner_id,\n 'updateId': self.update_id,\n 'phoneNumber': self.phone_number,\n 'attentionReference': self.attention_reference,\n 'name': self.name,\n 'legacyAddress': self.legacy_address,\n 'numberOfPages': self.number_of_pages,\n 'considerationValue': self.consideration_value,\n 'affirmByName': self.affirm_by_name,\n 'liensWithConsent': self.liens_with_consent,\n 'clientReferenceId': self.client_reference_id\n }\n if self.draft_ts:\n document['draftDateTime'] = model_utils.format_local_ts(self.draft_ts)\n if self.registration_ts:\n document['createDateTime'] = model_utils.format_local_ts(self.registration_ts)\n if self.transfer_execution_date and self.transfer_execution_date.year > 1:\n document['transferDate'] = model_utils.format_local_date(self.transfer_execution_date)\n return document", "def response_class(self):\n raise NotImplementedError()", "def get_response(self):\n res = IODControlRes()\n for field in [\"ARUUID\", \"SessionKey\", \"AlarmSequenceNumber\"]:\n res.setfieldval(field, self.getfieldval(field))\n\n res.block_type = self.block_type + 0x8000\n return res", "def as_entity(self):\n return {\n 'type': self.api_sub_type,\n 'value': unquote(self.unique_id), # type: ignore\n 'id': self._data.get('id'),\n }", "def json(self):\n return {'id': self.id, 'name': self.name, 'description': self.description}", "def to_response(self, data):\n return self.from_dict(data).to_dict()", "def response(schema):\n def _response(function):\n @wraps(function)\n def wrapper(*args, **kwargs):\n if issubclass(schema, BaseModel):\n has_root = True if '__root__' in schema.__fields__ else False\n function_res = function(*args, **kwargs)\n\n if not function_res:\n if has_root is True:\n return jsonify([])\n return jsonify({})\n\n if type(function_res) == list:\n res = schema.parse_obj(function_res)\n else:\n res = schema.from_orm(function_res)\n\n res = res.dict()\n\n if has_root is True:\n return jsonify(res['__root__'])\n\n return jsonify(res)\n elif isinstance(schema, dict):\n return jsonify(schema)\n else:\n raise CustomException('invalid response type', code=400)\n\n return wrapper\n return _response", "def opt_model_create_rest_api():\n request_json = request.get_json()\n OptimModelRequestAPI(request_json).validate()\n return create_model_data(request_json)", "def serialize(self):\n return { \n \n 'balance' : self.balance,\n 'email' : self.email,\n\n }", "def serialize(self, obj):\n return obj", "def create_response_element(self, **kwargs):\r\n return None", "def __init__(self,\n id=None,\n number=None,\n name=None,\n balance=None,\n mtype=None,\n status=None,\n customer_id=None,\n institution_id=None,\n balance_date=None,\n created_date=None,\n currency=None,\n institution_login_id=None,\n display_position=None,\n real_account_number_last_4=None,\n aggregation_status_code=None,\n aggregation_success_date=None,\n aggregation_attempt_date=None,\n last_transaction_date=None,\n detail=None,\n position=None,\n additional_properties = {},\n json_data=None):\n\n # Initialize members of the class\n self.id = id\n self.number = number\n self.real_account_number_last_4 = real_account_number_last_4\n self.name = name\n self.balance = balance\n self.mtype = mtype\n self.aggregation_status_code = aggregation_status_code\n self.status = status\n self.customer_id = customer_id\n self.institution_id = institution_id\n self.balance_date = balance_date\n self.aggregation_success_date = aggregation_success_date\n self.aggregation_attempt_date = aggregation_attempt_date\n self.created_date = 
created_date\n self.currency = currency\n self.last_transaction_date = last_transaction_date\n self.institution_login_id = institution_login_id\n self.detail = detail\n self.position = position\n self.display_position = display_position\n\n # Add additional model properties to the instance\n self.additional_properties = additional_properties\n\n # Store original response\n self.json_data = json_data", "def retrieve(self, request, *args, **kwargs):\n instance = self.get_object()\n serializer = self.get_serializer(instance)\n\n data = {\n 'header': response_header(msg='Retrieve request successfully processed.',\n username=request.user.username,\n api_status=constants.STATUS_OK),\n 'detail': serializer.data\n }\n\n return Response(data=data, status=status.HTTP_200_OK)", "def _create_response_detail(self, request, serializer):\n def build_item(source):\n \"\"\"build time data\"\"\"\n return dict(id=source['id'],\n uuid=source['uuid'],\n creation_time=source['creation_time'],\n version=source['version'])\n if self._short_response(request):\n data = serializer.data\n if isinstance(data, (list)):\n detail = [build_item(item) for item in data]\n else:\n detail = build_item(data)\n else:\n detail = serializer.data\n return detail", "def start_transaction(self) -> \"Transaction\":\n response = super().start_transaction()\n self._set_transaction_id(response.id)\n return response", "def serialize(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'ingredients': self.ingredients,\n 'directions': self.directions,\n 'type': self.type,\n }", "def receiver():\n def generate(entities_to_proceed):\n \"\"\"Process list of entities populating them with altitude data\"\"\"\n yield \"[\"\n for index, entity in enumerate(entities_to_proceed):\n if logging.getLogger().isEnabledFor(logging.DEBUG):\n logging.debug(\"processing entity : %s\", entity)\n else:\n logging.info(\"processing entity : %s\", entity.get(GUID_STR))\n\n if index > 0:\n yield \",\"\n booking_guid = entity.get(GUID_STR)\n iata = entity.get(IATA_STR)\n api_key = resolve_api_key(API_KEYS, iata)\n\n if not isinstance(api_key, str):\n entity[PROP] = []\n yield json.dumps(entity)\n continue\n url = URL_TEMPLATE.render(entity) + booking_guid + \"?api_key=\" + api_key\n if METHOD == \"get\":\n entity[PROP] = requests.get(url, headers=HEADERS).json()\n else:\n entity[PROP] = requests.request(METHOD, url, data=entity.get(\"payload\"),\n headers=HEADERS).json()\n yield json.dumps(entity)\n yield \"]\"\n\n # get entities from request\n entities = request.get_json()\n\n # create the response\n logging.debug(\"Processing %i entities\", len(entities))\n return Response(generate(entities), mimetype='application/json')", "def _format_query_response(self):\n output = self._initialize_response_output(self.parameters)\n output[\"data\"] = self.query_data\n\n self.query_sum = self._pack_data_object(self.query_sum, **self._mapper.PACK_DEFINITIONS)\n output[\"total\"] = self.query_sum\n\n if self._delta:\n output[\"delta\"] = self.query_delta\n\n return output", "def test_get_transaction_details_request(self):\n self.trans_details.get_transaction_details(\n trans_id = 123456,\n )", "def list(self, request):\n # Get transaction\n transaction_id = self.request.query_params.get('transaction_id')\n transaction = Transaction.objects.get(id=transaction_id)\n\n return JsonResponse({\"status\": transaction.status, \"transaction_error\": transaction.error_type})", "def transaction(id=None):\n\n if request.method == 'GET':\n if id:\n data = 
LineItem.query.get_or_404(id).to_dict()\n return jsonify(data)\n else:\n return bad_request('no id provided')\n\n # Create a new transaction\n elif request.method == 'POST':\n data = request.get_json() or {}\n # check for required fields\n if 'amount' not in data or\\\n 'location' not in data or\\\n 'category_id' not in data or\\\n 'date' not in data:\n return bad_request('missing data')\n\n date = datetime.strptime(data['date'], \"%m/%d/%Y\")\n lineitem = LineItem(data['amount'], date, data['location'],\n data['description'], data['category_id'])\n db.session.add(lineitem)\n db.session.commit()\n response = jsonify(lineitem.to_dict())\n response.status_code = 201\n response.headers['location'] = url_for('api.transaction',\n id=lineitem.id)\n return response\n # Edit a transaction\n elif request.method == 'PUT':\n data = request.get_json() or {}\n # is an id specified in either way?\n if 'id' in data:\n if id is None:\n if data['id'] != id:\n return bad_request('two different ids specified')\n else:\n id = int(data['id'])\n elif id is None:\n return bad_request('no id specified')\n\n item = LineItem.query.get_or_404(id)\n item.from_dict(data)\n db.session.commit()\n return jsonify(item.to_dict())\n # Delete\n elif request.method == 'DELETE':\n lineitem = LineItem.query.get_or_404(id)\n db.session.delete(lineitem)\n db.session.commit()\n return '', 204\n else:\n return bad_request('Operation not supported')", "def all_transactions(self, request):\n user_id = request.data[\"user\"]\n user = User.objects.get(id=user_id)\n user_transactions = user.transactions.all()\n serializer = TransactionSerializer(user_transactions, many=True)\n\n return Response(serializer.data)", "def serialize(self):\n return{\n 'name':self.name,\n 'id' :self.id,\n }", "def serialize(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'description': self.description,\n 'price': self.price,\n 'course': self.course,\n 'restaurant_id': self.restaurant_id\n }", "def json(self):\n return {\n 'id': self.id,\n 'id_bank_data': self.id_bank_data,\n 'national_id_document': self.national_id_document,\n 'country': self.country,\n 'name': self.name,\n 'surname': self.surname,\n 'mail': self.mail,\n 'google_token': self.google_token,\n 'role': self.role\n }", "def serialize(self):\n return {\n 'id': self.id,\n 'code': self.code,\n 'email': self.email,\n 'status': self.status.value,\n 'organization_id': self.organization_id\n }", "def get_response_serializers(self):\n responses = OrderedDict({\n '400': 'Invalid arguments',\n '401': 'Not authenticated',\n '403': \"You don't have access to do this operation on this company\",\n 'error': ErrorSerializer,\n })\n\n responses.update(super().get_response_serializers())\n\n return responses", "def getSerializer():", "def serialize(self):\n return {\n 'name': self.name,\n 'id': self.id,\n 'price': self.price,\n }", "def handle_models_request():\n # TODO: add sort and filter by creation/modification date\n return (\n json.dumps({\"models\": {\n k: d[\"data\"] for k, d in app.models.items()\n }}), 200,\n {'ContentType': 'application/json'}\n )", "def get(self, request):\n varemployee = employee.objects.all()\n serializer = employeeSerializer(varemployee, many=True)\n return Response(serializer.data)", "def rest_resource(resource_cls):\n ecommerce_checkout_api.add_resource(resource_cls, *resource_cls.endpoints)\n return resource_cls", "def serialize(self):\n\t\treturn { 'client_id': self.client_id, 'is_in_error' : self.is_in_error, 'error_status' : self.error_status }", "def 
self_json(self):\n return {\n 'id': self.id, \n 'description': self.description, \n 'price': str(self.price),\n 'quantity': self.quantity\n }", "def normalize_transfer_result(cls, result: JSON) -> JSON:\n ...", "def to_dict(self):\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(Transaction, dict):\n for key, value in self.items():\n result[key] = value\n\n return result", "def json(self) -> Any:\n return self.body.json()", "def to_dict(self):\n obj = {\n 'id': self.id,\n 'user_id': self.user_id,\n 'amount': float(self.amount),\n 'description': self.description,\n 'comments': self.comments,\n 'payor': self.payor,\n 'date': self.date,\n }\n return obj", "def to_representation(self, instance):\n return instance", "def dump_api(self, add_config=False):\n\n retour = {}\n\n for val in ['reference', 'extra_data', 'amount', 'postfinance_id', 'postfinance_status', 'internal_status', 'ipn_needed', 'brand', 'card']:\n retour[val] = str(getattr(self, val))\n\n for val in ['creation_date', 'last_userforwarded_date', 'last_user_back_from_postfinance_date', 'last_postfinance_ipn_date', 'last_ipn_date']:\n if getattr(self, val):\n retour[val] = str(localtime(getattr(self, val)))\n else:\n retour[val] = ''\n\n for cal, name in [('get_postfinance_status_display', 'postfinance_status_text'), ('get_internal_status_display', 'internal_status_text'), ('amount_chf', 'amount_chf')]:\n retour[name] = getattr(self, cal)()\n\n if add_config:\n retour['config'] = self.config.name\n\n return retour", "def post(self, entity):\n return '', 200", "def toJSON(self):\n return {'actLib' : self.nomAct,'comLib' : self.comAct,'id' : self.Id,'equID' : self.EquipementId}", "def create_json_from_model(self):\n json = {\n \"enableAutoReply\": self.enable_auto_reply,\n \"responseSubject\": self.response_subject,\n \"responseBodyPlainText\": self.response_body_plain_text,\n \"responseBodyHtml\": self.response_body_html,\n \"restrictToContacts\": self.restrict_to_contacts,\n \"restrictToDomain\": self.restrict_to_domain,\n \"startTime\": self.start_time,\n \"endTime\": self.end_time\n }\n return json", "def transaction(self):\n return Transaction(self)", "def deserialize(self, resp):\r\n return self.serializer.deserialize(resp.content, format=resp['Content-Type'])", "def to_json(self):\n return json.dumps({\n \"header\": self.header,\n \"transactions\": self._transactions\n })", "def __init__(self):\n self.swagger_types = {\n 'id': 'str',\n 'domain': 'str',\n 'custom_domain': 'str',\n 'customer_email': 'str',\n 'customer_name': 'str',\n 'company': 'str',\n 'date_created': 'datetime',\n 'date_validity': 'datetime',\n 'status': 'str',\n 'account_id': 'str',\n 'cluster_id': 'str',\n 'task_id': 'str',\n 'version': 'str',\n 'is_latest': 'bool',\n 'product_id': 'str',\n 'variation_id': 'str'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'domain': 'domain',\n 'custom_domain': 'custom_domain',\n 'customer_email': 'customer_email',\n 'customer_name': 'customer_name',\n 'company': 'company',\n 'date_created': 'date_created',\n 'date_validity': 'date_validity',\n 'status': 'status',\n 
'account_id': 'account_id',\n 'cluster_id': 'cluster_id',\n 'task_id': 'task_id',\n 'version': 'version',\n 'is_latest': 'is_latest',\n 'product_id': 'product_id',\n 'variation_id': 'variation_id'\n }\n\n self._id = None\n self._domain = None\n self._custom_domain = None\n self._customer_email = None\n self._customer_name = None\n self._company = None\n self._date_created = None\n self._date_validity = None\n self._status = None\n self._account_id = None\n self._cluster_id = None\n self._task_id = None\n self._version = None\n self._is_latest = None\n self._product_id = None\n self._variation_id = None", "def serialize(self):\n return {\n \"id\": self.id,\n \"user_id\": self.user_id,\n \"create_time\": self.create_time,\n \"update_time\": self.update_time\n }", "def __call__(self, rv):\n if isinstance(rv, ResponseBase):\n return rv\n data, status, headers = unpack(rv)\n resp = flask.make_response(self._encoder(data, **self.json_settings),\n status, {'Content-Type': self.content_type})\n resp.headers.extend(headers)\n return resp", "def serialize(self):\n return {\n\n\n }", "def web_service_response_example(self, action, controller):", "def serialize(self):\n return{\n # 'date': self.date,\n 'date': self.date,\n 'id': self.id,\n }", "def serialize(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'description': self.description,\n }" ]
[ "0.6470275", "0.57408893", "0.56040984", "0.5564933", "0.55613655", "0.554139", "0.55230355", "0.5476537", "0.5448558", "0.5446895", "0.5433953", "0.5374998", "0.5326653", "0.5325249", "0.5271114", "0.5253841", "0.52191997", "0.5192388", "0.5190319", "0.5184639", "0.51791036", "0.51766753", "0.51644796", "0.51437974", "0.51437724", "0.5128186", "0.5111816", "0.5111816", "0.51060736", "0.5100999", "0.5099542", "0.50742674", "0.5070064", "0.50672495", "0.50662893", "0.5058784", "0.50459087", "0.50455004", "0.5039235", "0.50376123", "0.50312024", "0.5028342", "0.50156325", "0.5007519", "0.5007157", "0.50065047", "0.50035244", "0.50001657", "0.4992282", "0.4990686", "0.49886483", "0.49843538", "0.49808654", "0.49786994", "0.49746996", "0.4973613", "0.496521", "0.49650967", "0.49580178", "0.49412316", "0.49405605", "0.49378762", "0.493573", "0.49348292", "0.49275193", "0.49265963", "0.49198118", "0.49185532", "0.49167964", "0.49162212", "0.49137774", "0.49113625", "0.49107426", "0.49040398", "0.48874843", "0.4884985", "0.4882265", "0.48807237", "0.48745707", "0.48711783", "0.48707888", "0.48673907", "0.4867301", "0.4861498", "0.48536792", "0.48529026", "0.4850743", "0.48302183", "0.48268202", "0.48263833", "0.4819427", "0.48184404", "0.48158997", "0.4812253", "0.48005918", "0.47931162", "0.4789181", "0.47863755", "0.47795397", "0.47711152", "0.476988" ]
0.0
-1
Sets the id of this EscrowTransactionResponse.
def id(self, id): self._id = id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_id(self, id):\n self.data['id'] = id", "def set_id(self, id):\n self.__id = id", "def SetId(self, id):\n self.id = int(id)", "def set_id(self, id_):\n\n self.id_ = id_", "def set_id(self, id):\n\n\t\tif id is not None and not isinstance(id, int):\n\t\t\traise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: id EXPECTED TYPE: int', None, None)\n\t\t\n\t\tself.__id = id\n\t\tself.__key_modified['id'] = 1", "def setID(self, id):\n self._id = id\n return self.callRemote('setID', id)", "def id(self, id):\n \n self._id = id", "def id(self, id):\n \n self._id = id", "def id(self, id):\n \n self._id = id", "def id(self, id):\n \n self._id = id", "def id(self, id):\n \n self._id = id", "def id(self, id: int):\n\n self._id = id", "def id(self, id: int):\n\n self._id = id", "def id(self, id: int):\n\n self._id = id", "def id(self, id: int):\n\n self._id = id", "def id(self, id: int):\n\n self._id = id", "def id(self, id: int):\n\n self._id = id", "def id(self, id: int):\n\n self._id = id", "def id(self, id: int):\n\n self._id = id", "def id(self, id: int):\n\n self._id = id", "def id(self, id: int):\n\n self._id = id" ]
[ "0.7112411", "0.68683225", "0.6860865", "0.6837625", "0.6821203", "0.67498773", "0.6711314", "0.6711314", "0.6711314", "0.6711314", "0.6711314", "0.6605837", "0.6605837", "0.6605837", "0.6605837", "0.6605837", "0.6605837", "0.6605837", "0.6605837", "0.6605837", "0.6605837" ]
0.6603375
92
Sets the payee_wallet_id of this EscrowTransactionResponse.
def payee_wallet_id(self, payee_wallet_id): self._payee_wallet_id = payee_wallet_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def payer_wallet_id(self, payer_wallet_id):\n\n self._payer_wallet_id = payer_wallet_id", "def payor_id(self, payor_id):\n\n self._payor_id = payor_id", "def payee_zip(self, payee_zip):\n\n self._payee_zip = payee_zip", "def set_merchant_transaction_id(self, transaction_id):\n self.merchant_transaction_id = transaction_id", "def payee_name(self, payee_name):\n\n self._payee_name = payee_name", "def payee_state(self, payee_state):\n\n self._payee_state = payee_state", "def transaction_id(self, transaction_id):\n\n self._transaction_id = transaction_id", "def transaction_id(self, transaction_id):\n\n self._transaction_id = transaction_id", "def merchant_id(self, merchant_id):\n\n self._merchant_id = merchant_id", "def earnings_rate_id(self, earnings_rate_id):\n\n self._earnings_rate_id = earnings_rate_id", "def get_wallet(self, walletId):\n return", "def __init__(self, id=None, payee_wallet_id=None, payer_wallet_id=None, amount=None, withdrawn=None, escrow_address=None, record_status=None, create_date=None, update_date=None): # noqa: E501 # noqa: E501\n\n self._id = None\n self._payee_wallet_id = None\n self._payer_wallet_id = None\n self._amount = None\n self._withdrawn = None\n self._escrow_address = None\n self._record_status = None\n self._create_date = None\n self._update_date = None\n self.discriminator = None\n\n if id is not None:\n self.id = id\n if payee_wallet_id is not None:\n self.payee_wallet_id = payee_wallet_id\n if payer_wallet_id is not None:\n self.payer_wallet_id = payer_wallet_id\n if amount is not None:\n self.amount = amount\n if withdrawn is not None:\n self.withdrawn = withdrawn\n if escrow_address is not None:\n self.escrow_address = escrow_address\n if record_status is not None:\n self.record_status = record_status\n if create_date is not None:\n self.create_date = create_date\n if update_date is not None:\n self.update_date = update_date", "def response_id(self, response_id):\n\n self._response_id = response_id", "def payee(self, payee_id: str):\n return get_from_list(self.payees, \"id\", payee_id)", "def set_chain_id(self, chain_id):\n assert isinstance(chain_id, str)\n for atm in self.iter_alt_loc():\n atm.chain_id = chain_id", "def committee_id(self, committee_id):\n\n self._committee_id = committee_id", "def committee_id(self, committee_id):\n\n self._committee_id = committee_id", "def committee_id(self, committee_id):\n\n self._committee_id = committee_id", "def set_AWSMerchantId(self, value):\n super(ListOrdersInputSet, self)._set_input('AWSMerchantId', value)", "def set_chain_id(self, chain_id):\n assert isinstance(chain_id, str)\n self.chain_id = chain_id\n\n for frag in self.iter_fragments():\n frag.set_chain_id(chain_id)", "def payment_id(self, payment_id):\n\n self._payment_id = payment_id", "def envelope_id(self, envelope_id):\n\n self._envelope_id = envelope_id", "def transaction_id(self, transaction_id):\n if transaction_id is None:\n raise ValueError(\"Invalid value for `transaction_id`, must not be `None`\") # noqa: E501\n\n self._transaction_id = transaction_id", "def set_chain_id(self, chain_id):\n assert isinstance(chain_id, str)\n self.chain_id = chain_id\n\n for atm in self.iter_atoms():\n atm.set_chain_id(chain_id)", "def pay_fee(self, fee):\n self.wallet -= fee", "def save(self, *args, **kwargs):\n wallet = self.wallet.withdraw(self.value)\n super(Payment, self).save(*args, **kwargs)", "def set_received_txn_response(self, transaction_id, origin, code, response_dict):\n\n return self.db.simple_insert(\n table=\"received_transactions\",\n 
values={\n \"transaction_id\": transaction_id,\n \"origin\": origin,\n \"response_code\": code,\n \"response_json\": db_binary_type(encode_canonical_json(response_dict)),\n \"ts\": self._clock.time_msec(),\n },\n or_ignore=True,\n desc=\"set_received_txn_response\",\n )", "def transaction_amount(self, transaction_amount):\n\n self._transaction_amount = transaction_amount", "def setAccount(self, account_id):\n self.data_struct['_setAccount'] = account_id", "def account_id(self, account_id):\n\n self._account_id = account_id", "def account_id(self, account_id):\n\n self._account_id = account_id", "def account_id(self, account_id):\n\n self._account_id = account_id", "def account_id(self, account_id):\n\n self._account_id = account_id", "def account_id(self, account_id):\n\n self._account_id = account_id", "def account_id(self, account_id):\n\n self._account_id = account_id", "def account_id(self, account_id):\n self._account_id = account_id", "def earnings_rate_id(self, earnings_rate_id):\n if earnings_rate_id is None:\n raise ValueError(\n \"Invalid value for `earnings_rate_id`, must not be `None`\"\n ) # noqa: E501\n\n self._earnings_rate_id = earnings_rate_id", "def financial_offer_id(self, financial_offer_id):\n\n self._financial_offer_id = financial_offer_id", "def conduit_committee_id(self, conduit_committee_id):\n\n self._conduit_committee_id = conduit_committee_id", "def owner_id(self, owner_id):\n\n self._owner_id = owner_id", "def payee_prefix(self, payee_prefix):\n\n self._payee_prefix = payee_prefix", "def delete_payee(self, payee_id):\n # [todo] - handle deletion failure\n # probably best done by catching exception raised if the payee has\n # children in transactions table\n\n # open a cursor\n cur = self.get_cursor()\n\n delete_payee_statement = \"DELETE FROM payees \" + \\\n \"WHERE payee_id={0}\".format(payee_id)\n cur.execute(delete_payee_statement)\n\n # close the cursor\n self.close_cursor()", "def payee_city(self, payee_city):\n\n self._payee_city = payee_city", "def cryptocurrency_deposit_request(self, walletId, currency):\n return", "def create_wallet_transfer(self, sourceWalletId, destinationWalletId, amount, currencyCode):\n return", "def set_player_id(self, player_id):\n self.player_id = player_id", "def set_id(self, player_id):\n pass", "def set_chain_id(self, chain_id):\n ## check for conflicting chain_id in the structure\n if self.model is not None:\n chk_chain = self.model.get_chain(chain_id)\n if chk_chain is not None or chk_chain != self:\n raise ChainOverwrite()\n\n Segment.set_chain_id(self, chain_id)\n\n ## resort the parent structure\n if self.model is not None:\n self.model.chain_list.sort()", "def vpc_id(self, vpc_id):\n self._vpc_id = vpc_id", "def owner_id(self, owner_id):\n self._owner_id = owner_id", "def trade_reduced_id(self, trade_reduced_id):\n\n self._trade_reduced_id = trade_reduced_id", "def account_amount(self, account_amount):\n\n self._account_amount = account_amount", "async def test_fail_wrong_wallet_id(self, conn, user_with_wallet):\n amount = Decimal(2.5)\n\n with pytest.raises(WalletDoesNotExists) as exc:\n await add_to_wallet(\n conn,\n wallet_id=2,\n amount=amount,\n )\n assert str(exc.value) == 'Wallet does not exists'", "def player_id(self, player_id):\n\n self._player_id = player_id", "def player_id(self, player_id):\n\n self._player_id = player_id", "def save(self):\n if not self.fileKey:\n log.error(\"attempted to save a closed wallet\")\n return\n encrypted = self.fileKey.encrypt(tinyjson.dump(self).encode()).hex()\n w = 
tinyjson.dump({\n \"keyparams\": self.fileKey.params(),\n \"wallet\": encrypted,\n })\n helpers.saveFile(self.path, w)", "def edit_transaction(self, trans_id, date, payee_id, description, amount):\n # [todo] - all parameters except trans_id optional, fill others with\n # current values\n\n # [todo] - validate transaction_id\n # [todo] - validate new values\n\n # open a cursor\n cur = self.get_cursor()\n\n edit_trans_statement = \"UPDATE transactions \" + \\\n \"SET date='{0}-{1}-{2}', \".format(date.year,\n date.month,\n date.day) + \\\n \"payee_id='{0}', \".format(payee_id) + \\\n \"description='{0}', \".format(description) + \\\n \"amount='{0}' \".format(amount) + \\\n \"WHERE transaction_id={0}\".format(trans_id)\n\n cur.execute(edit_trans_statement)\n\n # close the cursor\n self.close_cursor()", "def survey_response_id(self, survey_response_id):\n\n self._survey_response_id = survey_response_id", "def response_status_id(self, response_status_id):\n\n self._response_status_id = response_status_id", "def bot_owner_id(self, bot_owner_id):\n\n self._bot_owner_id = bot_owner_id", "def edit_payee(self, payee_id, new_payee_name):\n # [todo] - add check that new_payee_name is unique\n\n # open a cursor\n cur = self.get_cursor()\n\n edit_payee_statement = \"UPDATE payees \" + \\\n \"SET payee_name='{0}' \".format(new_payee_name) + \\\n \"WHERE payee_id={0}\".format(payee_id)\n\n cur.execute(edit_payee_statement)\n\n # close the cursor\n self.close_cursor()", "def chain_id(self, chain_id):\n if chain_id is None:\n raise ValueError(\"Invalid value for `chain_id`, must not be `None`\") # noqa: E501\n\n self._chain_id = chain_id", "def driver_id(self, driver_id):\n\n self._driver_id = driver_id", "def vendor_id(self, vendor_id):\n\n self._vendor_id = vendor_id", "def SetToolId(self, id):\r\n\r\n self.tool_id = id", "def set_amount(self, amount):\n self.amount = amount", "def tenant_id(self, tenant_id):\n\n self._tenant_id = tenant_id", "def tenant_id(self, tenant_id):\n\n self._tenant_id = tenant_id", "def address_id(self, address_id):\n\n self._address_id = address_id", "def address_id(self, address_id):\n\n self._address_id = address_id", "def set(self, amount: float, reason: str = \"\") -> \"Bank\":\n\n self.__record_ledger__(amount - self.balance, reason) # Because math\n self.balance = amount\n return self", "async def atomic_transfer(\n self, amount: int, target_wallet: \"Wallet\", nonce: str\n ) -> None:\n\n if target_wallet == self:\n raise ValueError(\"Impossible to transfer funds to self\")\n\n transaction = Transaction(\n wallet_id=self.wallet_id,\n nonce=nonce,\n type=TransactionType.TRANSFER,\n data={\"amount\": amount, \"target_wallet\": target_wallet.wallet_id},\n )\n\n try:\n await self.storage.transaction_write_items(\n items=[\n self.storage.item_factory.put_idempotency_item(\n pk=transaction.unique_id, data=transaction.as_dict()\n ),\n self.storage.item_factory.update_atomic_decrement(\n pk=self.unique_id, update_key=self.BALANCE_KEY, amount=amount\n ),\n self.storage.item_factory.update_atomic_increment(\n pk=target_wallet.unique_id,\n update_key=self.BALANCE_KEY,\n amount=amount,\n ),\n ]\n )\n except storage.exceptions.TransactionMultipleError as e:\n if e.errors[0]:\n raise crud.exceptions.WalletTransactionAlreadyRegisteredError(\n f\"Transaction with nonce {nonce} already registered.\"\n )\n\n if e.errors[1]:\n raise crud.exceptions.WalletInsufficientFundsError(\n \"Wallet has insufficient funds to \"\n f\"complete operation: {str(e.errors[1])}\"\n )\n\n if e.errors[2]:\n 
raise crud.exceptions.WalletNotFoundError(\n f\"Wallet does not exists: {str(e.errors[2])}\"\n )\n\n raise crud.exceptions.BaseWalletError(str(e))", "def merchant_id(self, merchant_id):\n if merchant_id is None:\n raise ValueError(\"Invalid value for `merchant_id`, must not be `None`\") # noqa: E501\n\n self._merchant_id = merchant_id", "def chain_id(self, chain_id):\n if self.local_vars_configuration.client_side_validation and chain_id is None: # noqa: E501\n raise ValueError(\"Invalid value for `chain_id`, must not be `None`\") # noqa: E501\n allowed_values = [\"kcitymarket\", \"ksupermarket\", \"kmarket\", \"nokm\", \"kmyllypuro\"] # noqa: E501\n if self.local_vars_configuration.client_side_validation and chain_id not in allowed_values: # noqa: E501\n raise ValueError(\n \"Invalid value for `chain_id` ({0}), must be one of {1}\" # noqa: E501\n .format(chain_id, allowed_values)\n )\n\n self._chain_id = chain_id", "def econsent_signature(self, econsent_signature):\n\n self._econsent_signature = econsent_signature", "def payment_reference_id(self, payment_reference_id):\n\n self._payment_reference_id = payment_reference_id", "def create_wallet(self, walletName):\n return", "def related_client_id(self, related_client_id):\n\n self._related_client_id = related_client_id", "def currency_id(self, currency_id):\n\n self._currency_id = currency_id", "def currency_id(self, currency_id):\n\n self._currency_id = currency_id", "def currency_id(self, currency_id):\n\n self._currency_id = currency_id", "async def create_wallet(self, user_id: str) -> None:\n self._wallet_id = self.generate_wallet_id()\n\n transaction = Transaction(\n wallet_id=self.wallet_id,\n type=TransactionType.CREATE,\n data={\"amount\": self.DEFAULT_BALANCE},\n nonce=None,\n )\n\n # todo: create separate user storage\n user_pk = f\"{user_id}{self.USER_KEY_POSTFIX}\"\n\n try:\n await self.storage.transaction_write_items(\n items=[\n # create transaction record\n self.storage.item_factory.put_idempotency_item(\n pk=transaction.unique_id, data=transaction.as_dict()\n ),\n # create wallet\n self.storage.item_factory.put_idempotency_item(\n pk=self.unique_id,\n data={self.BALANCE_KEY: self.DEFAULT_BALANCE},\n ),\n # create link between wallet and user\n self.storage.item_factory.put_idempotency_item(\n pk=user_pk, data={self.USER_WALLET_KEY: self.wallet_id}\n ),\n ]\n )\n except storage.exceptions.TransactionMultipleError as e:\n if e.errors[0]:\n raise crud.exceptions.WalletTransactionAlreadyRegisteredError(\n str(e.errors[0])\n )\n\n raise crud.exceptions.WalletAlreadyExistsError(\n f\"Wallet already exists for the user {user_pk}\"\n )", "def vendorid(self, vendorid):\n\n self._vendorid = vendorid", "def target_version_id(self, target_version_id):\n\n self._target_version_id = target_version_id", "def credential_id(self, credential_id):\n\n self._credential_id = credential_id", "def conversation_participant_uuid(self, conversation_participant_uuid):\n\n self._conversation_participant_uuid = conversation_participant_uuid", "def back_reference_transaction_id(self, back_reference_transaction_id):\n\n self._back_reference_transaction_id = back_reference_transaction_id", "def get_wallet_trades(self, walletId, filters={}):\n return", "def owner_id(self, owner_id):\n if owner_id is None:\n raise ValueError(\"Invalid value for `owner_id`, must not be `None`\") # noqa: E501\n\n self._owner_id = owner_id", "def add_payee(self, payee_name):\n # [todo] - add check that payee_name is unique\n\n # open a cursor\n cur = self.get_cursor()\n\n 
self.reset_auto_increment('payees')\n\n # add payee with given name\n add_payee_statement = \"INSERT INTO payees \" + \\\n \"VALUES ('0', '{0}')\".format(payee_name)\n\n cur.execute(add_payee_statement)\n\n # close cursor\n self.close_cursor()", "def target_element_id(self, target_element_id):\n\n self._target_element_id = target_element_id", "def bank_id(self, bank_id):\n\n self._bank_id = bank_id", "def person_id(self, person_id):\n\n self._person_id = person_id", "def seller(self, seller):\n\n self._seller = seller", "def payee_suffix(self, payee_suffix):\n\n self._payee_suffix = payee_suffix", "def conversation_participant_arn(self, conversation_participant_arn):\n\n self._conversation_participant_arn = conversation_participant_arn", "def target_id(self, target_id):\n\n self._target_id = target_id", "def payee_last_name(self, payee_last_name):\n\n self._payee_last_name = payee_last_name", "def region_id(self, region_id):\n\n self._region_id = region_id", "def warehouse_id(self, warehouse_id):\n\n self._warehouse_id = warehouse_id" ]
[ "0.7311907", "0.573136", "0.54004514", "0.53062975", "0.53006345", "0.5269258", "0.5153421", "0.5153421", "0.4994353", "0.4974469", "0.49704325", "0.49286303", "0.49132255", "0.4910477", "0.4855245", "0.48479044", "0.48479044", "0.48479044", "0.48202246", "0.47650665", "0.47486928", "0.46032292", "0.46005285", "0.45976615", "0.45579433", "0.45507455", "0.4522407", "0.45100296", "0.44900107", "0.44844285", "0.44844285", "0.44844285", "0.44844285", "0.44844285", "0.44844285", "0.4466273", "0.4456583", "0.44106862", "0.4406252", "0.43890935", "0.43718177", "0.43627697", "0.4357172", "0.43446112", "0.4343733", "0.43160343", "0.43076828", "0.4293091", "0.42930892", "0.42887366", "0.42837986", "0.42821625", "0.42714334", "0.42671028", "0.42671028", "0.42577794", "0.42437744", "0.42433012", "0.42049944", "0.4200478", "0.41982496", "0.41944152", "0.41916096", "0.41866976", "0.418428", "0.41725403", "0.41527528", "0.41527528", "0.41502434", "0.41502434", "0.41342852", "0.4133384", "0.41307014", "0.41287634", "0.41084966", "0.41079408", "0.4106682", "0.41026068", "0.40926933", "0.40926933", "0.40926933", "0.4080517", "0.4076657", "0.4074848", "0.4073743", "0.40714297", "0.40650514", "0.40650278", "0.40564758", "0.40563613", "0.40522552", "0.40447494", "0.40430373", "0.40418872", "0.40412688", "0.40405786", "0.40387848", "0.40274265", "0.40220043", "0.40192178" ]
0.81508285
0
Sets the payer_wallet_id of this EscrowTransactionResponse.
def payer_wallet_id(self, payer_wallet_id):

    self._payer_wallet_id = payer_wallet_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def payee_wallet_id(self, payee_wallet_id):\n\n self._payee_wallet_id = payee_wallet_id", "def payor_id(self, payor_id):\n\n self._payor_id = payor_id", "def set_merchant_transaction_id(self, transaction_id):\n self.merchant_transaction_id = transaction_id", "def merchant_id(self, merchant_id):\n\n self._merchant_id = merchant_id", "def transaction_id(self, transaction_id):\n\n self._transaction_id = transaction_id", "def transaction_id(self, transaction_id):\n\n self._transaction_id = transaction_id", "def response_id(self, response_id):\n\n self._response_id = response_id", "def payment_id(self, payment_id):\n\n self._payment_id = payment_id", "def get_wallet(self, walletId):\n return", "def seller(self, seller):\n\n self._seller = seller", "def payee_zip(self, payee_zip):\n\n self._payee_zip = payee_zip", "def payee_name(self, payee_name):\n\n self._payee_name = payee_name", "def merchant_id(self, merchant_id):\n if merchant_id is None:\n raise ValueError(\"Invalid value for `merchant_id`, must not be `None`\") # noqa: E501\n\n self._merchant_id = merchant_id", "def driver_id(self, driver_id):\n\n self._driver_id = driver_id", "def buyer(self, buyer):\n\n self._buyer = buyer", "def survey_response_id(self, survey_response_id):\n\n self._survey_response_id = survey_response_id", "def set_AWSMerchantId(self, value):\n super(ListOrdersInputSet, self)._set_input('AWSMerchantId', value)", "def transaction_id(self, transaction_id):\n if transaction_id is None:\n raise ValueError(\"Invalid value for `transaction_id`, must not be `None`\") # noqa: E501\n\n self._transaction_id = transaction_id", "def earnings_rate_id(self, earnings_rate_id):\n\n self._earnings_rate_id = earnings_rate_id", "def __init__(self, id=None, payee_wallet_id=None, payer_wallet_id=None, amount=None, withdrawn=None, escrow_address=None, record_status=None, create_date=None, update_date=None): # noqa: E501 # noqa: E501\n\n self._id = None\n self._payee_wallet_id = None\n self._payer_wallet_id = None\n self._amount = None\n self._withdrawn = None\n self._escrow_address = None\n self._record_status = None\n self._create_date = None\n self._update_date = None\n self.discriminator = None\n\n if id is not None:\n self.id = id\n if payee_wallet_id is not None:\n self.payee_wallet_id = payee_wallet_id\n if payer_wallet_id is not None:\n self.payer_wallet_id = payer_wallet_id\n if amount is not None:\n self.amount = amount\n if withdrawn is not None:\n self.withdrawn = withdrawn\n if escrow_address is not None:\n self.escrow_address = escrow_address\n if record_status is not None:\n self.record_status = record_status\n if create_date is not None:\n self.create_date = create_date\n if update_date is not None:\n self.update_date = update_date", "def vendor_id(self, vendor_id):\n\n self._vendor_id = vendor_id", "def set_received_txn_response(self, transaction_id, origin, code, response_dict):\n\n return self.db.simple_insert(\n table=\"received_transactions\",\n values={\n \"transaction_id\": transaction_id,\n \"origin\": origin,\n \"response_code\": code,\n \"response_json\": db_binary_type(encode_canonical_json(response_dict)),\n \"ts\": self._clock.time_msec(),\n },\n or_ignore=True,\n desc=\"set_received_txn_response\",\n )", "def save(self, *args, **kwargs):\n wallet = self.wallet.withdraw(self.value)\n super(Payment, self).save(*args, **kwargs)", "def payee_state(self, payee_state):\n\n self._payee_state = payee_state", "def bank_id(self, bank_id):\n\n self._bank_id = bank_id", "def set_player_id(self, player_id):\n 
self.player_id = player_id", "def player_id(self, player_id):\n\n self._player_id = player_id", "def player_id(self, player_id):\n\n self._player_id = player_id", "def vendorid(self, vendorid):\n\n self._vendorid = vendorid", "def set_up_trader_id(trader_dict):\n trader_id = 0\n for name, trader in trader_dict.items():\n trader.trader_id = str(trader_id)\n trader_id += 1", "def merchant_order_no(self, merchant_order_no):\n\n self._merchant_order_no = merchant_order_no", "def payee(self, payee_id: str):\n return get_from_list(self.payees, \"id\", payee_id)", "def envelope_id(self, envelope_id):\n\n self._envelope_id = envelope_id", "def payor_name(self, payor_name):\n if payor_name is None:\n raise ValueError(\"Invalid value for `payor_name`, must not be `None`\") # noqa: E501\n\n self._payor_name = payor_name", "def gateway_id(self, gateway_id):\n\n self._gateway_id = gateway_id", "def roto_wire_player_id(self, roto_wire_player_id):\n\n self._roto_wire_player_id = roto_wire_player_id", "def setAccount(self, account_id):\n self.data_struct['_setAccount'] = account_id", "def response_status_id(self, response_status_id):\n\n self._response_status_id = response_status_id", "def set_id(self, player_id):\n pass", "def set_chain_id(self, chain_id):\n assert isinstance(chain_id, str)\n for atm in self.iter_alt_loc():\n atm.chain_id = chain_id", "def transaction_amount(self, transaction_amount):\n\n self._transaction_amount = transaction_amount", "def set_chain_id(self, chain_id):\n assert isinstance(chain_id, str)\n self.chain_id = chain_id\n\n for frag in self.iter_fragments():\n frag.set_chain_id(chain_id)", "def account_id(self, account_id):\n\n self._account_id = account_id", "def account_id(self, account_id):\n\n self._account_id = account_id", "def account_id(self, account_id):\n\n self._account_id = account_id", "def account_id(self, account_id):\n\n self._account_id = account_id", "def account_id(self, account_id):\n\n self._account_id = account_id", "def account_id(self, account_id):\n\n self._account_id = account_id", "def assignDealer(self):\n\t\t_, index = self.findNthPlayerFromSeat(self.curDealerSeatNo, 1)\n\t\tself.curDealerSeatNo = index", "def account_id(self, account_id):\n self._account_id = account_id", "def payment_reference_id(self, payment_reference_id):\n\n self._payment_reference_id = payment_reference_id", "def setBER(self, BER):\n \n self.BER = BER", "def button_id(self, button_id):\n\n self._button_id = button_id", "def vpc_id(self, vpc_id):\n self._vpc_id = vpc_id", "def related_client_id(self, related_client_id):\n\n self._related_client_id = related_client_id", "def execute_account_payment(self, payer_id, payment_txn, user):\n order = payment_txn.order\n payment = paypalrestsdk.Payment.find(payment_txn.get_param('id'), api=self.api)\n\n if payment.execute({'payer_id': payer_id}):\n with transaction.atomic():\n payment_txn.status = Transaction.STATUS_APPROVED\n payment_txn.add_param('sale_id', unicode(payment.transactions[0].related_resources[0].sale.id), user)\n payment_txn.save()\n\n order.payment_status = Order.PAYMENT_PAID\n order.updated_by = unicode(user)\n order.save()\n else:\n with transaction.atomic():\n payment_txn.status = Transaction.STATUS_FAILED\n payment_txn.error_message = payment.error['message']\n payment_txn.save()\n\n raise DoorstepError('We failed to process your PayPal account at the moment, please try again later!')", "def driver_id(self, driver_id: int):\n if driver_id is None:\n raise ValueError(\"Invalid value for `driver_id`, must not be 
`None`\") # noqa: E501\n\n self._driver_id = driver_id", "def reviewer_id(self, reviewer_id: int):\n\n self._reviewer_id = reviewer_id", "def tenant_id(self, tenant_id):\n\n self._tenant_id = tenant_id", "def tenant_id(self, tenant_id):\n\n self._tenant_id = tenant_id", "def merchant(self, merchant):\n if merchant is None:\n raise ValueError(\"Invalid value for `merchant`, must not be `None`\") # noqa: E501\n\n self._merchant = merchant", "def account_bank_id(self, account_bank_id):\n\n self._account_bank_id = account_bank_id", "def payment_transaction(self, acquirer_id):\n cr, uid, context = request.cr, request.uid, request.context\n payment_obj = request.registry.get('payment.acquirer')\n transaction_obj = request.registry.get('payment.transaction')\n order = request.website.sale_get_order(context=context)\n\n if not order or not order.order_line or acquirer_id is None:\n return request.redirect(\"/shop/checkout\")\n\n assert order.partner_id.id != request.website.partner_id.id\n\n # find an already existing transaction\n tx = request.website.sale_get_transaction()\n if tx:\n tx_id = tx.id\n if tx.sale_order_id.id != order.id or tx.state in ['error', 'cancel'] or tx.acquirer_id.id != acquirer_id:\n tx = False\n tx_id = False\n elif tx.state == 'draft': # button cliked but no more info -> rewrite on tx or create a new one ?\n tx.write(dict(transaction_obj.on_change_partner_id(cr, SUPERUSER_ID, None, order.partner_id.id, context=context).get('values', {}), amount=order.amount_total))\n if not tx:\n tx_id = transaction_obj.create(cr, SUPERUSER_ID, {\n 'acquirer_id': acquirer_id,\n 'type': 'form',\n 'amount': order.amount_total,\n 'currency_id': order.pricelist_id.currency_id.id,\n 'partner_id': order.partner_id.id,\n 'partner_country_id': order.partner_id.country_id.id,\n 'reference': request.env['payment.transaction'].get_next_reference(order.name),\n 'sale_order_id': order.id,\n }, context=context)\n request.session['sale_transaction_id'] = tx_id\n tx = transaction_obj.browse(cr, SUPERUSER_ID, tx_id, context=context)\n\n # update quotation\n request.registry['sale.order'].write(\n cr, SUPERUSER_ID, [order.id], {\n 'payment_acquirer_id': acquirer_id,\n 'payment_tx_id': request.session['sale_transaction_id']\n }, context=context)\n\n return payment_obj.render(\n cr, SUPERUSER_ID, tx.acquirer_id.id,\n tx.reference,\n order.amount_total,\n order.pricelist_id.currency_id.id,\n partner_id=order.partner_shipping_id.id or order.partner_invoice_id.id,\n tx_values={\n 'return_url': '/shop/payment/validate',\n },\n context=dict(context, submit_class='btn btn-primary', submit_txt=_('Оформить')))", "def bank_transaction_code(self, bank_transaction_code):\n\n self._bank_transaction_code = bank_transaction_code", "def SetToolId(self, id):\r\n\r\n self.tool_id = id", "def earnings_rate_id(self, earnings_rate_id):\n if earnings_rate_id is None:\n raise ValueError(\n \"Invalid value for `earnings_rate_id`, must not be `None`\"\n ) # noqa: E501\n\n self._earnings_rate_id = earnings_rate_id", "def save(self):\n if not self.fileKey:\n log.error(\"attempted to save a closed wallet\")\n return\n encrypted = self.fileKey.encrypt(tinyjson.dump(self).encode()).hex()\n w = tinyjson.dump({\n \"keyparams\": self.fileKey.params(),\n \"wallet\": encrypted,\n })\n helpers.saveFile(self.path, w)", "def sent_by_recipient_id(self, sent_by_recipient_id):\n\n self._sent_by_recipient_id = sent_by_recipient_id", "def warehouse_id(self, warehouse_id):\n\n self._warehouse_id = warehouse_id", "def enclosure_id(self, 
enclosure_id):\n\n self._enclosure_id = enclosure_id", "def trade_reduced_id(self, trade_reduced_id):\n\n self._trade_reduced_id = trade_reduced_id", "def create_wallet(self, walletName):\n return", "def setBER(self, ber: float) -> float:\n self.BER = ber", "def person_id(self, person_id):\n\n self._person_id = person_id", "def set_writer(self, writer):\n self.writer = writer", "def payment_type_id(self, payment_type_id):\n\n self._payment_type_id = payment_type_id", "def set_chain_id(self, chain_id):\n assert isinstance(chain_id, str)\n self.chain_id = chain_id\n\n for atm in self.iter_atoms():\n atm.set_chain_id(chain_id)", "async def create_wallet(self, user_id: str) -> None:\n self._wallet_id = self.generate_wallet_id()\n\n transaction = Transaction(\n wallet_id=self.wallet_id,\n type=TransactionType.CREATE,\n data={\"amount\": self.DEFAULT_BALANCE},\n nonce=None,\n )\n\n # todo: create separate user storage\n user_pk = f\"{user_id}{self.USER_KEY_POSTFIX}\"\n\n try:\n await self.storage.transaction_write_items(\n items=[\n # create transaction record\n self.storage.item_factory.put_idempotency_item(\n pk=transaction.unique_id, data=transaction.as_dict()\n ),\n # create wallet\n self.storage.item_factory.put_idempotency_item(\n pk=self.unique_id,\n data={self.BALANCE_KEY: self.DEFAULT_BALANCE},\n ),\n # create link between wallet and user\n self.storage.item_factory.put_idempotency_item(\n pk=user_pk, data={self.USER_WALLET_KEY: self.wallet_id}\n ),\n ]\n )\n except storage.exceptions.TransactionMultipleError as e:\n if e.errors[0]:\n raise crud.exceptions.WalletTransactionAlreadyRegisteredError(\n str(e.errors[0])\n )\n\n raise crud.exceptions.WalletAlreadyExistsError(\n f\"Wallet already exists for the user {user_pk}\"\n )", "def set_follower(self, follower):\n self.follower = follower", "def slot_wallet_changed(self, gox, _dummy):\r\n pass", "def wallet_name(self) -> str:\n\n return self._wallet_name", "def set_chain_id(self, chain_id):\n ## check for conflicting chain_id in the structure\n if self.model is not None:\n chk_chain = self.model.get_chain(chain_id)\n if chk_chain is not None or chk_chain != self:\n raise ChainOverwrite()\n\n Segment.set_chain_id(self, chain_id)\n\n ## resort the parent structure\n if self.model is not None:\n self.model.chain_list.sort()", "async def test_fail_wrong_wallet_id(self, conn, user_with_wallet):\n amount = Decimal(2.5)\n\n with pytest.raises(WalletDoesNotExists) as exc:\n await add_to_wallet(\n conn,\n wallet_id=2,\n amount=amount,\n )\n assert str(exc.value) == 'Wallet does not exists'", "def set_writer(self, _writer):\n self.writer = _writer", "def back_reference_transaction_id(self, back_reference_transaction_id):\n\n self._back_reference_transaction_id = back_reference_transaction_id", "def pay_fee(self, fee):\n self.wallet -= fee", "def target_version_id(self, target_version_id):\n\n self._target_version_id = target_version_id", "def period_id(self, period_id):\n\n self._period_id = period_id", "def sent_by_user_id(self, sent_by_user_id):\n\n self._sent_by_user_id = sent_by_user_id", "def player_b_id(self, player_b_id):\n\n self._player_b_id = player_b_id", "def financial_offer_id(self, financial_offer_id):\n\n self._financial_offer_id = financial_offer_id", "def signer_id_type(self, signer_id_type):\n\n self._signer_id_type = signer_id_type", "def attempt_id(self, attempt_id):\n\n self._attempt_id = attempt_id", "def attempt_id(self, attempt_id):\n\n self._attempt_id = attempt_id", "def payment_date(self, payment_date):\n\n 
self._payment_date = payment_date", "def payment_date(self, payment_date):\n\n self._payment_date = payment_date", "def create_wallet_transfer(self, sourceWalletId, destinationWalletId, amount, currencyCode):\n return", "def region_id(self, region_id):\n\n self._region_id = region_id", "def buyTradedVal(self, buyTradedVal):\n\n self._buyTradedVal = buyTradedVal", "def _Dynamic_Rollback(self, transaction, transaction_response,\n request_id=None):\n transaction.set_app(self.project_id)\n\n try:\n del self.__tx_actions[transaction.handle()]\n except KeyError:\n pass\n\n self._RemoteSend(transaction, transaction_response, \"Rollback\", request_id)\n \n return transaction_response" ]
[ "0.7696529", "0.60767055", "0.56332916", "0.56299317", "0.5272401", "0.5272401", "0.5213051", "0.5161533", "0.51283133", "0.5097511", "0.4929194", "0.48645753", "0.48536748", "0.48232916", "0.4775775", "0.47211295", "0.47165722", "0.4694163", "0.46884617", "0.4682201", "0.46582586", "0.4654451", "0.46310508", "0.46160367", "0.45630452", "0.45415622", "0.45124477", "0.45124477", "0.45003188", "0.44916996", "0.4485404", "0.44851205", "0.44840613", "0.44625616", "0.44425592", "0.4437669", "0.44352424", "0.44340873", "0.44287217", "0.44245887", "0.44144008", "0.44078076", "0.4400751", "0.4400751", "0.4400751", "0.4400751", "0.4400751", "0.4400751", "0.439078", "0.43881828", "0.43826675", "0.43748286", "0.4372818", "0.43527657", "0.43237534", "0.42958722", "0.42867443", "0.4253922", "0.42526284", "0.42526284", "0.4234864", "0.42340237", "0.421868", "0.42179352", "0.42025787", "0.4195425", "0.41910407", "0.41896114", "0.41885078", "0.4182257", "0.41777664", "0.41766298", "0.41750005", "0.4173255", "0.4157686", "0.41505402", "0.4145466", "0.4142146", "0.41299966", "0.41191256", "0.4116482", "0.4116312", "0.4114315", "0.41091117", "0.4089075", "0.4081695", "0.40815058", "0.405305", "0.40392867", "0.4028237", "0.40255752", "0.4021674", "0.4002526", "0.4002526", "0.4002053", "0.4002053", "0.39999598", "0.39961088", "0.3991199", "0.39866802" ]
0.84063405
0
Sets the amount of this EscrowTransactionResponse.
def amount(self, amount):

    self._amount = amount
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_amount(self, amount):\n self.amount = amount", "def transaction_amount(self, transaction_amount):\n\n self._transaction_amount = transaction_amount", "def amount(self, amount):\n if amount is None:\n raise ValueError(\"Invalid value for `amount`, must not be `None`\") # noqa: E501\n\n self._amount = amount", "def amount(self, amount):\n if amount is None:\n raise ValueError(\"Invalid value for `amount`, must not be `None`\") # noqa: E501\n\n self._amount = amount", "def amount(self, amount):\n if self.local_vars_configuration.client_side_validation and amount is None: # noqa: E501\n raise ValueError(\"Invalid value for `amount`, must not be `None`\") # noqa: E501\n\n self._amount = amount", "def account_amount(self, account_amount):\n\n self._account_amount = account_amount", "def amount(self, amount: float):\n if amount is None:\n raise ValueError(\"Invalid value for `amount`, must not be `None`\") # noqa: E501\n\n self._amount = amount", "def set(self, amount: float, reason: str = \"\") -> \"Bank\":\n\n self.__record_ledger__(amount - self.balance, reason) # Because math\n self.balance = amount\n return self", "def amount(self) -> int:\n return self._amount", "def installment_amount(self, installment_amount: Amount):\n\n self._installment_amount = installment_amount", "def withdraw(self, amount):\n self.deposit(-amount)", "def amount(self):\n return self._amount", "def amount(self):\n return self.__amount", "def amount(self):\n return self.__amount", "def add(self, amount):\n self.amount += amount", "def withdraw(self, amount):\r\n self.balance = self.balance - amount\r\n self.transactions.append(-amount)\r\n return amount", "def withdraw(self, amount):\n self.balance -= amount", "def amount(self):\r\n return self._data['amount']", "def total_amount(self, total_amount):\n\n self._total_amount = total_amount", "def earnCoin(self, amount):\n self.coins += amount", "def deposit(self, amount):\n self.balance += amount", "def deposit(self, amount):\n self.balance += amount", "def tax_amount(self, tax_amount):\n\n self._tax_amount = tax_amount", "def tax_amount(self, tax_amount):\n\n self._tax_amount = tax_amount", "def amount(self) -> float:\n return self._amount", "def amount(self) -> float:\n return self._amount", "def money(self, money: int):\n\n self._money = money", "def net_amount(self, net_amount):\n\n self._net_amount = net_amount", "def discount_amount(self, discount_amount):\n\n self._discount_amount = discount_amount", "def discount_amount(self, discount_amount):\n\n self._discount_amount = discount_amount", "def discount_amount(self, discount_amount):\n\n self._discount_amount = discount_amount", "def set_total_amount_with_taxes(self, amount):\n self.set_value_into_input_field(self.total_amount_with_taxes_textbox_locator, amount, True)", "def give_raise(self, amount=5000):\n self.salary += amount", "def withdrawMoney(self, withdraw_amount):\r\n self.balance_amt = self.balance_amt - withdraw_amount", "def withdraw(self, amount):\n self.balance -= amount\n if self.balance < 10:\n self.balance -= 5\n self.fees += 5", "def give_raise(self,amount=5000):\n self.salary += amount", "def deposit(self, amount) -> None:\n self._balance += amount\n return None", "def set_adjustment_charge_amount(self, amount):\n self.script_executor(\"var element = document.getElementById('%s'); element.style.display = 'block';\" % self.charge_amount_textbox_id)\n self.set_value_into_input_field(self.adjustment_charge_amount_textbox_locator, amount)", "def set_vat_amount(self, vat_amount):\n 
self.set_value_into_input_field(self.vat_amount_textbox_locator, vat_amount, True)", "def system_amount(self, system_amount):\n\n self._system_amount = system_amount", "def deposit(self, amount):\n self.transactions += [('deposit', amount)]\n self.balance = self.balance + amount\n return self.balance", "def deposit(self, amount):\r\n self.balance = self.balance + amount\r\n amount = abs(amount)\r\n self.transactions.append(+amount)\r\n return amount", "def give_raise(self, amount=5000):\n \n self.annual_salary += amount", "def offset(self, amount):\n self._offset = amount\n return self", "def update_amount(self, new_amount=None):\n if not new_amount:\n new_amount = self.amount\n if new_amount < self.min:\n new_amount = self.min\n if new_amount > self.max:\n new_amount = self.max\n self.amount = new_amount\n self.build_bar()", "def transaction_amount_paid(self, transaction_amount_paid):\n\n self._transaction_amount_paid = transaction_amount_paid", "def deposit(self, amount):\n self.dep = amount\n self.balance += self.dep", "def get_amount(self):\n\t\tif self.amount is not None:\n\t\t\treturn self.amount\n\t\treturn abort(400, {\"message\" : \"please provide the amount to process\"})", "def pay(self, amt: float):\n self._money += amt", "def deposit(self, amount, description=''):\n self.ledger.append({'amount': amount, 'description': description})", "def deposit(self, amount):\n self.balance = self.balance + amount\n return self.balance", "def limit(self, amount):\n self._limit = amount\n return self", "def getAmount(self):\n return self.amount", "def instructed_amount(self, instructed_amount):\n\n self._instructed_amount = instructed_amount", "def set_number_served(self, amount):\n self.number_served = amount", "def weee_tax_applied_row_amount(self, weee_tax_applied_row_amount):\n\n self._weee_tax_applied_row_amount = weee_tax_applied_row_amount", "def deposit(self, amount):\n self.balance += amount\n self.transactions.append((\"Deposit\", amount))\n print \"Your new balance is $%d.\" % self.balance", "def bet(self, amount):\n self._chips -= amount\n self._bet += amount\n assert self._chips >= 0", "def set_bribe(self, bribe_amount):\r\n self.bribe = bribe_amount", "def deposit(self, amount):\n self.balance += amount\n return self.balance", "def deposit(self, amount):\n self.balance += amount\n return self.balance", "def deposit(self, amount):\n self.balance += amount\n return self.balance", "def fixed_amount(self, fixed_amount):\n\n self._fixed_amount = fixed_amount", "def fixed_amount(self, fixed_amount):\n\n self._fixed_amount = fixed_amount", "def deposit(self, amount):\n self.__balance += amount\n return self.__balance", "def set_credit_amount(self, credit_amount):\n self.set_value_into_input_field(self.credit_amount_textbox_locator, credit_amount)", "def make_payment(self,amount):\n self._balance-=amount", "def give_raise(self, amount=5000):\n\t\tself.annual_salary += amount", "def deposit(self, amount, explanation = ''):\n self.ledger.append({'amount':amount, 'explanation': explanation})", "def open(self, amount: float) -> None:\n if amount < 0:\n raise ValueError(\"Amount must not be negative.\")\n self.__updated_total = round((self.get_total() + amount) * 100.0)\n self.__open = True", "def balance(self, balance):\n\n self._balance = balance", "def balance(self, balance):\n\n self._balance = balance", "def balance(self, balance):\n\n self._balance = balance", "def quantity(self, value):\n self._quantity = Decimal(value)", "def withdraw(self, amount):\n self.withdrw = amount\n \n if 
(self.balance-self.withdrw) < 0:\n self.balance = self.balance - 5 - self.withdrw\n self.fee += 5\n else:\n self.balance -= self.withdrw", "def set_balance(self, balance=0.0):\n self.balance = balance", "def set_balance(self, balance=0.0):\n self.balance = balance", "def replenish(self, amount: int):\n self._inventory += amount", "def transaction_percentage(self, transaction_percentage):\n\n self._transaction_percentage = transaction_percentage", "def amount(self):\n return(self.order_master.amount)", "def set_charges_grid_amount(self, amount):\n self.set_value_in_grid_column(self.charges_grid_div_id, self.amount_column_name, amount, True)\n page_header_element = self.wait().until(EC.element_to_be_clickable(self.page_header_locator), 'page header locator not found before specified time out')\n page_header_element.click()", "def rebuy(self, amount):\n self._chips += amount\n self._decrease_money(amount)", "def set_food_level(self, amount):\n self.plant = amount", "def increment(self, amount):\n pass", "def set_total_amount(self, total_amount):\n self.set_value_into_input_field(self.total_amount_textbox_locator, total_amount, True)\n total_amount_label_locator = (By.XPATH, \"//label[text()='Total Amount']\")\n self.click_element(total_amount_label_locator)", "def _set_instructed_amount_33B(self, val):\n self.swift_obj.CurrencyInstructedAmount = val\n self.swift_obj.CurrencyInstructedAmount.swiftTag = \"33B\"", "def make_payment(self, amount):\n if not isinstance(amount, (int, float)):\n raise TypeError()\n if amount < 0: raise ValueError()\n self._balance -= amount", "def addMoney(self, deposit_amount):\r\n self.balance_amt = self.balance_amt + deposit_amount", "def bet(self, amount: float) -> None:\n if amount <= self.balance:\n self.__bet = amount\n else:\n raise ValueError(\"Amount exceeds balance.\")", "def set_transfer(to, amount):\n balance = ebb.balanceOf(to)\n ebb.approve(accounts[0], balance, {\"from\": to})\n ebb.transferFrom(to, accounts[0], balance, {\"from\": _cfg.OWNER})\n assert ebb.balanceOf(to) == 0\n ebb.transfer(to, Cent(amount), {\"from\": _cfg.OWNER})", "def withdraw(self, amount):\r\n balance = self['get']('balance')\r\n if amount > balance:\r\n return 'Insufficient funds'\r\n self['set']('balance', balance - amount)\r\n return self['get']('balance')", "def make_payment(self, amount):\n if not isinstance(amount, (int, float)):\n raise TypeError('Amount must be numeric')\n self._balance -= amount" ]
[ "0.7653338", "0.7072966", "0.6543301", "0.6543301", "0.64166534", "0.6306864", "0.6261621", "0.6219271", "0.609621", "0.6081896", "0.60315126", "0.6025742", "0.60226005", "0.60226005", "0.6014517", "0.5932473", "0.5896679", "0.5890197", "0.5884229", "0.58601516", "0.58400244", "0.58400244", "0.5832202", "0.5832202", "0.58124423", "0.58124423", "0.5783945", "0.57797444", "0.5738721", "0.5738721", "0.5738721", "0.5724093", "0.57015634", "0.57008964", "0.56990325", "0.5678706", "0.566764", "0.5654549", "0.5626078", "0.5623535", "0.56202275", "0.55967224", "0.5590397", "0.55886114", "0.5588086", "0.5588066", "0.5568964", "0.5566321", "0.5562391", "0.5534879", "0.5531191", "0.5525066", "0.55249804", "0.5524095", "0.5522377", "0.55193835", "0.5518317", "0.5510816", "0.548278", "0.546913", "0.546913", "0.546913", "0.5459996", "0.5459996", "0.54489213", "0.54377365", "0.5382461", "0.5377704", "0.5376225", "0.5342546", "0.5341688", "0.5341688", "0.5341688", "0.531795", "0.53073084", "0.52968466", "0.52968466", "0.5290549", "0.52669334", "0.52599114", "0.525983", "0.52512294", "0.5219827", "0.5202179", "0.519754", "0.5190271", "0.5186723", "0.5186603", "0.5168204", "0.51520586", "0.51332915", "0.51271456" ]
0.76107574
9
Sets the withdrawn of this EscrowTransactionResponse.
def withdrawn(self, withdrawn):

    self._withdrawn = withdrawn
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def withdrawMoney(self, withdraw_amount):\r\n if (self.balance_amt - withdraw_amount) > 0:\r\n self.balance_amt = self.balance_amt - withdraw_amount\r\n else:\r\n raise WithdrawError #Exception('Overdraft withdrawal Error. Cannot withdraw more than amount in account balance: {}'.format(self.balance_amt))\r", "def withdraw(self, responder):\n self._apply_decision(self.Status.WITHDRAWN, responder)", "def withdraw(self,withdrawal_money):\r\n if self.balance < withdrawal_money:\r\n print(\"Funds are insufficient\")\r\n \r\n else:\r\n self.balance -= withdrawal_money\r\n print(\"Withdrawal Accepted\")", "def withdraw(self, amount):\n\n print(\"\\nWithdrawal - {self.name}\".format(self=self))\n\n # retrieves the available balance in the account\n availableBalance = self.getAvailableBalance()\n \n # checks for negative amount value \n if amount < 0:\n print(\"Cannot withdraw £{0:.2f}\".format(amount))\n print(\"Deposit amount cannot be a negative value.\")\n\n # checks whether amount requested is greater than the available balance\n elif amount > availableBalance:\n print(\"Cannot withdraw £{0:.2f}\".format(amount))\n print(\"Insufficient funds.\")\n\n # subtracts amount from account balance\n else:\n self.balance -= amount\n print(\"{0} has withdrew £{1:.2f}. New balance is £{2:.2f}\".format(self.name, amount, self.balance))", "def withdraw(self, amount, trigger_transaction, trans=None):\n\n #\n # validates the amount is positive\n self.validate_amount(amount)\n\n #\n # Validate the user has the amount for the withdraw\n if not self.check_sufficient_funds(amount):\n raise OverdraftException(self.user.username)\n\n #\n # creates the transaction\n category = TransactionType.objects.get(pk=TransactionTypeConstants.BonusCashWithdraw.value)\n\n #\n # makes the amount negative because it is a withdrawal\n self.create(category, -amount, trans)\n self.transaction_detail.trigger_transaction = trigger_transaction\n self.transaction_detail.save()\n\n Logger.log(ErrorCodes.INFO,\"Bonus Cash Withdraw\", self.user.username+\" withdrew \"+str(amount)+\" \"+self.accountName+\" from their account.\")", "def withdraw(self, amount):\n if self.overdrawn:\n print('You have overdrawn, please add more money!')\n return self.balance\n self.balance = self.balance - amount\n return self.balance", "def withdrawMoney(self, withdraw_amount):\r\n self.balance_amt = self.balance_amt - withdraw_amount", "def withdraw(self, account_number: int, withdrawal: float) -> bool: \n if (withdrawal <= self._accounts[account_number][1]):\n self._accounts[account_number][1] -= withdrawal\n return True\n else:\n return False", "def Withdrawn(self, default=[None]):\n return self.data.get('metadata', {}).get('withdrawn', default)", "def post_cancel_withdraw(self, withdraw_id: 'int') -> int:\n params = {\n \"withdraw-id\": withdraw_id\n }\n\n from huobi.service.wallet.post_cancel_withdraw import PostCancelWithdrawService\n return PostCancelWithdrawService(params).request(**self.__kwargs)", "def withdraw(self, amount):\n self.transactions += [('withdraw', amount)]\n if amount > self.balance:\n return 'Insufficient funds'\n self.balance = self.balance - amount\n return self.balance", "def withdraw(self, request, *args, **kwargs):\n account = self.get_object()\n account_serializer = self.get_serializer()\n value = request.data.get(\"valor\", None)\n\n try:\n withdraw_result = account_serializer.withdraw(value, account)\n except ValueError as ve:\n return Response({\"detail\": \"Could not withdraw: {0}.\".format(ve),\n \"status_code\": 
status.HTTP_400_BAD_REQUEST}, status=status.HTTP_400_BAD_REQUEST)\n\n return Response(withdraw_result)", "def withdrawal(cls, amount):\n if amount >= 0 and cls.is_logged_in():\n cls.__current_acct.__transaction(-amount)\n else:\n print('withdrawal error')", "def withdraw(self, amount):\n self.withdrw = amount\n \n if (self.balance-self.withdrw) < 0:\n self.balance = self.balance - 5 - self.withdrw\n self.fee += 5\n else:\n self.balance -= self.withdrw", "def withdraw(self, amount):\n self.deposit(-amount)", "def withdraw(self, cr, uid, ids, amount, context=None):\n record = self.browse(cr, uid, ids, context=context)[0]\n current_amount = record.current_amount\n withdraw_amount = record.withdraw_amount\n if amount > current_amount:\n raise osv.except_osv(_('Constraint Error'), _(\"The the amount is greater than the Current Money!\"))\n\n record.write({'current_amount':current_amount - amount,\n 'withdraw_amount':withdraw_amount + amount })\n return True", "def withdraw(self, amount):\n if amount > self.balance:\n raise ValueError('insufficient funds to withdraw $%.2f' % amount)\n self.balance -= amount\n return self.balance", "def withdraw(self, amount):\r\n balance = self['get']('balance')\r\n if amount > balance:\r\n return 'Insufficient funds'\r\n self['set']('balance', balance - amount)\r\n return self['get']('balance')", "def add_withdraw(self, withdraw_id: str, tx_id: str, apply_time: int, asset: str, amount: float, fee: float,\n auto_commit: bool = True):\n row = (withdraw_id, tx_id, apply_time, asset, amount, fee)\n self.add_row(tables.SPOT_WITHDRAW_TABLE, row, auto_commit=auto_commit)", "def withdraw(self, amount):\n if amount > self.balance:\n return 'Insufficient funds'\n self.balance = self.balance - amount\n return self.balance", "def withdraw(self, amount):\n self.balance -= amount", "def test_early_out_withdrawal(self):\n with FakeClock(TIME_1):\n response = self.send_post(\"Participant\", self.participant)\n participant_id = response[\"participantId\"]\n response[\"providerLink\"] = [self.provider_link_2]\n response[\"withdrawalStatus\"] = \"EARLY_OUT\"\n response[\"withdrawalTimeStamp\"] = 1563907344169\n response[\"suspensionStatus\"] = \"NOT_SUSPENDED\"\n response[\"withdrawalReason\"] = \"TEST\"\n response[\"withdrawalReasonJustification\"] = \"This was a test account.\"\n path = \"Participant/%s\" % participant_id\n self.send_put(path, response, headers={\"If-Match\": 'W/\"1\"'})\n participant = self.send_get(path)\n self.assertEqual(participant[\"withdrawalStatus\"], \"EARLY_OUT\")\n self.assertEqual(participant[\"withdrawalTime\"], '2018-01-01T00:00:00')\n self.assertEqual(participant[\"withdrawalAuthored\"], '2019-07-23T18:42:24')", "def register_withdraw(self, withdraw_intent): \n if withdraw_intent > 0:\n self.teo.register_withdraw(self, withdraw_intent)", "def withdraw(self, amount):\n message = self.account.withdraw(float(amount))\n if message:\n return message\n else:\n self.myView.displayAccount()\n return \"success\"", "def Withdrawal(self):\n self.amount = (int)(raw_input (\" Enter your withdrawal amount \"))\n return self.amount", "def withdrawal(self, amount):\n if self.balance - amount < self.minimum_balance:\n print \"This would take you below your minimum balance.\"\n return\n else:\n self.balance -= amount\n print \"Please take your cash.\"\n print \"Your balance is now $%d.\" % self.balance\n self.transactions.append((\"Withdrawal\", amount))", "def test_withdraw_success(client):\n usd = Asset.objects.create(\n code=\"USD\",\n 
issuer=Keypair.random().public_key,\n sep24_enabled=True,\n withdrawal_enabled=True,\n distribution_seed=Keypair.random().secret,\n )\n response = client.post(\n WITHDRAW_PATH, {\"asset_code\": usd.code, \"amount\": \"100\"}, follow=True\n )\n content = response.json()\n assert content[\"type\"] == \"interactive_customer_info_needed\"\n assert \"100\" in content[\"url\"]\n assert content.get(\"id\")\n\n t = Transaction.objects.filter(id=content.get(\"id\")).first()\n assert t\n assert t.stellar_account == \"test source address\"\n assert t.account_memo is None\n assert t.muxed_account is None\n assert t.asset.code == usd.code\n assert t.protocol == Transaction.PROTOCOL.sep24\n assert t.kind == Transaction.KIND.withdrawal\n assert t.status == Transaction.STATUS.incomplete\n assert t.receiving_anchor_account is None\n assert t.memo is None\n assert t.memo_type == Transaction.MEMO_TYPES.hash\n assert t.from_address is None", "async def legwithdraw(self, ctx):\n\n new_value = await self.toggle_dm_setting(ctx.author.id, \"leg_session_withdraw\")\n\n if new_value:\n message = f\":white_check_mark: You will now receive DMs when you are a member of the \" \\\n f\"{self.bot.mk.LEGISLATURE_CABINET_NAME} and someone withdraws their Bill or Motion. \" \\\n f\"Note that you will never get a DM when a member of the \" \\\n f\"{self.bot.mk.LEGISLATURE_CABINET_NAME} is the one withdrawing.\"\n\n else:\n message = f\":white_check_mark: You will no longer receive DMs when you are a member of the \" \\\n f\"{self.bot.mk.LEGISLATURE_CABINET_NAME} and someone withdraws their Bill or Motion.\"\n\n await ctx.send(message)", "def withdraw(self, amount):\n if amount > self.balance:\n raise RuntimeError('Amount greater than available balance.')\n self.balance -= amount\n return self.balance", "def withdraw(self, amount):\n if amount > self.balance:\n raise RuntimeError('Amount greater than available balance.')\n self.balance -= amount\n return self.balance", "def get_withdrawal(self, withdrawal):\r\n method = self.wallet_endpoints['get_withdrawal']['method']\r\n url = self.base_url + self.wallet_endpoints['get_withdrawal']['url'].format(withdrawalId=withdrawal)\r\n req = requests.request(method, url, headers=self.get_auth_headers())\r\n res = req.json()\r\n\r\n if res['success'] == True:\r\n return res[\"result\"]\r\n else:\r\n return res", "def safeWithdrawal(self):\n if self._after_dead_line():\n # each contributor can withdraw the amount they contributed if the goal was not reached\n if not self._funding_goal_reached.get():\n amount = self._balances[self.msg.sender]\n self._balances[self.msg.sender] = 0\n if amount > 0:\n if self.icx.send(self.msg.sender, amount):\n self.FundTransfer(self.msg.sender, amount, False)\n Logger.debug(f'FundTransfer({self.msg.sender}, {amount}, False)', TAG)\n else:\n self._balances[self.msg.sender] = amount\n\n # The sales target has been met. 
Owner can withdraw the contribution.\n if self._funding_goal_reached.get() and self._addr_beneficiary.get() == self.msg.sender:\n if self.icx.send(self._addr_beneficiary.get(), self._amount_raised.get()):\n self.FundTransfer(self._addr_beneficiary.get(), self._amount_raised.get(), False)\n Logger.debug(f'FundTransfer({self._addr_beneficiary.get()},'\n f'{self._amount_raised.get()}, False)', TAG)\n # reset amount_raised\n self._amount_raised.set(0)\n else:\n # if the transfer to beneficiary fails, unlock contributors balance\n Logger.debug(f'Failed to send to beneficiary!', TAG)\n self._funding_goal_reached.set(False)", "def withdraw(holder):\n account = Account.query.filter_by(holder=holder).first()\n amount = request.json.get(\"amount\")\n if not account:\n return jsonify({\"error\": \"Account does not exist\"})\n if account.balance >= amount:\n account.balance -= amount\n db.session.commit()\n return jsonify(\n {\n \"holder\": account.holder,\n \"balance\": account.balance,\n \"message\": \"The withdraw has been processed\",\n }\n )\n return jsonify({\"error\": \"The account balance is insufficient\"})", "def withdraw(account, amount):\n pass", "def withdraw(self, amount):\n if amount < 0:\n return \"Amount must be >= 0\"\n elif self._balance < amount:\n return \"Insufficient funds\"\n else:\n self._balance -= amount\n return None", "def withdrawal(self, amount, explanation = ''):\n if (self.get_funds(amount)):\n self.ledger.append({'amount':-amount, 'explanation': explanation})\n return True\n else:\n return False", "def withdraw(self, asset: Asset, address: str, amount: float,\n receive_window: Optional[int] = None):\n api_params = {\n 'asset': asset.value,\n 'address': address,\n 'amount': amount,\n 'timestamp': get_current_time_milliseconds()\n }\n\n if receive_window is not None:\n api_params['receiveWindow'] = receive_window\n\n return self.request.post(path='/withdraw', json_data=api_params)", "def withdraw(self, amount, description=''):\n state = True\n if not(self.check_funds(amount)):\n state = False\n else:\n self.ledger.append({'amount': -amount, 'description': description})\n return state", "def withdraw_skill_from_certification_v1(self, skill_id, withdraw_request, **kwargs):\n # type: (str, WithdrawRequest_d09390b7, **Any) -> Union[ApiResponse, object, StandardizedError_f5106a89, BadRequestError_f854b05]\n operation_name = \"withdraw_skill_from_certification_v1\"\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'skill_id' is set\n if ('skill_id' not in params) or (params['skill_id'] is None):\n raise ValueError(\n \"Missing the required parameter `skill_id` when calling `\" + operation_name + \"`\")\n # verify the required parameter 'withdraw_request' is set\n if ('withdraw_request' not in params) or (params['withdraw_request'] is None):\n raise ValueError(\n \"Missing the required parameter `withdraw_request` when calling `\" + operation_name + \"`\")\n\n resource_path = '/v1/skills/{skillId}/withdraw'\n resource_path = resource_path.replace('{format}', 'json')\n\n path_params = {} # type: Dict\n if 'skill_id' in params:\n path_params['skillId'] = params['skill_id']\n\n query_params = [] # type: List\n\n header_params = [] # type: List\n\n body_params = None\n if 'withdraw_request' in params:\n body_params = params['withdraw_request']\n header_params.append(('Content-type', 'application/json'))\n header_params.append(('User-Agent', self.user_agent))\n\n # Response Type\n 
full_response = False\n if 'full_response' in params:\n full_response = params['full_response']\n\n # Authentication setting\n access_token = self._lwa_service_client.get_access_token_from_refresh_token()\n authorization_value = \"Bearer \" + access_token\n header_params.append(('Authorization', authorization_value))\n\n error_definitions = [] # type: List\n error_definitions.append(ServiceClientResponse(response_type=None, status_code=204, message=\"Success.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.bad_request_error.BadRequestError\", status_code=400, message=\"Server cannot process the request due to a client error.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.skill.standardized_error.StandardizedError\", status_code=401, message=\"The auth token is invalid/expired or doesn&#39;t have access to the resource.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.bad_request_error.BadRequestError\", status_code=403, message=\"The operation being requested is not allowed.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.skill.standardized_error.StandardizedError\", status_code=404, message=\"The resource being requested is not found.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.skill.standardized_error.StandardizedError\", status_code=429, message=\"Exceeds the permitted request limit. Throttling criteria includes total requests, per API, ClientId, and CustomerId.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.skill.standardized_error.StandardizedError\", status_code=500, message=\"Internal Server Error.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.skill.standardized_error.StandardizedError\", status_code=503, message=\"Service Unavailable.\"))\n\n api_response = self.invoke(\n method=\"POST\",\n endpoint=self._api_endpoint,\n path=resource_path,\n path_params=path_params,\n query_params=query_params,\n header_params=header_params,\n body=body_params,\n response_definitions=error_definitions,\n response_type=None)\n\n if full_response:\n return api_response\n \n return None", "def withdraw(self, amount):\r\n self.balance = self.balance - amount\r\n self.transactions.append(-amount)\r\n return amount", "def withdraw_funds(self, dt, amount):\n # Check that amount is positive and that there is\n # enough in the portfolio to withdraw the funds\n if dt < self.current_dt:\n raise ValueError(\n 'Withdrawal datetime (%s) is earlier than '\n 'current portfolio datetime (%s). Cannot '\n 'withdraw funds.' % (dt, self.current_dt)\n )\n self.current_dt = dt\n\n if amount < 0:\n raise ValueError(\n 'Cannot debit negative amount: '\n '%0.2f from the portfolio.' % amount\n )\n\n if amount > self.cash:\n raise ValueError(\n 'Not enough cash in the portfolio to '\n 'withdraw. %s withdrawal request exceeds '\n 'current portfolio cash balance of %s.' 
% (\n amount, self.cash\n )\n )\n\n self.cash -= amount\n\n self.history.append(\n PortfolioEvent.create_withdrawal(self.current_dt, amount, self.cash)\n )\n\n self.logger.info(\n '(%s) Funds withdrawn from portfolio \"%s\" '\n '- Debit: %0.2f, Balance: %0.2f' % (\n self.current_dt.strftime(settings.LOGGING[\"DATE_FORMAT\"]),\n self.portfolio_id, round(amount, 2),\n round(self.cash, 2)\n )\n )", "def withdraw(self, amount):\n self.balance -= amount\n if self.balance < 10:\n self.balance -= 5\n self.fees += 5", "def save(self, *args, **kwargs):\n wallet = self.wallet.withdraw(self.value)\n super(Payment, self).save(*args, **kwargs)", "def withdraw(self, currency, amount, address):\n pass", "def withdraw(self, amount, budget):\r\n if budget != \"Total Balance\":\r\n assert budget in self.budgets, \"Specified budget doesn't exist\"\r\n self.budgets[budget] -= float(amount)\r\n self.balance -= float(amount)", "def isdrawn(self):\n return hasattr(self, 'drawn')", "def withdraw(self, currency, amount, address):\n return self.__call__('balance', 'withdrawcurrency',\n {\"currencyname\": currency, \n \"quantity\": amount, \n \"address\": address})", "def do_withdraw(self,args):\n try:\n address = raw_input(\"Enter the address you want to withdraw to: \")\n totalbalance = prompt(\"Do you want to withdraw your ENTIRE balance?\",False)\n if totalbalance == False:\n amount = D(raw_input(\"Enter the amount of BTC to withdraw: \"))\n else:\n amount,_ = bal()\n \n result = bitstamp.bitcoin_withdraw(address,amount)\n if result:\n print \"%s BTC successfully sent to %s\" % (amount,address)\n else:\n print \"There was an error withdrawing.\"\n except Exception as e:\n traceback.print_exc()\n print \"Unexpected Error: %s\" % e\n self.onecmd('help withdraw')", "def execute_withdraws(self):\n withdraws = [v for v in self.action_register if v['action'] == 'withdraw']\n for withdraw in withdraws:\n self.model.schedule.agents_by_type['Customer'][withdraw['unique_id']].euro_wallet -= withdraw['value']\n self.model.schedule.agents_by_type['Customer'][withdraw['unique_id']].withdrawn_euros += withdraw['value']\n self.model.schedule.agents_by_type['Customer'][withdraw['unique_id']].last_withdraw_tick = self.model.schedule.steps", "def withdraw(self, currency, amount, address):\n return self.api_query('withdraw', {\"currency\": currency, \"amount\": amount, \"address\": address})", "def Withdraw(self, pap_ref):\n\n request = self.messages.ComputePublicAdvertisedPrefixesWithdrawRequest(\n publicAdvertisedPrefix=pap_ref.Name(), project=pap_ref.project)\n\n return self.client.MakeRequests([(self._service, 'Withdraw', request)])", "def draw(self, draw_surface):\n if self.sub_event is not None:\n self.sub_event.draw(draw_surface)\n else:\n self.confirm_response.draw(draw_surface)", "def bank_withdraw_money(stub, request):\n # print(\"In method bank_withdraw_money:\")\n\n try:\n result = stub.Withdraw(request)\n except DatabaseOptFailure:\n return \"IO_Failure\"\n return result", "def withdraw_funds(self, cause_id):\n # Verify if the cause_id exists\n sp.verify(self.data.causes.contains(cause_id))\n # Verify if the sender/reciever is the owner of the cause\n sp.verify(self.data.causes[cause_id].owner == sp.sender)\n # Transfer the collected funds\n sp.send(self.data.causes[cause_id].owner, self.data.causes[cause_id].balance)\n # Reset the amount as it's withdrawn now.\n self.data.causes[cause_id].balance = sp.mutez(0)", "def do_withdraw(self, args):\n \n amount = float(input(\"How much? 
\"))\n \n balance = self.cur.execute(\"SELECT * FROM balance ORDER BY date DESC\").fetchone()[2]\n if amount > balance:\n print(\"Insufficient funds! Withdrawl canceled.\")\n print(\"Use the `balance` command to check your account balance\")\n return\n \n balance -= amount\n now = time()\n self.cur.execute(\"INSERT INTO withdrawls VALUES (?,?)\", (now, amount))\n self.cur.execute(\"INSERT INTO balance VALUES (?,?,?)\", (now, 0.0, balance))\n self.db.commit()\n print(\"Withdrawl complete. Your new balance is $%.2f\" % balance)", "def rule_withdraw(self, st_acct, st_amount, st_idx):\n if self.active_token_ids.get(st_acct):\n # choose from the caller's valid NFT token IDs, if there are any\n idx = int(st_idx * len(self.active_token_ids[st_acct]))\n token_id = self.active_token_ids[st_acct][idx]\n else:\n # if the caller does not own any NFTs, choose from any token ID\n token_ids = self._all_token_ids()\n idx = int(st_idx * len(token_ids))\n token_id = token_ids[idx]\n\n amount = int(st_amount * 10 ** 18)\n if self.active_token_ids.get(st_acct):\n # when the action is possible, don't exceed the max underlying balance\n balance = self.swap.token_info(token_id)[\"underlying_balance\"]\n amount = min(amount, balance)\n\n if self.active_token_ids.get(st_acct):\n self.swap.withdraw(token_id, amount, {\"from\": st_acct})\n if balance == amount:\n self.active_token_ids[st_acct].remove(token_id)\n self.used_token_ids.append(token_id)\n else:\n with brownie.reverts():\n self.swap.withdraw(token_id, amount, {\"from\": st_acct})", "def withdraw_money(c_id, amount):\n return ar.withdraw_money(c_id, amount)", "def over_payout(self, over_payout):\n\n self._over_payout = over_payout", "def buyTrdAvg(self, buyTrdAvg):\n\n self._buyTrdAvg = buyTrdAvg", "def test_withdraw_amount_view(self):\n self.account.current_balance = 100000\n self.account.save()\n\n amount = random.randint(10, 100000)\n client.force_authenticate(user=self.account.user, token=self.token)\n url = reverse('customer_withdraw')\n request = client.post(url, {'amount': amount}, format='json')\n self.account.refresh_from_db()\n self.assertEqual(100000-amount, self.account.current_balance)", "def draw(self, draw_surface):\n super().draw(draw_surface)\n if self.active_sell_event is not None:\n self.active_sell_event.draw(draw_surface)", "def test_withdraw_amount_view_with_negative_amount(self):\n self.account.current_balance = 100000\n self.account.save()\n\n client.force_authenticate(user=self.account.user, token=self.token)\n url = reverse('customer_withdraw')\n request = client.post(url, {'amount': -100}, format='json')\n self.assertEqual(400, request.status_code)", "def deposit(self, deposit_money):\r\n self.balance += deposit_money", "def test_withdraw_invalid_operation(client):\n eth = Asset.objects.create(\n code=\"ETH\",\n issuer=Keypair.random().public_key,\n sep24_enabled=True,\n withdrawal_enabled=False,\n distribution_seed=Keypair.random().secret,\n )\n response = client.post(WITHDRAW_PATH, {\"asset_code\": eth.code}, follow=True)\n content = response.json()\n assert response.status_code == 400\n assert content == {\"error\": \"invalid operation for asset ETH\"}", "def deposit_swth_before_withdrawal(request, url):\n\n def tear_down():\n # clean up here\n pass\n\n request.addfinalizer(tear_down)\n\n # encrypted_key = b\"6PYKozqwKwRYi77GN3AwTwXEssJZWbfneYKEYbiSeuNtVTGPFT2Q7EJpjY\"\n # pub_key = \"ANVLCD3xqGXKhDnrivpVFvDLcvkpgPbbMt\"\n client = SwitcheoApi(base_url=\"https://test-api.switcheo.network\")\n\n priv_key_wif = 
'L4FSnRosoUv22cCu5z7VEEGd2uQWTK7Me83vZxgQQEsJZ2MReHbu'\n deposit_response = client.deposit(priv_key_wif=priv_key_wif, asset_id=\"SWTH\", amount=50,\n contract_hash=\"a195c1549e7da61b8da315765a790ac7e7633b82\",\n blockchain=\"neo\")\n print(deposit_response.text)", "def warehouse_officer_confirm_qty(self):\n if (\n self.approve_request_ids is None\n or self.approve_request_ids is False\n ):\n raise UserError(\"No line(s) defined!\")\n self._compute_confirm()\n for line in self.approve_request_ids:\n line._compute_state()\n if any(line.state != \"available\" for line in self.approve_request_ids):\n raise Warning(\n \"Please procure the items that are short in stock or process pending purchase agreements and try again!\"\n )\n else:\n self.state = 'transfer'", "def check_and_withdraw(self, buy, buy_exchange):\n # withdraw only once the buy is complete\n while True:\n buy_status = buy_exchange.trade_status(txid=buy['txid'])\n if buy_status['filled'] == 'success':\n key = self.arb[\"sell\"]\n sub_key = 'kraken_' + self.arb[\"crypto\"] if self.arb['buy'] == 'kraken' else self.arb[\"crypto\"] # The key loads the key to send to. However its different for kraken\n withdrawal = buy_exchange.withdrawal(wallet_currency=self.arb[\"crypto\"],\n amount=buy_status['amount'],\n key=load_config('config.yml')[key][sub_key])\n return withdrawal\n time.sleep(20)", "def withdraw_money():\n print(\"\\n\")\n print(messages.account_credentials)\n u_id = pyip.inputInt(\"Your Id: \", greaterThan=0)\n password = pyip.inputPassword(\"Your Password: \")\n\n credentials = {\"id\":u_id, \"password\":password}\n result = BankOperationsBackend.withdraw_money(credentials)\n start_again() if result else BankOperationsUi.withdraw_money()", "def test_withdraw_success_with_memo(client):\n usd = Asset.objects.create(\n code=\"USD\",\n issuer=Keypair.random().public_key,\n sep24_enabled=True,\n withdrawal_enabled=True,\n distribution_seed=Keypair.random().secret,\n )\n response = client.post(\n WITHDRAW_PATH, {\"asset_code\": usd.code, \"amount\": \"100\"}, follow=True\n )\n content = response.json()\n assert content[\"type\"] == \"interactive_customer_info_needed\"\n assert \"100\" in content[\"url\"]\n assert content.get(\"id\")\n\n t = Transaction.objects.filter(id=content.get(\"id\")).first()\n assert t\n assert t.stellar_account == \"test source address\"\n assert t.account_memo is TEST_ACCOUNT_MEMO\n assert t.muxed_account is None\n assert t.asset.code == usd.code\n assert t.protocol == Transaction.PROTOCOL.sep24\n assert t.kind == Transaction.KIND.withdrawal\n assert t.status == Transaction.STATUS.incomplete\n assert t.memo_type == Transaction.MEMO_TYPES.hash\n assert t.from_address is None", "def test_make_withdrawal(client):\n request_test = client.put('/checks_and_balances/api/v1.0/transaction',json=\n {\n \"account_id\": 1,\n \"transaction\": \"withdrawal\",\n \"amount\": 10.03\n })\n\n json_data = request_test.get_json(force=True)\n\n wanted_result = {\"Withdrawal Done. 
New balance\": \"90.47\"}\n\n assert json_data == wanted_result", "def test_withdraw_interactive_no_txid(client):\n usd = Asset.objects.create(\n code=\"USD\",\n issuer=Keypair.random().public_key,\n sep24_enabled=True,\n withdrawal_enabled=True,\n distribution_seed=Keypair.random().secret,\n )\n response = client.get(f\"{WEBAPP_PATH}?asset_code={usd.code}\", follow=True)\n assert response.status_code == 400\n assert \"transaction_id\" in response.content.decode()", "def payout(self):\n self.close()\n if self.is_paid:\n raise ValueError(\"Already paid out the wager.\")\n self.is_paid = True\n self.paid_on = datetime.datetime.now()\n payouts = self.get_payout_information()\n for info in payouts:\n bet, credits = info[\"bet\"], info[\"won\"]\n player = bet.created_by\n player.credits += credits\n player.save()\n self.save()", "def under_payout(self, under_payout):\n\n self._under_payout = under_payout", "def withdraw_get_txid(self, task_id):\n response = self.query('withdraw_get_txid', params=dict(task_id=task_id))\n return response", "def penup(self):\n if not self._drawing:\n return\n self.pen(pendown=False)", "def prepayment_deposited(self):\n return 'prepayment_deposited' if self.is_fully_paid() else 'awaiting_payment'", "def test_withdraw_muxed_account_success(client):\n usd = Asset.objects.create(\n code=\"USD\",\n issuer=Keypair.random().public_key,\n sep24_enabled=True,\n withdrawal_enabled=True,\n distribution_seed=Keypair.random().secret,\n )\n response = client.post(\n WITHDRAW_PATH,\n {\"asset_code\": usd.code, \"amount\": \"100\", \"account\": TEST_MUXED_ACCOUNT},\n follow=True,\n )\n content = response.json()\n assert content[\"type\"] == \"interactive_customer_info_needed\"\n assert \"100\" in content[\"url\"]\n assert content.get(\"id\")\n\n t = Transaction.objects.filter(id=content.get(\"id\")).first()\n assert t\n assert t.stellar_account == MuxedAccount.from_account(TEST_MUXED_ACCOUNT).account_id\n assert t.muxed_account == TEST_MUXED_ACCOUNT\n assert t.account_memo is None\n assert t.asset.code == usd.code\n assert t.protocol == Transaction.PROTOCOL.sep24\n assert t.kind == Transaction.KIND.withdrawal\n assert t.status == Transaction.STATUS.incomplete\n assert t.memo_type == Transaction.MEMO_TYPES.hash\n assert t.from_address == TEST_MUXED_ACCOUNT", "def weee_tax_applied(self, weee_tax_applied):\n\n self._weee_tax_applied = weee_tax_applied", "def bankerInvest(account, ongAmount):\n # RequireWitness(account)\n if CheckWitness(account) == False:\n # \"Check witness failed!\",\n Notify([\"BankerInvestErr\", 101])\n return False\n\n currentRound = getCurrentRound()\n\n # Require(getRoundGameStatus(currentRound) == STATUS_ON)\n if getRoundGameStatus(currentRound) != STATUS_ON:\n # Please wait for the admin to set initial investment!\n Notify([\"BankerInvestErr\", 102])\n return False\n\n # Require(_transferONG(account, ContractAddress, ongAmount))\n res = _transferONG(account, ContractAddress, ongAmount)\n if res == False:\n # Transfer ONG failed!\n Notify([\"BankerInvestErr\", 103])\n return False\n # try to update banker list\n bankersListKey = concatKey(concatKey(ROUND_PREFIX, currentRound), BANKERS_LIST_KEY)\n bankersListInfo = Get(GetContext(), bankersListKey)\n bankersList = []\n if bankersListInfo:\n bankersList = Deserialize(bankersListInfo)\n if checkInBankerList(account, bankersList):\n bankersList.append(account)\n bankersListInfo = Serialize(bankersList)\n Put(GetContext(), bankersListKey, bankersListInfo)\n else:\n bankersList.append(account)\n bankersListInfo = 
Serialize(bankersList)\n Put(GetContext(), bankersListKey, bankersListInfo)\n\n dividendForBankersPercentage = getDividendForBankersPercentage()\n runningVaultPercentage = getRunningVaultPercentage()\n\n # add dividend to all the bankers, 48%\n dividend = Div(Mul(ongAmount, dividendForBankersPercentage), 100)\n\n # update profit per investment for bankers\n bankersInvestment = getBankersInvestment(currentRound)\n if bankersInvestment != 0:\n profitPerInvestmentForBankersToBeAdd = Div(Mul(dividend, MagnitudeForProfitPerSth), bankersInvestment)\n Put(GetContext(), concatKey(concatKey(ROUND_PREFIX, currentRound), PROFIT_PER_INVESTMENT_FOR_BANKERS_KEY), Add(profitPerInvestmentForBankersToBeAdd, getProfitPerInvestmentForBankers(currentRound)))\n else:\n # there will be no dividend\n dividend = 0\n # add running vault, 50%\n runningVaultToBeAdd = Div(Mul(ongAmount, runningVaultPercentage), 100)\n Put(GetContext(), concatKey(concatKey(ROUND_PREFIX, currentRound), RUNNING_VAULT_KEY), Add(getRunningVault(currentRound), runningVaultToBeAdd))\n\n # add running vault balance\n Put(GetContext(), concatKey(concatKey(ROUND_PREFIX, currentRound), concatKey(BANKER_RUNING_VAULT_BALANCE_PREFIX, account)), Add(getBankerBalanceInRunVault(currentRound, account), runningVaultToBeAdd))\n # update real time running vault\n Put(GetContext(), concatKey(concatKey(ROUND_PREFIX, currentRound), REAL_TIME_RUNNING_VAULT), Add(getRealTimeRunningVault(currentRound), runningVaultToBeAdd))\n\n # treat the rest as the commission fee to admin, 2%\n restOngAmount = Sub(Sub(ongAmount, dividend), runningVaultToBeAdd)\n # update the commission fee\n Put(GetContext(), COMMISSION_KEY, Add(getCommission(), restOngAmount))\n\n # update the account (or the banker) 's dividend\n updateBankerDividend(account)\n # update account's investment\n bankerKey = concatKey(concatKey(ROUND_PREFIX, currentRound), concatKey(BANKER_INVEST_BALANCE_PREFIX, account))\n Put(GetContext(), bankerKey, Add(getBankerInvestment(currentRound, account), ongAmount))\n\n # update total bankers' investment\n Put(GetContext(), concatKey(concatKey(ROUND_PREFIX, currentRound), BANKERS_INVESTMENT_KEY), Add(bankersInvestment, ongAmount))\n\n # update total ong amount\n Put(GetContext(), TOTAL_ONG_KEY, Add(getTotalONG(), ongAmount))\n\n Notify([\"bankerInvest\", currentRound, account, ongAmount])\n\n return True", "def withdraw(self, user_id, money, **kwargs):\n user = User.objects(user_id=user_id).first()\n\n if money > 0:\n if user.balance >= money:\n print('Cantidad retirada: ', money)\n user.balance = float(user.balance) - float(money)\n user.save()\n else:\n print('No hay fondos suficientes para realizar el retiro.')\n else:\n print('No es posible retirar valores negativos.')", "def on_draw(self):\n return self._on_draw", "def strong_sells(self, strong_sells):\n\n self._strong_sells = strong_sells", "def make_withdraw(balance):\n\tdef withdraw(amount):\n\t\t# declare the name 'balance' nonlocal at the top of body of the function in which it is re-assigned\n\t\tnonlocal balance # nonlocal change of the value of balance will happen in the frame of make_withdraw !!!\n\t\tif amount > balance:\n\t\t\treturn 'insufficient funds'\n\t\tbalance = balance - amount # rebind balance in the first non-local frame in which it was bound previously\n\t\treturn balance \n\treturn withdraw", "def withdraw_by_username(self,amount,username):\r\n pass", "def cash_withdrawal(amt):\r\n global withdraw_money\r\n global balance_money\r\n withdraw_money = amt\r\n print(\"Amout enetered : \", 
withdraw_money)\r\n balance_money = balance_money - withdraw_money\r\n print(\"Withdraw success\")", "def withdraw_capital(self,remove_capital):\n if remove_capital <= self.not_invested_capital['value']:\n self.initial_capital['value'] -= remove_capital\n else:\n print(\"ERROR: Not enough not invested capital to be remove.\")\n print(\"Please reduce the quantity to be withdrawn or increase not invested capital\")\n # pending: what should it return? a message?\n return print(self.not_invested_capital)", "def poi_draw_class(self, poi_draw_class):\n\n self._poi_draw_class = poi_draw_class", "def withdraw_cash(self, qtt_100s, qtt_50s, qtt_20s):\n amount = PaperMoneyCounter().cash(qtt_100s, qtt_50s, qtt_20s)\n if (self.__is_logged_in) and (amount <= self.__balance) and (amount <= 1000):\n self.__balance = float(Decimal(str(self.__balance - amount)))\n self.register_operation(self.ACTIONS['WITHDRAWING'], amount)\n return True\n\n return False", "def transaction_amount_paid(self, transaction_amount_paid):\n\n self._transaction_amount_paid = transaction_amount_paid", "def withholding_tax_rate(self, withholding_tax_rate):\n\n self._withholding_tax_rate = withholding_tax_rate", "def face_offs_won(self, face_offs_won):\n\n self._face_offs_won = face_offs_won", "def pendown(self):\n if self._drawing:\n return\n self.pen(pendown=True)", "def action_confirm(self):\n # context = self._context or {}\n inv_obj = self.env['account.invoice']\n\n brw = self.browse(self.ids[0])\n line_ids = brw.line_ids\n if not line_ids:\n raise exceptions.except_orm(\n _('Invalid Procedure!'), _(\"No retention lines\"))\n\n res = [True]\n res += [False for i in line_ids\n if (i.wh_amount <= 0.0 or\n i.base_amount <= 0.0 or\n i.wh_src_rate <= 0.0)]\n if not all(res):\n raise exceptions.except_orm(\n _('Invalid Procedure!'),\n _(\"Verify retention lines do not have Null values(0.00)\"))\n\n res = 0.0\n for i in line_ids:\n res += i.wh_amount\n if abs(res - brw.wh_amount) > 0.0001:\n raise exceptions.except_orm(\n _('Invalid Procedure!'),\n _(\"Check the amount of withholdings\"))\n\n inv_ids = [i.invoice_id.id for i in brw.line_ids]\n if inv_ids:\n inv_obj.write({'wh_src_id': self.ids[0]})\n\n return self.write({'state': 'confirmed'})", "def under_review(self, under_review):\n\n self._under_review = under_review", "def consent(self, consent):\n if consent is None:\n raise ValueError(\"Invalid value for `consent`, must not be `None`\") # noqa: E501\n\n self._consent = consent", "def put(self, saving_transaction_id):\n return UpdateSavingDepositDetail(current_user.id, saving_transaction_id, request)", "def withholding_tax_amount(self, withholding_tax_amount):\n\n self._withholding_tax_amount = withholding_tax_amount", "def set_bankrupt(self):\n if self.status == self.PLAYER_BANKRUPT:\n return\n self.status = self.PLAYER_BANKRUPT\n self.game.player_bankrupted(self)", "def button_confirm_bank(self):\n\n self.unlink_unconfirmed_lines()\n ret = super(AccountBankStatement, self).button_confirm_bank()\n return ret", "def payment_transaction(self, **kwargs):\n # print('WebsiteSaleEventSet')\n order = request.website.sale_get_order()\n values = []\n for line in order.order_line:\n if line.event_set_ok:\n if line.product_id.event_is_expired:\n values.append(_('The registration qqis not longer available'))\n elif line.product_id.event_seats_availability == 'limited':\n cart_qty = int(sum(\n order.order_line.filtered(\n lambda p: p.product_id.id == line.product_id.id).mapped('product_uom_qty')\n )\n )\n avl_qty = 
line.product_id.event_seats_available\n if cart_qty > avl_qty:\n values.append(_('You ask for %s seats but only %s is available') % (cart_qty, avl_qty if avl_qty > 0 else 0))\n if values:\n raise ValidationError('. '.join(values) + '.')\n return super(WebsiteSaleEventSet, self).payment_transaction(**kwargs)" ]
[ "0.5891516", "0.5690135", "0.5676766", "0.5597701", "0.55876815", "0.55855316", "0.55006677", "0.5495458", "0.5472348", "0.54654574", "0.5433065", "0.53317225", "0.5330039", "0.5313943", "0.5277022", "0.52171", "0.51859295", "0.51577157", "0.5134747", "0.5110238", "0.5054822", "0.5024524", "0.5004318", "0.5001757", "0.5001067", "0.4923886", "0.49143445", "0.48868704", "0.48662046", "0.48662046", "0.48464626", "0.48322693", "0.4816753", "0.47950855", "0.47871873", "0.47429064", "0.47073406", "0.47028548", "0.46963686", "0.46621886", "0.46265343", "0.46201965", "0.46002334", "0.4560073", "0.45368037", "0.45070398", "0.4466716", "0.44385993", "0.44265497", "0.44094583", "0.4403325", "0.4369503", "0.43568355", "0.4349269", "0.43427655", "0.43409774", "0.43057296", "0.42833287", "0.42728835", "0.42715913", "0.42430174", "0.42395806", "0.42298666", "0.4229704", "0.4222713", "0.4182867", "0.41776052", "0.41734302", "0.41715643", "0.41543883", "0.41437736", "0.41151813", "0.41144213", "0.41112205", "0.41028765", "0.40781546", "0.4076243", "0.40760848", "0.40757325", "0.40581563", "0.40567684", "0.40516642", "0.40488458", "0.40382218", "0.40330935", "0.40290645", "0.4012732", "0.4009098", "0.40060863", "0.40033597", "0.39965245", "0.39940503", "0.3992947", "0.39882872", "0.39815083", "0.39710993", "0.3951131", "0.39407212", "0.3933791", "0.39328596" ]
0.78851956
0
Sets the escrow_address of this EscrowTransactionResponse.
def escrow_address(self, escrow_address): self._escrow_address = escrow_address
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_address(self, address):\n pass", "def address(self, address: object):\n\n self._address = address", "def address(self, address):\n\n self._address = address", "def address(self, address):\n\n self._address = address", "def address(self, address):\n\n self._address = address", "def address(self, address):\n\n self._address = address", "def address(self, address):\n\n self._address = address", "def address(self, address):\n\n self._address = address", "def address(self, address):\n\n self._address = address", "def address(self, address):\n\n self._address = address", "def set_address(self, address):\n self._java_ref.setAddress(address)", "def _set_address(self, v, load=False):\n try:\n t = YANGDynClass(v,base=[unicode,unicode,unicode,unicode,unicode,], is_leaf=True, yang_name=\"address\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True)\n except (TypeError, ValueError):\n raise ValueError(\"\"\"address must be of a type compatible with base=[unicode,unicode,unicode,unicode,unicode,], is_leaf=True, yang_name=\"address\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True\"\"\")\n self.__address = t\n if hasattr(self, '_set'):\n self._set()", "def address(self, address: str):\n\n self._address = address", "def set_contract_addr(self, addr):\n\t\tself.contract_addr = addr\n\t\tself._bank_inst = self._w3.eth.contract(\n\t\t\taddress=self.contract_addr,\n\t\t\tabi=self._iface[\"abi\"],\n\t\t)", "def address(self, address):\n if address is None:\n raise ValueError(\"Invalid value for `address`, must not be `None`\")\n\n self._address = address", "def set_address(self, address):\n if address == \"\":\n self.address = Address(\"\", \"\", \"\")\n else:\n self.address = address", "def amended_address(self, amended_address):\n\n self._amended_address = amended_address", "def set_address(self,address): \n new_address = self._format_address(address)\n self.rs485.write_command('#00?8 {}'.format(new_address))\n self.rs485.clear_buffers()\n time.sleep(0.2)", "def setEthaddr(self):\n\t\tself.ethaddr = self.settings.getKeyValue('ethaddr')\n\t\tself.socket.send('setenv ethaddr ' + self.ethaddr+'\\r', 1)\n\t\treturn None", "def set_complete_address(self, complete_address):\n self.complete_address = complete_address", "def address(self, new_address):\n house_num, street_name, apt_num = new_address\n self._address.house_num = house_num\n self._address.street_name = street_name\n self._address.apt_num = apt_num", "def address_id(self, address_id):\n\n self._address_id = address_id", "def address_id(self, address_id):\n\n self._address_id = address_id", "def set_address(self, new_address, ):\n self.address.append(new_address)\n self.save()", "def setSicxAddress(self, _address: Address) -> None:\n self._sICX_address.set(_address)", "def set_address(self, address, defer=False):\n\n # The MAXUSB chip handles this for us, so we don't need to do anything.\n pass", "def set_remit_to_address(self, remit_to_address):\n self.remit_to_address = remit_to_address", "def setData(self,address):\n self.data = _pack_address(address)", "def rowguid(self, rowguid):\n\n self._rowguid = rowguid", "def address(self, address):\n if self.local_vars_configuration.client_side_validation and address is None: # noqa: E501\n raise ValueError(\"Invalid value for `address`, must not be `None`\") # noqa: E501\n\n self._address = address", "def address(self, address):\n if address is None:\n raise ValueError(\"Invalid value for `address`, must not be 
`None`\") # noqa: E501\n\n self._address = address", "def address(self, address: str):\n if address is None:\n raise ValueError(\"Invalid value for `address`, must not be `None`\") # noqa: E501\n\n self._address = address", "def setRow(self, row): \n self.row = row", "def set_row( self, row, ):\n self.ix_row = row", "def set_end_address(address):\n try:\n command(address + \"L\")\n except EppException as e:\n print 'No EPROM type is selected, or value is lower than start address.', e.value", "def client_addresses(self, client_addresses):\n\n self._client_addresses = client_addresses", "def response_host(self, response_host):\n\n self._response_host = response_host", "def response_host(self, response_host):\n\n self._response_host = response_host", "def _set_address(self, address):\n for retry in range(0, self.__RETRY_MAX_NUM):\n sequence = bytearray(address.to_bytes(4, 'big'))\n sequence.append(self._checksum(sequence))\n\n bytes_sent = self._port_handle.write(sequence)\n if bytes_sent != len(sequence):\n raise DfuException(\n 'Serial port write error: tried to'\n 'send {} bytes, but {} was sent.'.format(\n len(sequence), bytes_sent))\n if self._is_acknowledged():\n break\n else:\n raise DfuException('Setting address failed after {} '\n 'retries.'.format(retry + 1))", "def setaccount(self, vergeaddress, account):\n return self.proxy.setaccount(vergeaddress, account)", "def email_address(self, email_address):\n\n self._email_address = email_address", "def email_address(self, email_address):\n\n self._email_address = email_address", "def email_address(self, email_address):\n\n self._email_address = email_address", "def setAddress(self, ip_address):\n # type: (str)->None\n\n self._validator.validate_one(\n 'address', VALID_OPTS['address'], ip_address)\n self._ifAttributes['address'] = ip_address", "def set_relay_address(self, relay_addr):\n self.relay_addr = self._Address(*relay_addr)", "def set_host_addr(self, addr: str) -> None:\n self.config[\"host_addr\"] = addr", "def set_addr(self, addr):\n Server.t_addresses[threading.get_ident()] = addr", "def setWriteAddress(self, address):\n DPxSetDinBuffWriteAddr(address)", "def __init__(self, slave_address):\n super(SensorBridgeCmdSetSlaveAddress, self).__init__(\n data=b\"\".join([pack(\">B\", slave_address)]),\n max_response_time=0.5,\n post_processing_time=0.0,\n min_response_length=1,\n max_response_length=1\n )", "def update_address(cls, address_data):\n address_instance = cls.objects.get(email=address_data['customer']['email'])\n address_data = address_data.get('addresses')\n for field_name, values in address_data:\n setattr(address_instance, field_name, values)\n address_instance.save()\n return address_instance.save()", "def address_line3(self, address_line3):\n\n self._address_line3 = address_line3", "def employee(self, employee: object):\n\n self._employee = employee", "def email_address(self, email_address: \"str\"):\n self._attrs[\"emailAddress\"] = email_address", "def setRow(self, row_number, row):\n self.data[:,row_number] = row\n return", "def set_address(self, host, port):\n self.host = host\n self.port = port", "def address(self) -> object:\n return self._address", "def acquirer_region(self, acquirer_region):\n\n self._acquirer_region = acquirer_region", "def location_address(self, location_address):\n\n self._location_address = location_address", "def set_start_address(address):\n try:\n command(address + \"P\")\n except EppException as e:\n print 'No EPROM type is selected, or value is higher than end address.', e.value", "def 
addresses(self, addresses: \"List[str]\"):\n self._attrs[\"addresses\"] = addresses", "def addresses(self, addresses: \"List[str]\"):\n self._attrs[\"addresses\"] = addresses", "def addresses(self, addresses: \"List[str]\"):\n self._attrs[\"addresses\"] = addresses", "def set_amf_addr(self, addr: str) -> None:\n self.config[\"amfConfigs\"][0][\"address\"] = addr", "def setRow(self, row):\n # Row of the database where the values of the variables are found\n self._row = row\n for e in self.children:\n e.setRow(row)", "def address_1(self, address_1):\n\n self._address_1 = address_1", "def address1(self, address1):\n\n self._address1 = address1", "def set_value(self, address, value):\n\n self.data[address] = value\n\n return True", "def peer_addresses(self, peer_addresses):\n\n self._peer_addresses = peer_addresses", "def _set_addresses(self):\n if self._report_key in (ReportTypes.SEARCH_DETAIL_REPORT, ReportTypes.SEARCH_BODY_REPORT) and \\\n self._report_data['totalResultsSize'] > 0:\n self._set_search_addresses()\n elif self._report_key in (ReportTypes.MHR_REGISTRATION, ReportTypes.MHR_TRANSFER,\n ReportTypes.MHR_EXEMPTION, ReportTypes.MHR_TRANSPORT_PERMIT, ReportTypes.MHR_NOTE):\n self._set_registration_addresses()", "def setEsquerdo(self, esquerdo):\n self.__esquerdo = esquerdo", "def set_camera_address(self, address: int):\n\n self.camera_address = address", "def add_address(self, **kwargs):\n addressitem = AddressItem(**kwargs)\n self.addresses.append(addressitem)\n # TODO check uniqueness of email addresses", "def econsent_signature(self, econsent_signature):\n\n self._econsent_signature = econsent_signature", "def setAddress(self, addresses):\n self.addr = addresses\n for i in xrange(len(self.addr)):\n if not ':' in self.addr[i]: \n self.addr[i] += ':' + self.port \n self.addr[i] += '/' + self.service", "def employer_address(self, employer_address):\n if employer_address is not None and len(employer_address) > 1024:\n raise ValueError(\"Invalid value for `employer_address`, length must be less than or equal to `1024`\") # noqa: E501\n\n self._employer_address = employer_address", "def address(self):\n\n return self._address", "def set_information_object_address(self, information_object_address):\n if type(information_object_address) is int:\n self.information_object_address = information_object_address", "def address_line1(self, address_line1):\n\n self._address_line1 = address_line1", "def address_line1(self, address_line1):\n\n self._address_line1 = address_line1", "def __init__(self, address):\n self._address = address", "def epid(self, epid):\n\n self._epid = epid", "def epid(self, epid):\n\n self._epid = epid", "def put(self, request, *args, **kwargs):\n\n payload = request.data\n\n instance = self.get_object()\n\n # validate request data body length is zero raise error message\n if len(payload) == 0:\n return APIResponse({'message': NOT_FOUND_JSON_DATA}, HTTP_400_BAD_REQUEST)\n\n # create store address serializers object\n serializer = self.serializer_class(data=payload, partial=True, context={'request': request})\n\n # check admin address serializers is valid\n if not serializer.is_valid():\n return APIResponse(serializer.errors, HTTP_400_BAD_REQUEST)\n\n validated_data = serializer.validated_data\n\n # get last transaction save point id\n sid = transaction.savepoint()\n\n try:\n # update address\n instance = serializer.update(instance, validated_data)\n except Exception as err:\n logger.error(\"Unexpected error occurred : %s.\", err.args[0])\n # roll back transaction if any 
exception occur while update address\n transaction.savepoint_rollback(sid)\n return APIResponse({\"message\": err.args[0]}, HTTP_400_BAD_REQUEST)\n\n # convert model object into json\n data = AddressViewSerializer(instance).data\n data['message'] = UPDATE_ADDRESS\n\n return APIResponse(data, HTTP_OK)", "def crowns(self, crowns):\n\n self._crowns = crowns", "def address_block(self, address_block):\n if self.local_vars_configuration.client_side_validation and address_block is None: # noqa: E501\n raise ValueError(\"Invalid value for `address_block`, must not be `None`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n address_block is not None and len(address_block) > 10):\n raise ValueError(\"Invalid value for `address_block`, length must be less than or equal to `10`\") # noqa: E501\n\n self._address_block = address_block", "def contract(self, contract):\n\n self._contract = contract", "def payee_state(self, payee_state):\n\n self._payee_state = payee_state", "def set_ssl_addr(self, addr):\n Server.t_ssl_addresses[threading.get_ident()] = addr", "def address_type(self, address_type):\n\n self._address_type = address_type", "def get_address(self):\n \n if \"'\" in self.data.get(\"AddressInfo\").get(\"AddressLine1\") :\n self.data.get(\"AddressInfo\").get(\"AddressLine1\").replace(\"'\",\"\")\n\n return self.data.get(\"AddressInfo\").get(\"AddressLine1\")", "def address(self):\n return self._address", "def address(self):\n return self._address", "def address(self):\n return self._address", "def address(self):\n return self._address", "def address(self):\n return self._address", "def econsent(self, econsent):\n\n self._econsent = econsent", "def FillInventoryServicePropertiesDuringEscrow(self, entity, request):\n return", "def open_edit_address(self, address: dict) -> None:\n row = self.addresses_list.surface_address_row(address)\n\n row.open_kebab_menu()\n row.kebab_menu.edit_address()", "def set_addresses(cls, records, name, value=None):\n Party = Pool().get('party.party')\n\n for record in records:\n Party.write([record.party], {'addresses': value})", "def set_rack(self, rack):\n self.rack = rack\n self.barcode = rack.barcode" ]
[ "0.60267246", "0.5820161", "0.57166636", "0.57166636", "0.57166636", "0.57166636", "0.57166636", "0.57166636", "0.57166636", "0.57166636", "0.56964433", "0.5556428", "0.55402744", "0.55071515", "0.5419375", "0.5411141", "0.54009694", "0.538831", "0.5359071", "0.53501517", "0.5252209", "0.5244996", "0.5244996", "0.5235547", "0.52323544", "0.5160443", "0.5158121", "0.51524144", "0.51408887", "0.5102618", "0.50965583", "0.507119", "0.50534254", "0.50010335", "0.49800354", "0.4964033", "0.49359563", "0.49359563", "0.4933583", "0.49214283", "0.48574868", "0.48574868", "0.48574868", "0.48419854", "0.47616613", "0.47377732", "0.46954116", "0.46884343", "0.46479958", "0.46287787", "0.4615265", "0.46028432", "0.45769036", "0.4569809", "0.45542955", "0.4551902", "0.4544169", "0.45416215", "0.45373496", "0.45362714", "0.45362714", "0.45362714", "0.45355973", "0.45244148", "0.45157033", "0.4503293", "0.45013666", "0.448403", "0.44818854", "0.44777164", "0.44649878", "0.4434662", "0.44329444", "0.4429583", "0.44168994", "0.44105342", "0.44077855", "0.4400459", "0.4400459", "0.43916065", "0.43914244", "0.43914244", "0.43882966", "0.4382701", "0.4365321", "0.43584985", "0.4330846", "0.43261185", "0.43258134", "0.43141967", "0.43107876", "0.43107876", "0.43107876", "0.43107876", "0.43107876", "0.42974135", "0.4282762", "0.4282282", "0.42716247", "0.42703772" ]
0.83312446
0
Sets the record_status of this EscrowTransactionResponse.
def record_status(self, record_status): self._record_status = record_status
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_status(self):\n result = self._get_status()\n if result and result[0]['state'] == 'aborted':\n raise Exception(\"Aborted because the status flag is set to 'aborted' in dynamodb\")\n\n # record the status\n self.status['timestamp'] = time.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n self.db_handler.update_item({'api_version': TsV2CatalogHandler.api_version}, self.status)", "def line_status(self, line_status):\n\n self._line_status = line_status", "def account_status(self, account_status):\n\n self._account_status = account_status", "def set_status(self, status):\n self.status = status", "def set_status(self, status):\n self.status = status", "def set_status(self, status):\n self.status = status", "def set_status(self, status):\n # TODO log to db\n self.status = status", "def set_recording(self, recording):\n self.record_states = recording", "def update_record_status(self, context, payload):\n access_token = util.get_access_token(context[\"headers\"])\n record = ZohorecruitRecord(**payload)\n endpoint = f\"{record.module}/status\"\n record_data = {\n \"data\": [\n {\n \"ids\": [record.record_id],\n \"Candidate_Status\": record.status,\n \"comments\": record.comments\n }\n ],\n \"trigger\":[record.trigger]\n }\n response = util.rest(\"PUT\",endpoint,access_token,record_data)\n return json.loads(response.text)", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def set_status(self, status: Status) -> None:\n if status.status_code == StatusCode.ERROR:\n self.elastic_span.outcome = constants.OUTCOME.FAILURE\n elif status.status_code == StatusCode.OK:\n self.elastic_span.outcome = constants.OUTCOME.SUCCESS\n else:\n self.elastic_span.outcome = constants.OUTCOME.UNKNOWN", "def job_status(self, job_status):\n\n self._job_status = job_status", "def setStatus(self, status):\n self.__status = status", "def status(self, status):\n self._set_property_(self.STATUS, str(status))", "def record_type_enum(self, record_type_enum):\n\n self._record_type_enum = record_type_enum", "def setstatus(self, 
status):\n with self.lock:\n self.status = status", "def response_status_id(self, response_status_id):\n\n self._response_status_id = response_status_id", "def get_status(self, rec, report):\n\n # Check if this card exists\n if (self.bus, self.slot) in self.crate.frus.keys():\n rec.VAL = self.crate.frus[(self.bus, self.slot)].alarm_level\n else:\n rec.VAL = ALARM_STATES.index('UNSET')\n # Make the record defined regardless of value\n rec.UDF = 0", "def set_status(self, status, ts=None):\n return ObjectStatus.set_status(self, status, ts=ts)", "def setRecord(self,record):\n idLower = record.getId().lower()\n type = record.name\n typeIds = self.indexed[type]\n if idLower in typeIds:\n oldRecord = typeIds[idLower]\n index = self.records.index(oldRecord)\n self.records[index] = record\n else:\n self.records.append(record)\n typeIds[idLower] = record", "def setStatus(self, status, details=None):\n self.onStatusSent(None, status)", "def SetStatus(self, status):\r\n self.status = status", "def status_detail(self, status_detail):\n self._status_detail = status_detail", "def status(self, status: str):\n\n self._status = status", "def status(self, status: str):\n\n self._status = status", "def status_detail(self, status_detail):\n\n self._status_detail = status_detail", "def _set_status(self, status):\n with self.status_lock:\n if (status in _ENDING_STATUSES) or (not self.status in _ENDING_STATUSES):\n self.status = status", "def set_status(self, status: HTTPProxyStatus) -> None:\n self._status = status\n self.update_actor_details(status=self._status)", "def processRecord(self, record):\n\n current_status = record.status\n\n if current_status == 'pre-accepted':\n new_status = 'accepted'\n elif current_status == 'pre-rejected':\n new_status = 'rejected'\n else:\n # no work to be done\n return record\n\n fields = {'status': new_status}\n return self.updateEntityProperties(record, fields)", "def _is_record_status(self, status_id):\n return status_id == self.record_status", "def set_event_status(self, new_event_status):\n self.event_status = new_event_status", "def payment_status(self, payment_status):\n\n self._payment_status = payment_status", "def setStatus(self, newStatus):\n self._status = newStatus", "def status(self, status: int):\n if status is None:\n raise ValueError(\"Invalid value for `status`, must not be `None`\") # noqa: E501\n\n self._status = status", "def set_status(self, status: JobStatus, pipeline: Optional['Pipeline'] = None) -> None:\n self._status = status\n connection: 'Redis' = pipeline if pipeline is not None else self.connection\n connection.hset(self.key, 'status', self._status)", "def status(self, status):\n allowed_values = [\"D\", \"P\", \"V\", \"S\", \"M\", \"I\", \"R\", \"C\"] # noqa: E501\n if status not in allowed_values:\n raise ValueError(\n \"Invalid value for `status` ({0}), must be one of {1}\" # noqa: E501\n .format(status, allowed_values)\n )\n\n self._status = status", "def set_status(self, status, comment=None):\n\n self.status_history.create(name=status, comment=comment)\n self.status = status", "def id_status(self, id_status):\n self._id_status = id_status", "def status(self, status):\n allowed_values = [\"I\", \"A\", \"S\", \"T\", \"D\"]\n if status not in allowed_values:\n raise ValueError(\n \"Invalid value for `status`, must be one of {0}\"\n .format(allowed_values)\n )\n self._status = status", "def status_id(self, status_id):\n\n self._status_id = status_id", "def status(self, status):\n allowed_values = [\"C\", \"D\", \"P\", \"I\", \"E\"] # noqa: 
E501\n if status not in allowed_values:\n raise ValueError(\n \"Invalid value for `status` ({0}), must be one of {1}\" # noqa: E501\n .format(status, allowed_values)\n )\n\n self._status = status", "def entity_async_status(self, entity_async_status):\n\n self._entity_async_status = entity_async_status", "def entity_async_status(self, entity_async_status):\n\n self._entity_async_status = entity_async_status", "def entity_async_status(self, entity_async_status):\n\n self._entity_async_status = entity_async_status", "def record(\n self, response_status: int, response_body: str, request_body: Dict[str, object]\n ) -> None:\n pass", "def __set_job_status(self, job: Job):\n\n self.redis_client.set(f'jobstatus:{job.id}:{str(job.status)}', f'job:{job.id}')", "def set_recording_state(self, state):\n assert isinstance(state, (bool, int))\n self.recording = state", "def agent_status(self, agent_status):\n\n self._agent_status = agent_status", "def connection_record(self, record: ConnectionRecord):\n self._connection_record = record", "def detailed_status(self, detailed_status):\n\n self._detailed_status = detailed_status", "def update_remediation_status(self, status):\n self.remediation_status = status", "def status(self, value: typing.Union[\"VolumeAttachmentStatus\", dict]):\n if isinstance(value, dict):\n value = typing.cast(\n VolumeAttachmentStatus,\n VolumeAttachmentStatus().from_dict(value),\n )\n self._properties[\"status\"] = value", "def status(self, status):\n if self.local_vars_configuration.client_side_validation and status is None: # noqa: E501\n raise ValueError(\"Invalid value for `status`, must not be `None`\") # noqa: E501\n\n self._status = status", "def status_code(self, status_code):\n\n self._status_code = status_code", "def set_status(self, new_status):\n if new_status == self.status:\n return\n\n old_status = self.status\n\n if new_status not in self.available_statuses():\n raise exceptions.InvalidLineStatus(\n _(\n \"'%(new_status)s' is not a valid status (current status:\"\n \" '%(status)s')\"\n )\n % {\"new_status\": new_status, \"status\": self.status}\n )\n self.status = new_status\n self.save()\n\n # Send signal for handling status changed\n order_line_status_changed.send(\n sender=self,\n line=self,\n old_status=old_status,\n new_status=new_status,\n )", "def data_status(self, data_status):\n self._data_status = data_status", "def status(self, value: ControllerStatus):\n self._status = value\n self.__status_event.set()", "def record(self, backend, job_id, status):\n self.file.write(\"%s;%d;%r\\n\" % (backend, job_id, status))\n self.file.flush()", "def status(self, status):\n allowed_values = [\"loaned\", \"finished\"] # noqa: E501\n if self.local_vars_configuration.client_side_validation and status not in allowed_values: # noqa: E501\n raise ValueError(\n \"Invalid value for `status` ({0}), must be one of {1}\".format(status, allowed_values) # noqa: E501\n )\n\n self._status = status", "def set_status(self, status):\n if status == 'qw':\n status = 'Waiting'\n elif status == 'hqw':\n status = 'Held'\n elif status == 'Eqw':\n status = 'Error'\n else:\n sys.exit(20)\n self.status = status\n return", "def workflow_status(self, workflow_status):\n self._workflow_status = workflow_status", "def set_order_status(self, status, orderid=0, envtype=0):\n if int(status) not in TRADE_CN.REV_ORDER_STATUS:\n error_str = ERROR_STR_PREFIX + \"the type of status is wrong \"\n return RET_ERROR, error_str\n\n if not TRADE_CN.check_envtype_cn(envtype):\n error_str = ERROR_STR_PREFIX + 
\"the type of environment param is wrong \"\n return RET_ERROR, error_str\n\n query_processor = self._get_sync_query_processor(SetOrderStatusCN.cn_pack_req,\n SetOrderStatusCN.cn_unpack_rsp)\n\n # the keys of kargs should be corresponding to the actual function arguments\n kargs = {'cookie': str(self.cookie), 'envtype': str(envtype), 'localid': str(0),\n 'orderid': str(orderid), 'status': str(status)}\n\n ret_code, msg, set_order_list = query_processor(**kargs)\n if ret_code != RET_OK:\n return RET_ERROR, msg\n\n col_list = ['envtype', 'orderID']\n set_order_table = pd.DataFrame(set_order_list, columns=col_list)\n\n return RET_OK, set_order_table", "def set_connection_status(self, connection_status: Literal[ConnectionState]) -> None:\n self.connection_status = connection_status\n self.publish(self.key_gen(\"connection_status\"), connection_status)", "def _set_status(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name=\"status\", rest_name=\"status\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"status must be of a type compatible with string\"\"\",\n 'defined-type': \"string\",\n 'generated-type': \"\"\"YANGDynClass(base=unicode, is_leaf=True, yang_name=\"status\", rest_name=\"status\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)\"\"\",\n })\n\n self.__status = t\n if hasattr(self, '_set'):\n self._set()", "def status(self, status):\n if status is None:\n raise ValueError(\"Invalid value for `status`, must not be `None`\")\n allowed_values = [\"success\", \"warning\", \"error\", \"pending\"]\n if status not in allowed_values:\n raise ValueError(\n \"Invalid value for `status` ({0}), must be one of {1}\"\n .format(status, allowed_values)\n )\n\n self._status = status", "def status(self, status):\n if status is None:\n raise ValueError(\"Invalid value for `status`, must not be `None`\") # noqa: E501\n if status is not None and len(status) < 1:\n raise ValueError(\"Invalid value for `status`, length must be greater than or equal to `1`\") # noqa: E501\n\n self._status = status", "def set_status(self, new_status):\n if new_status == self.status:\n return\n\n old_status = self.status\n\n if new_status not in self.available_statuses():\n raise exceptions.InvalidOrderStatus(\n _(\n \"'%(new_status)s' is not a valid status for order %(number)s\"\n \" (current status: '%(status)s')\"\n )\n % {\n \"new_status\": new_status,\n \"number\": self.number,\n \"status\": self.status,\n }\n )\n self.status = new_status\n if new_status in self.cascade:\n new_line_status = self.cascade[new_status]\n for line in self.lines.all():\n if new_line_status in line.available_statuses():\n line.status = new_line_status\n line.save()\n self.save()\n\n # Send signal for handling status changed\n order_status_changed.send(\n sender=self,\n order=self,\n old_status=old_status,\n new_status=new_status,\n )\n\n self._create_order_status_change(old_status, new_status)", "def status(self, status: dict):\n pass", "def set_status(self, status):\n if not status == self._status:\n self._status = 
status\n self.winstance.send_event('State changed to ' + self._status)\n\n self.completed = not self.parent_node.is_job or \\\n self._status == 'COMPLETED'\n\n if self.completed:\n self.publish()\n\n if not self.parent_node.is_job:\n self.failed = False\n else:\n self.failed = self.parent_node.is_job and \\\n (self._status == 'BOOT_FAIL' or\n self._status == 'CANCELLED' or\n self._status == 'FAILED' or\n self._status == 'REVOKED' or\n self._status == 'TIMEOUT')", "def set_recording(self, recording):\r\n self.recording = recording", "def records(self, records):\n\n self._records = records", "def records(self, records):\n\n self._records = records" ]
[ "0.5884911", "0.5822956", "0.5775433", "0.56188405", "0.56188405", "0.56188405", "0.561294", "0.55961376", "0.5581018", "0.5559708", "0.5559708", "0.5559708", "0.5559708", "0.5559708", "0.5559708", "0.5559708", "0.5553998", "0.5553998", "0.5553998", "0.5553998", "0.5553998", "0.5553998", "0.5553998", "0.5553998", "0.5553998", "0.5553998", "0.5553998", "0.5553998", "0.5553998", "0.5553998", "0.5553998", "0.5553998", "0.5553998", "0.5553998", "0.5553998", "0.5553998", "0.5553998", "0.5553998", "0.55286026", "0.5499783", "0.54820853", "0.54818654", "0.5423004", "0.54228616", "0.5421881", "0.5420193", "0.5328292", "0.5319678", "0.5311959", "0.5285364", "0.5276197", "0.52688825", "0.52688825", "0.52629644", "0.5261177", "0.52283865", "0.52242225", "0.5219313", "0.5217466", "0.519713", "0.5192151", "0.51831996", "0.5179622", "0.5159296", "0.5153973", "0.5108803", "0.50965804", "0.50913095", "0.5052452", "0.5042574", "0.5042574", "0.5042574", "0.5038511", "0.50382787", "0.5036143", "0.50349796", "0.5021927", "0.5003815", "0.49809802", "0.4977544", "0.49584398", "0.4957226", "0.49358457", "0.49266165", "0.48653305", "0.4859267", "0.48572803", "0.4844848", "0.48237604", "0.4810176", "0.4801177", "0.47930038", "0.4791995", "0.47914553", "0.47636577", "0.47432244", "0.47391963", "0.47282976", "0.4717356", "0.4717356" ]
0.783832
0
Sets the create_date of this EscrowTransactionResponse.
def create_date(self, create_date): self._create_date = create_date
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def creation_date(self, creation_date):\n\n self._creation_date = creation_date", "def creation_date(self, creation_date):\n\n self._creation_date = creation_date", "def created_date(self, created_date):\n\n self._created_date = created_date", "def created_date(self, created_date):\n\n self._created_date = created_date", "def created_date(self, created_date):\n\n self._created_date = created_date", "def created_date(self, created_date):\n\n self._created_date = created_date", "def created_date(self, created_date):\n\n self._created_date = created_date", "def created_date(self, created_date):\n self._created_date = created_date", "def SetDateCreated(self, date):\n self.datecreated = str(date)", "def date_created(self, date_created: datetime):\n\n self._date_created = date_created", "def date_created(self, date_created):\n \n self._date_created = date_created", "def date_created(self, date_created):\n\n self._date_created = date_created", "def date_created(self, date_created):\n\n self._date_created = date_created", "def date_created(self, date_created):\n\n self._date_created = date_created", "def date_created(self, date_created):\n self._date_created = date_created", "def date_created(self, date_created):\n self._date_created = date_created", "def set_account_created_date(self, account_created_date):\n self.account_created_date = account_created_date", "def datecreated(self, datecreated):\n\n self._datecreated = datecreated", "def set_creation_date(self, creation_date):\n\t\t\n\t\tif (creation_date.__class__ != str or creation_date ==\"\") and (creation_date.__class__ != time.struct_time or len(creation_date) != 9 ):\n\t\t\traise InvalidParameterError(\"creation_date\", \"creation_date is not in a proper format\")\n\t\ttry:\n\t\t\tif creation_date.__class__ == str:\n\t\t\t\ttmp_cd = time.strptime(creation_date, '%S %M %H %d %m %Y')\n\t\t\telif creation_date.__class__ == time.struct_time:\n\t\t\t\ttmp_cd = creation_date\n\t\t\tself.__creation_date = datetime(tmp_cd[0], tmp_cd[1], tmp_cd[2], tmp_cd[3], tmp_cd[4], tmp_cd[5])\n\t\texcept:\n\t\t\traise InvalidDate, \"date is not valid creation_date is not in a proper format\"", "def set_created_date(self, doc, created):\n if not self.created_date_set:\n self.created_date_set = True\n date = utils.datetime_from_iso_format(created)\n if date is not None:\n doc.creation_info.created = date\n return True\n else:\n raise SPDXValueError('CreationInfo::Date')\n else:\n raise CardinalityError('CreationInfo::Created')", "def created_date(self, created_date):\n if created_date is None:\n raise ValueError(\"Invalid value for `created_date`, must not be `None`\") # noqa: E501\n\n self._created_date = created_date", "def set_date(self, date):\n self.date = date\n return", "def setCreatedDate(self, *args):\n return _libsbml.ModelHistory_setCreatedDate(self, *args)", "def create_at(self, create_at):\n\n self._create_at = create_at", "def set_date(self, date):\n self.date = date", "def create_time(self, create_time):\n\n self._create_time = create_time", "def create_time(self, create_time):\n\n self._create_time = create_time", "def create_time(self, create_time):\n\n self._create_time = create_time", "def create_time(self, create_time):\n\n self._create_time = create_time", "def create_time(self, create_time):\n\n self._create_time = create_time", "def create_time(self, create_time):\n\n self._create_time = create_time", "def create_time(self, create_time):\n\n self._create_time = create_time", "def create_time(self, create_time):\n\n self._create_time = 
create_time", "def create_time(self, create_time):\n\n self._create_time = create_time", "def create_time(self, create_time):\n\n self._create_time = create_time", "def set_created(self, dt):\n self.created = dt_to_iso(dt)", "def set_date(self, date):\n self.data['date'] = date", "def create(self):\n self.created_date = timezone.now()\n self.save()", "def set_account_created_date_formatted(self, account_created_date_formatted):\n self.account_created_date_formatted = account_created_date_formatted", "def set_datetime(self, date):\n self.date = date", "def transaction_date(self, value):\n if value:\n self._transaction_date = (\n parse(value).date() if isinstance(value, type_check) else value\n )", "def date(self, date):\n self._date = date", "def date(self, date):\n\n self._date = date", "def date(self, date):\n\n self._date = date", "def date(self, date):\n\n self._date = date", "def date(self, date):\n\n self._date = date", "def date(self, date):\n\n self._date = date", "def date(self, date):\n if self.local_vars_configuration.client_side_validation and date is None: # noqa: E501\n raise ValueError(\"Invalid value for `date`, must not be `None`\") # noqa: E501\n\n self._date = date", "def create_log(self, create_log):\n\n self._create_log = create_log", "def created_date_utc(self, created_date_utc):\n\n self._created_date_utc = created_date_utc", "def set_start_date(self, date):\n pass", "def settlement_date(self, value):\n if value:\n self._settlement_date = (\n parse(value).date() if isinstance(value, type_check) else value\n )", "def set_date(self, date):\n\n newdate = datetime.datetime.strptime(date, \"%Y-%m-%dT%H:%M:%S\")\n self.__get_century(date)\n self.__bus.write_byte_data(self.__rtcaddress,\n self.SECONDS,\n self.__dec_bcd(newdate.second))\n self.__bus.write_byte_data(self.__rtcaddress,\n self.MINUTES,\n self.__dec_bcd(newdate.minute))\n self.__bus.write_byte_data(self.__rtcaddress,\n self.HOURS,\n self.__dec_bcd(newdate.hour))\n self.__bus.write_byte_data(self.__rtcaddress,\n self.DAYOFWEEK,\n self.__dec_bcd(newdate.weekday()))\n self.__bus.write_byte_data(self.__rtcaddress,\n self.DAY,\n self.__dec_bcd(newdate.day))\n self.__bus.write_byte_data(self.__rtcaddress,\n self.MONTH,\n self.__dec_bcd(newdate.month))\n self.__bus.write_byte_data(self.__rtcaddress,\n self.YEAR,\n self.__dec_bcd(newdate.year -\n self.__century))\n return", "def _date(self, _date):\n\n self.__date = _date", "def _date(self, _date):\n\n self.__date = _date", "def created(self, created):\n if created is None:\n raise ValueError(\"Invalid value for `created`, must not be `None`\")\n\n self._created = created", "def set_checkout(self, date):\n if type(date) != dt.datetime:\n raise TypeError('date must be a datetime.datetime object')\n else:\n pass\n self._checkout_date = date", "def _date(self, _date: datetime):\n if _date is None:\n raise ValueError(\"Invalid value for `_date`, must not be `None`\") # noqa: E501\n\n self.__date = _date", "def date(self, date):\n if date is None:\n raise ValueError(\n \"Invalid value for `date`, must not be `None`\"\n ) # noqa: E501\n\n self._date = date", "def create_order_info(self, create_order_info):\n\n self._create_order_info = create_order_info", "def receipt_date(self, receipt_date):\n\n self._receipt_date = receipt_date", "def receipt_date(self, receipt_date):\n\n self._receipt_date = receipt_date", "def date(self, value):\n self.date_value = value", "def date_start(self, date_start):\n\n self._date_start = date_start", "def creation_time(self, creation_time):\n\n 
self._creation_time = creation_time", "def creation_time(self, creation_time):\n\n self._creation_time = creation_time", "def create_info(self, create_info):\n self._create_info = create_info", "def created(self, created):\n\n self._created = created", "def created(self, created):\n\n self._created = created", "def created(self, created):\n\n self._created = created", "def created(self, created):\n\n self._created = created", "def created(self, created):\n\n self._created = created", "def created(self, created):\n\n self._created = created", "def created(self, created):\n\n self._created = created", "def created(self, created):\n\n self._created = created", "def created(self, created):\n\n self._created = created", "def created(self, created):\n\n self._created = created", "def created(self, created):\n\n self._created = created", "def created(self, created):\n\n self._created = created", "def created(self, created):\n\n self._created = created", "def start_date(self, start_date):\n\n self._start_date = start_date", "def start_date(self, start_date):\n\n self._start_date = start_date", "def start_date(self, start_date):\n\n self._start_date = start_date", "def start_date(self, start_date):\n\n self._start_date = start_date", "def start_date(self, start_date):\n\n self._start_date = start_date", "def start_date(self, start_date):\n\n self._start_date = start_date", "def start_date(self, start_date):\n\n self._start_date = start_date", "def start_date(self, start_date):\n\n self._start_date = start_date", "def start_date(self, start_date):\n\n self._start_date = start_date", "def settlement_date(self) -> datetime.date:\n return self.__settlement_date", "def set_rdate(self, rdate):\n self.__rdate = rdate", "def start_date(self, start_date):\n self._start_date = start_date", "def start_date(self, start_date):\n \n self._start_date = start_date", "def created_at(self, created_at):\n\n self._created_at = created_at", "def created_at(self, created_at):\n\n self._created_at = created_at", "def created_at(self, created_at):\n\n self._created_at = created_at", "def created_at(self, created_at):\n\n self._created_at = created_at", "def created_at(self, created_at):\n\n self._created_at = created_at" ]
[ "0.68681526", "0.68681526", "0.68388635", "0.68388635", "0.68388635", "0.68388635", "0.68388635", "0.6816522", "0.6814412", "0.6662243", "0.66395116", "0.6612482", "0.6612482", "0.6612482", "0.65789974", "0.65789974", "0.6504573", "0.64964783", "0.64300257", "0.61280626", "0.6052631", "0.6027254", "0.59505725", "0.59326", "0.59287363", "0.59182864", "0.59182864", "0.59182864", "0.59182864", "0.59182864", "0.59182864", "0.59182864", "0.59182864", "0.59182864", "0.59182864", "0.5812298", "0.58048224", "0.57988924", "0.57947993", "0.56282526", "0.55467296", "0.55384827", "0.5523236", "0.5523236", "0.5523236", "0.5523236", "0.5523236", "0.5508598", "0.54799765", "0.5462451", "0.54580677", "0.5397321", "0.53816384", "0.53528655", "0.53528655", "0.5324752", "0.53193736", "0.5295073", "0.52556145", "0.52317417", "0.5219214", "0.5219214", "0.52121115", "0.518887", "0.51222616", "0.51222616", "0.51189435", "0.51145095", "0.51145095", "0.51145095", "0.51145095", "0.51145095", "0.51145095", "0.51145095", "0.51145095", "0.51145095", "0.51145095", "0.51145095", "0.51145095", "0.51145095", "0.5103838", "0.5103838", "0.5103838", "0.5103838", "0.5103838", "0.5103838", "0.5103838", "0.5103838", "0.5103838", "0.50998336", "0.5098604", "0.5082061", "0.50642025", "0.5055891", "0.5055891", "0.5055891", "0.5055891", "0.5055891" ]
0.80460167
2
Sets the update_date of this EscrowTransactionResponse.
def update_date(self, update_date): self._update_date = update_date
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def updated_date(self, updated_date):\n\n self._updated_date = updated_date", "def updated_date(self, updated_date):\n\n self._updated_date = updated_date", "def updated_date(self, updated_date):\n self._updated_date = updated_date", "def updated_date(self, updated_date):\n if updated_date is None:\n raise ValueError(\"Invalid value for `updated_date`, must not be `None`\") # noqa: E501\n\n self._updated_date = updated_date", "def updated_date(self, updated_date):\n if updated_date is None:\n raise ValueError(\"Invalid value for `updated_date`, must not be `None`\") # noqa: E501\n if updated_date is not None and len(updated_date) < 1:\n raise ValueError(\"Invalid value for `updated_date`, length must be greater than or equal to `1`\") # noqa: E501\n\n self._updated_date = updated_date", "def update(self, date):\r\n self.date = date", "def updated(self, updated: datetime):\n\n self._updated = updated", "def updated_date(self):\n return self._updated_date", "def updated_date(self):\n return self._updated_date", "def set_date(self, date):\n self.date = date\n return", "def set_date(self, date):\n self.date = date", "def updated_at(self, updated_at: \"datetime\"):\n self._attrs[\"updatedAt\"] = updated_at", "def updated_at(self, updated_at: \"datetime\"):\n self._attrs[\"updatedAt\"] = updated_at", "def updated_at(self, updated_at: \"datetime\"):\n self._attrs[\"updatedAt\"] = updated_at", "def set_date(self, date):\n self.data['date'] = date", "def updated_at(self, updated_at):\n\n self._updated_at = updated_at", "def updated_at(self, updated_at):\n\n self._updated_at = updated_at", "def updated_at(self, updated_at):\n\n self._updated_at = updated_at", "def updated_at(self, updated_at):\n\n self._updated_at = updated_at", "def updated_at(self, updated_at):\n\n self._updated_at = updated_at", "def updated_at(self, updated_at):\n\n self._updated_at = updated_at", "def updated_at(self, updated_at):\n\n self._updated_at = updated_at", "def updated_at(self, updated_at):\n\n self._updated_at = updated_at", "def updated_at(self, updated_at):\n\n self._updated_at = updated_at", "def updated_at(self, updated_at):\n\n self._updated_at = updated_at", "def update_time(self, update_time):\n\n self._update_time = update_time", "def set_end_date(self, date):\n pass", "def set_rdate(self, rdate):\n self.__rdate = rdate", "def date(self, date):\n\n self._date = date", "def date(self, date):\n\n self._date = date", "def date(self, date):\n\n self._date = date", "def date(self, date):\n\n self._date = date", "def date(self, date):\n\n self._date = date", "def date(self, date):\n self._date = date", "def set_exchange_rate_date(self, exchange_rate_date):\n self.set_value_into_input_field(self.exchange_rate_date_locator, exchange_rate_date)", "def last_update(self, last_update):\n\n self._last_update = last_update", "def mod_date(self, mod_date):\n\n self._mod_date = mod_date", "def _date(self, _date):\n\n self.__date = _date", "def _date(self, _date):\n\n self.__date = _date", "def date(self, new_date):\n self._date.date = new_date", "def set_datetime(self, date):\n self.date = date", "def set_checkout(self, date):\n if type(date) != dt.datetime:\n raise TypeError('date must be a datetime.datetime object')\n else:\n pass\n self._checkout_date = date", "def modified_date(self, modified_date):\n\n self._modified_date = modified_date", "def modified_date(self, modified_date):\n\n self._modified_date = modified_date", "def update_db_record(self, update_body: dict):\n for attribute, value in update_body.items():\n 
if attribute in self._update_allowed_fields:\n setattr(self, attribute, value)\n self.updated_at = datetime.now()\n self.save()", "def revision_date(self, revision_date):\n\n self._revision_date = revision_date", "def updated_by(self, updated_by):\n\n self._updated_by = updated_by", "def updated(self, updated):\n\n self._updated = updated", "def updated(self, updated):\n\n self._updated = updated", "def updated(self, updated):\n\n self._updated = updated", "def updated(self, updated):\n\n self._updated = updated", "def last_updated(self, last_updated):\n\n self._last_updated = last_updated", "def last_updated(self, last_updated):\n\n self._last_updated = last_updated", "def last_updated(self, last_updated):\n\n self._last_updated = last_updated", "def last_updated(self, last_updated):\n\n self._last_updated = last_updated", "def set_adjustment_charge_end_date(self, end_date):\n self.set_value_into_input_field(self.end_date_locator, end_date)", "def citation_date(self, citation_date):\n\n self._citation_date = citation_date", "def updated(self) -> datetime:\n return datetime.strptime(self.data['updated_at'],\n '%Y-%m-%dT%H:%M:%S.%fZ')", "def updated_at(self, updated_at):\n if self.local_vars_configuration.client_side_validation and updated_at is None: # noqa: E501\n raise ValueError(\"Invalid value for `updated_at`, must not be `None`\") # noqa: E501\n\n self._updated_at = updated_at", "def rating_end_date(self, rating_end_date):\n\n self._rating_end_date = rating_end_date", "def announcement_date(self, announcement_date):\n\n self._announcement_date = announcement_date", "def receipt_date(self, receipt_date):\n\n self._receipt_date = receipt_date", "def receipt_date(self, receipt_date):\n\n self._receipt_date = receipt_date", "def last_update_timestamp(self, last_update_timestamp):\n\n self._last_update_timestamp = last_update_timestamp", "def date_modified(self, date_modified):\n\n self._date_modified = date_modified", "def date_modified(self, date_modified):\n\n self._date_modified = date_modified", "def date_modified(self, date_modified):\n\n self._date_modified = date_modified", "def date_modified(self, date_modified):\n\n self._date_modified = date_modified", "def date_modified(self, date_modified):\n\n self._date_modified = date_modified", "def date_modified(self, date_modified):\n\n self._date_modified = date_modified", "def date_modified(self, date_modified):\n\n self._date_modified = date_modified", "def expiration_date(self, expiration_date):\n\n self._expiration_date = expiration_date", "def date_modified(self, date_modified):\n \n self._date_modified = date_modified", "def updated(self, updated: str):\n\n self._updated = updated", "def end_date(self, end_date):\n self._end_date = end_date", "def rating_date(self, rating_date):\n\n self._rating_date = rating_date", "def updated_on(self, updated_on):\n\n self._updated_on = updated_on", "def _date(self, _date: datetime):\n if _date is None:\n raise ValueError(\"Invalid value for `_date`, must not be `None`\") # noqa: E501\n\n self.__date = _date", "def end_date(self, end_date):\n\n self._end_date = end_date", "def end_date(self, end_date):\n\n self._end_date = end_date", "def end_date(self, end_date):\n\n self._end_date = end_date", "def end_date(self, end_date):\n\n self._end_date = end_date", "def end_date(self, end_date):\n\n self._end_date = end_date", "def end_date(self, end_date):\n\n self._end_date = end_date", "def end_date(self, end_date):\n\n self._end_date = end_date", "def end_date(self, end_date):\n\n self._end_date = 
end_date", "def update(self, record):\n record[self.UPDATED_AT] = strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())\n super(self.__class__, self).update(record)", "def set_date(self, date):\n self.date = self.date_to_local(date)\n # ephem deals only in UTC\n self.site.date = ephem.Date(self.date_to_utc(self.date))", "def add_update(self, update):\r\n self._updates.append(update)", "def end_date(self, end_date):\n if end_date is None:\n end_date = datetime.utcnow()\n\n self._end_date = dt_utils.parse_date(end_date)", "def version_block(self, block_data, user_id, update_version):\n if block_data.edit_info.update_version == update_version:\n return\n\n original_usage = block_data.edit_info.original_usage\n original_usage_version = block_data.edit_info.original_usage_version\n block_data.edit_info.edited_on = datetime.datetime.now(UTC)\n block_data.edit_info.edited_by = user_id\n block_data.edit_info.previous_version = block_data.edit_info.update_version\n block_data.edit_info.update_version = update_version\n if original_usage:\n block_data.edit_info.original_usage = original_usage\n block_data.edit_info.original_usage_version = original_usage_version", "def last_updated(self, value):\n self._last_updated = value", "def date(self, date):\n if self.local_vars_configuration.client_side_validation and date is None: # noqa: E501\n raise ValueError(\"Invalid value for `date`, must not be `None`\") # noqa: E501\n\n self._date = date", "def election_date(self, election_date):\n\n self._election_date = election_date", "def set_modification_date(self, modification_date):\n\t\t\n\t\tif (modification_date.__class__ != str or modification_date ==\"\") and (modification_date.__class__ != time.struct_time or len(modification_date) != 9 ):\n\t\t\traise InvalidParameterError(\"modification_date\", \"modification_date is not in a proper format\")\n\t\ttry:\n\t\t\tif modification_date.__class__ == str:\n\t\t\t\ttmp_md = time.strptime(modification_date, '%S %M %H %d %m %Y')\n\t\t\telif modification_date.__class__ == time.struct_time:\n\t\t\t\ttmp_md = modification_date\n\t\t\tself.__modification_date = datetime(tmp_md[0], tmp_md[1], tmp_md[2], tmp_md[3], tmp_md[4], tmp_md[5])\t\n\t\texcept:\n\t\t\traise InvalidDate, \"date is not valid modification_date is not in a proper format\"", "def updated_by(self, updated_by: \"str\"):\n self._attrs[\"updatedBy\"] = updated_by", "def updated_by(self, updated_by: \"str\"):\n self._attrs[\"updatedBy\"] = updated_by", "def updated_by(self, updated_by: \"str\"):\n self._attrs[\"updatedBy\"] = updated_by", "def last_updated(self, last_updated: str):\n\n self._last_updated = last_updated" ]
[ "0.72074246", "0.72074246", "0.7175995", "0.6495363", "0.62919956", "0.6009002", "0.57752734", "0.5506892", "0.5506892", "0.5489573", "0.5442477", "0.54176337", "0.54176337", "0.54176337", "0.5408546", "0.5396654", "0.5396654", "0.5396654", "0.5396654", "0.5396654", "0.5396654", "0.5396654", "0.5396654", "0.5396654", "0.5396654", "0.5333897", "0.5304892", "0.53018886", "0.5301887", "0.5301887", "0.5301887", "0.5301887", "0.5301887", "0.527987", "0.527223", "0.52456784", "0.5234896", "0.5206755", "0.5206755", "0.52021277", "0.5165041", "0.51150686", "0.5031946", "0.5031946", "0.50285774", "0.5024395", "0.49995872", "0.4984005", "0.4984005", "0.4984005", "0.4984005", "0.49797243", "0.49797243", "0.49797243", "0.49797243", "0.49653146", "0.4947797", "0.49285167", "0.49242994", "0.49135113", "0.49025837", "0.48989096", "0.48989096", "0.48937747", "0.48932204", "0.48932204", "0.48932204", "0.48932204", "0.48932204", "0.48932204", "0.48932204", "0.48817146", "0.48584625", "0.48468345", "0.48410448", "0.4837594", "0.48357198", "0.483196", "0.48078892", "0.48078892", "0.48078892", "0.48078892", "0.48078892", "0.48078892", "0.48078892", "0.48078892", "0.48024812", "0.47980484", "0.47956005", "0.4781873", "0.4765721", "0.47626898", "0.47573063", "0.47556862", "0.47438848", "0.47404045", "0.47404045", "0.47404045", "0.4738327" ]
0.7807129
1
Returns the model properties as a dict
def to_dict(self): result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(EscrowTransactionResponse, dict): for key, value in self.items(): result[key] = value return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_dict(self):\n return self.properties", "def to_dict(self):\n return self.properties", "def get_properties(self):\n return self.properties", "def asdict(self):\n return self._prop_dict", "def json(self):\n rv = {\n prop: getattr(self, prop)\n for prop in self.__properties__\n if prop in vars(self)\n }\n rv.update(self._props)\n return rv", "def get_properties(self):\n return self.properties", "def get_properties():", "def getProperties():", "def properties(self):\r\n if self._properties is None:\r\n res = self._con.get(self._url, {'f':'json'})\r\n self._properties = PropertyMap(res)\r\n return self._properties", "def properties(self):\r\n if self._properties is None:\r\n res = self._con.get(self._url, {'f':'json'})\r\n self._properties = PropertyMap(res)\r\n return self._properties", "def getProperties(self):\n return self.properties", "def __properties__(self) -> dict:\r\n parameters = [\r\n d for d in dir(self) if (d[0] != \"_\") and (d.count(\"set\") == 0)\r\n and (d.count(\"_c\") == 0) and (d.count(\"_f\") == 0)\r\n ]\r\n\r\n return self.__as_json__(parameters)", "def json_properties(self):\n attributes = []\n all = vars(self)\n for var in all:\n if var[:1] != '_':\n attributes.append(var)\n if isinstance(self, db.Model):\n properties = self.properties().keys()\n for property in properties:\n if property[:1] != '_':\n attributes.append(property)\n return attributes", "def properties(self) -> Any:\n return pulumi.get(self, \"properties\")", "def _properties(self) -> dict[str, dict[str, str]]:\n schema = self.schema(by_alias=False)\n if schema.get('properties') is not None:\n return schema.get('properties', {})\n return schema.get('definitions', {}).get(self.__class__.__name__, {}).get('properties', {})", "def get_model_properties(self):\n properties = {}\n\n filename = self._get_data_filename(\"modelargs.json\")\n with open(filename, \"r\") as f:\n results = json.loads(f.read())\n properties[\"image_size\"] = results.get(\"image_size\")\n properties[\"num_classes\"] = results.get(\"num_classes\")\n properties[\"model\"] = results.get(\"model\")\n properties[\"name\"] = results.get(\"name\")\n properties[\"filter_size\"] = results.get(\"filter_size\", 3)\n properties[\"increase_factor\"] = results.get(\"increase_factor\", 0)\n self.model = properties[\"name\"] # regardless of the name of the folder, this will get the proper model name (i.e. 
<modelname>.cntk)\n\n # optional property\n properties[\"trainer\"] = results.get(\"trainer\", \"CNTK 2.2\")\n\n self._ensure_model_file()\n properties[\"size_mb\"] = round(os.path.getsize(self.model_file) / (1000 * 1000))\n\n return properties", "def as_dict(self):\n result = {}\n for attr in self.__attr:\n result[attr] = getattr(self, attr)\n return result", "def to_dict_model(self) -> dict:\n return dict((key, getattr(self, key)) for key in self.__mapper__.c.keys())", "def get_properties():\n properties = dict()\n properties['size'] = list()\n properties['color'] = list()\n properties['quality'] = list()\n u = models.Size.query.all()\n for i in u:\n properties['size'].append(i.size_name)\n u = models.Color.query.all()\n for i in u:\n properties['color'].append(i.color_name)\n u = models.Quality.query.all()\n for i in u:\n properties['quality'].append(i.quality_name)\n return make_response(jsonify(properties))", "def get_modelDict(self):\n return self.__modelDict", "def attributes(self):\n return dict(self.__attributes)", "def properties(self):\n return self._properties", "def properties(self):\n return self._properties", "def to_dict(self):\n result = {}\n for p in self.json_properties():\n value = getattr(self, p)\n if isinstance(value, datetime.datetime):\n value = value.strftime('%s%f')[:-3]\n result[Jsonifiable.transform_to_camelcase(p)] = value\n return result", "def properties(self):\n return self._props", "def properties(self):\n pass", "def to_dict(self):\n d = {}\n for attr in self.__class__.attributes:\n d[attr] = getattr(self, attr)\n return d", "def properties_get(self):\n return self._get('properties')", "def _collect_properties(self):\n properties = {\n 'userid': self.user_id,\n 'title': self.get_fullname()\n }\n if not self.ogds_user:\n return properties\n\n for attribute_name in self.ogds_user_attributes:\n value = getattr(self.ogds_user, attribute_name)\n properties[attribute_name] = value\n return properties", "def getPropertyDict(self):\n \n d = self.getChild('__properties')\n if d:\n return d.getDict()\n else:\n return {}", "def get_attributes(self):\n return dict(self.attributes) # return the attributes", "def get_attributes(self):\n return dict(self.attributes) # return the attributes", "def get_attributes(self):\n return dict(self.attributes) # return the attributes", "def to_dict(self, include=None):\n _MODEL = type(self)\n repr_dict = {}\n if include is None:\n include = []\n for name, prop in _MODEL._properties.iteritems():\n if hasattr(prop, 'public') and getattr(prop, 'public', False):\n include.append(name)\n\n for name in include:\n # check if this property is even allowed to be public\n # or has a value set\n if not hasattr(self, name):\n continue\n\n value = getattr(self, name)\n if type(getattr(_MODEL, name)) == ndb.StructuredProperty:\n if isinstance(value, list):\n items = []\n for item in value:\n items.append(item.to_dict(include=None))\n repr_dict[name] = items\n else:\n repr_dict[name] = value.to_dict(include=None)\n elif isinstance(value, date):\n repr_dict[name] = value.isoformat()\n elif isinstance(value, ndb.Key):\n repr_dict[name] = value.urlsafe()\n else:\n repr_dict[name] = value\n\n if self._key:\n repr_dict['key'] = self.get_key_urlsafe()\n return repr_dict", "def to_dict(self):\n properties = {}\n for k in self.__dict__:\n if k == 'POSSIBLE_METHODS':\n continue\n if k == 'keysamplers':\n properties[k] = [i.to_dict() for i in self.__dict__[k] if hasattr(i,'to_dict')]\n elif k in {'pooler'}:\n properties[k] = self.__dict__[k].to_dict()\n 
else:\n properties[k] = deepcopy(self.__dict__[k])\n return properties", "def get_all_properties(cls):\n return ['key', 'id'] + _.keys(cls._properties)", "def get_properties(self):\n return self.name, self.author, self.description, self.fmu_type, self.version, self.guid, self.tool, self.numStates", "def properties(self):\n\n return self._properties", "def ToDict(self):\n atributes_dictionary = {}\n for key, value in self.__dict__.iteritems():\n atributes_dictionary[key] = value\n return atributes_dictionary", "def properties(self):", "def properties(self):", "def properties(self):", "def modelPropertiesDictionary(sql_row_list):\n \n properties_dictionary = \\\n {\n \"id\": sql_row_list[0],\n \"name\": sql_row_list[1],\n \"last_deploy_timestamp\": sql_row_list[2],\n \"active_version\": sql_row_list[3],\n \"build_id\": sql_row_list[4]\n };\n\n return properties_dictionary;", "def as_dict(self):\n data = dict()\n for name in self.fields:\n val = getattr(self, name)\n if isinstance(val, Model):\n val = val.as_dict()\n elif isinstance(val, list) and val and isinstance(val[0], Model):\n val = [sub.as_dict() for sub in val]\n data[name] = val\n return data", "def to_dict(self):\n if self._dict is not None:\n return self._dict\n\n result = {}\n for key in self.ATTRIBUTES:\n value = getattr(self, key)\n if value:\n result[key] = value\n self._dict = result\n return result", "def properties(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"properties\")", "def properties(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"properties\")", "def to_dict(self):\n _dict = {}\n for f in self._meta.fields:\n if f.name == 'created':\n _dict[f.name] = str(f.value_from_object(self))\n else:\n _dict[f.name] = f.value_from_object(self)\n\n return _dict", "def to_dict(self):\r\n return self.__dict__", "def properties(self):\n return None", "def properties(self):\n return None", "def to_dict(self):\n return attr.asdict(self)", "def as_dict(self):\n return self.__dict__", "def _get_model_state(self) -> dict:\n return dict(model=self.model, kwargs=self._model_kwargs)", "def dictify(self):\n return {\n \"name\" : self.name,\n \"lastname\" : self.lastname,\n \"phone\" : self.phone,\n \"email\" : self.email\n }", "def to_dict(self):\n return {\n \"id\": self.id,\n \"name\": self.name,\n # \"created_by\": self.created_by,\n # \"created_on\": self.created_on,\n # \"modified_by\": self.modified_by,\n # \"modified_on\": self.modified_on\n }", "def properties(self):\r\n return resources.Properties(self)", "def attributes(self):\n params = self.model.param_array\n return {'parameters': params}", "def properties(self, pk):\n return JsonResponse(self._get_properties(pk))", "def to_dict(self):\n return vars(self)", "def to_dict(self):\n\n # Check if is the right instance.\n if isinstance(self, db.Model):\n # construct a dictionary from column names and values.\n dict_representation = {c.name: getattr(self, c.name) for c in self.__table__.columns}\n return dict_representation\n else:\n raise AttributeError(type(self).__name__ + \" is not instance of \" + db.Model.__name__)", "def bson_properties(self):\n return []", "def to_dict(self):\n return {\n \"id\": self.id,\n \"name\": self.name\n }", "def get_dict(self):\n return", "def to_dict(self):\n return to_dict(self.__dict__)", "def to_json(self):\n properties = self.to_dict()\n if isinstance(self, db.Model):\n properties['id'] = unicode(self.key().id())\n return json.dumps(properties)", "def to_dict(self):", "def to_dict(self):\n return 
self.__dict__", "def to_dict(self):\n return self.__dict__", "def to_dict(self):\n return self.__dict__", "def to_dict(self):\n return self.__dict__", "def to_dict(self):\n return self.__dict__", "def to_dict(self):\n return self.__dict__", "def get_attributes(self) -> Dict[str, str]:\n pass", "def config(self) -> ModelConfigDict:\n return self.config_obj.to_dict()", "def properties(self):\n return self.properties_with_uid[1:]", "def to_dict(self):\n properties = {}\n for k in self.__dict__:\n if k in {'idsSoFar'}:\n continue\n else:\n properties[k] = deepcopy(self.__dict__[k])\n return properties", "def to_dict(self):\n properties = {}\n for k in self.__dict__:\n if k in {'idsSoFar'}:\n continue\n else:\n properties[k] = deepcopy(self.__dict__[k])\n return properties", "def to_dict(self):\n properties = {}\n for k in self.__dict__:\n if k in {'idsSoFar'}:\n continue\n else:\n properties[k] = deepcopy(self.__dict__[k])\n return properties", "def serialise(self):\n return {\n 'id': self.id,\n 'category_id': self.category_id,\n 'name': self.name,\n 'description': self.description,\n 'quantity': self.quantity,\n 'price': self.price,\n 'user_id': self.user_id\n }", "def getPropertiesAll():", "def get_all_properties(self) -> dict:\n return self._request(\n \"post\",\n URL,\n json=attr.asdict(\n Body(\"getAllProperties\", API_VERSION),\n filter=attr.filters.exclude(attr.fields(Body).params),\n ),\n )", "def model_info(self):\n if not self._model_info:\n self._load_model_info()\n try:\n data = json.loads(self._model_info)\n except (TypeError, ValueError):\n data = {}\n return data", "def to_dict(self):\n return {\n 'name': self.get_name(),\n 'description': self.get_description()\n }", "def serialize(self):\n return {\n 'id' : self.id,\n 'name' : self.name,\n }", "def serialize(self):\n return {\n 'id' : self.id,\n 'name' : self.name,\n }", "def as_dict(self):\n return self.__dict__", "def to_dict(self):\r\n\r\n return {\r\n 'product_id': self.product_id,\r\n 'product_name': self.product_name\r\n }", "def serialize(self):\n return {\n 'name' : self.name,\n 'id' : self.id,\n }", "def asdict(self):\n return attr.asdict(self)", "def to_dict(self) -> dict:", "def getDict(self):\n res = {}\n for attr, value in self.__dict__.iteritems():\n if type(attr) is IntType or type(attr) is StringType or type(attr) is LongType or type(attr) is UnicodeType:\n res[attr] = value\n elif isinstance(attr, datetime.datetime):\n res[attr] = value.isoformat('-')\n \n return res", "def attributes(self):\n return self.__dict.keys()", "def dict(self):\n return self.__dict__", "def dict(self):\n return self.__dict__", "def dict(self):\n return self.__dict__", "def dict(self):\n return self.__dict__", "def dict(self):\n return self.__dict__", "def dict(self):\n return self.__dict__", "def dict(self):\n return self.__dict__", "def as_dict(self):\n return {c.key: getattr(self, c.key)\n for c in inspect(self).mapper.column_attrs}" ]
[ "0.7751993", "0.7751993", "0.73391134", "0.7334895", "0.7297356", "0.727818", "0.7159078", "0.71578115", "0.71494967", "0.71494967", "0.71283495", "0.71275014", "0.7122587", "0.71079814", "0.7060394", "0.7043251", "0.7034103", "0.70233124", "0.69635814", "0.69586295", "0.690053", "0.6881568", "0.6881568", "0.6857664", "0.68415916", "0.68122137", "0.680096", "0.67914945", "0.6757063", "0.6753585", "0.6741746", "0.6741746", "0.6741746", "0.6735291", "0.67126125", "0.6697801", "0.6695801", "0.6689893", "0.6680752", "0.66802895", "0.66802895", "0.66802895", "0.66547817", "0.66495687", "0.6633999", "0.6619567", "0.6619567", "0.66156983", "0.66049474", "0.6590706", "0.6590706", "0.6590206", "0.6587873", "0.65861845", "0.65822417", "0.65794736", "0.65792733", "0.657747", "0.6571183", "0.65662557", "0.65637356", "0.6539919", "0.65396816", "0.65283066", "0.65252614", "0.6513477", "0.65098846", "0.65077883", "0.65077883", "0.65077883", "0.65077883", "0.65077883", "0.65077883", "0.6507418", "0.6505772", "0.65015876", "0.64951885", "0.64951885", "0.64951885", "0.64857763", "0.6474329", "0.6469453", "0.64684683", "0.6453606", "0.6453024", "0.6453024", "0.6430734", "0.6429058", "0.6426903", "0.64215595", "0.64201874", "0.6417152", "0.6414739", "0.6411571", "0.6411571", "0.6411571", "0.6411571", "0.6411571", "0.6411571", "0.6411571", "0.64035517" ]
0.0
-1
Returns the string representation of the model
def to_str(self): return pprint.pformat(self.to_dict())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __str__(self):\n return super().__str__() + self.model.__str__()", "def __str__(self) -> str:\n # noinspection PyUnresolvedReferences\n opts = self._meta\n if self.name_field:\n result = str(opts.get_field(self.name_field).value_from_object(self))\n else:\n model_fields = get_model_fields(\n opts.model,\n foreign=False,\n m2m=False,\n exclude=self.exclude_from_str\n )\n # TODO: replace the above with the below to remove the get_model_fields call:\n # model_fields = [\n # f for f in opts.get_fields()\n # if f.concrete\n # and not (f.primary_key or f.is_relation or f.name in self.exclude_from_str)\n # ]\n result = \" \".join(\n [\n str(fld.value_from_object(self))\n for fld in model_fields\n if fld.value_from_object(self)\n ]\n )\n return result.strip() or super().__str__()", "def __str__(self):\n return '%s%s' % (self.name, ' - %s' % self.model if self.model else '')", "def __str__(self):\n model = self._meta.verbose_name.title()\n return f\"{model:s}: {self.name:s}\"", "def __str__(self):\n model = self._meta.verbose_name.title()\n return f\"{model:s}: {self.name:s}\"", "def __repr__(self):\n\n mod = f\"{self.__class__.__name__} Model\"\n try:\n mod += f': {self.filename}'\n except AttributeError:\n pass\n s = [mod]\n for name, v in self.metadata.items():\n s += [f\"{name:16} : {v}\"]\n return '\\n'.join(s)", "def __str__(self):\n \n res = ['>>> Model %(model_name)s <<<']\n res.append('')\n res.append('Independent parameters:')\n res.append('-----------------------')\n res.append('')", "def __str__(self):\n return \"DataModel(name={},attributes={},description={})\".format(\n self.name, {a.name: str(a) for a in self.attributes}, self.description\n )", "def model_info(self) -> str:\n return self._model_info(self.model).decode(\"utf-8\")", "def __str__(self):\n return str(self.serialize())", "def __str__ (self) :\n\n return self.as_string()", "def __str__(self):\n\n return self.toString()", "def __str__(self):\n msg = [\n f'{self.model=}',\n f'{self.field=}',\n f'{self.fxx=}',\n f'{self.date=}',\n f'{self.priority=}',\n ]\n return '\\n'.join(msg)", "def __str__(self):\n model = self._meta.verbose_name.title()\n title = self.extended_object.get_title()\n return f\"{model:s}: {title:s}\"", "def __repr__(self):\n return grid_search_to_str(self.model)", "def __str__(self):\n return self.toString()", "def __str__(self):\n return str(self.__dict__)", "def __str__(self):\n return str(self.__dict__)", "def to_representation(self) -> str:\n raise NotImplementedError()", "def __str__(self):\n return str(self.obj)", "def __str__(self):\n return self.make_flat()", "def dump_model(self):", "def __str__(self):\n return str(self.__dict__['_obj'])", "def __str__(self) -> str:\n model_str = [\"\\nModel info:\\n\", \" Unimodal encoder:\\n\"]\n\n for modality in range(self.num_modalities):\n model_str.append(f\" ({modality + 1}) {self.unimodal_encoder[modality]}\")\n\n model_str.append(\"\\n\\n Unimodal decoder:\\n\")\n for modality in range(self.num_modalities):\n model_str.append(f\" ({modality + 1}) {self.unimodal_decoder[modality]}\")\n\n if self.multimodal_decoder is not None:\n model_str.append(\"\\n\\n Multimodal decoder:\\n\")\n model_str.append(f\" {self.multimodal_decoder}\")\n\n return \"\".join(model_str)", "def __repr__(self):\n s = 'text model name: ' + self.name + '\\n'\n s += ' number of words: ' + str(len(self.words)) + '\\n'\n s += ' number of word lengths: ' + str(len(self.word_lengths)) + '\\n'\n s += ' number of stems: ' + str(len(self.stems)) + '\\n'\n s += ' number of sentence 
lengths: ' + str(len(self.sentence_lengths)) + '\\n'\n s += ' most common words: ' + str(self.common_word) + '\\n'\n\n return s", "def to_string(self):\r\n return self.__str__()", "def __repr__(self):\n return '<ModelSignature(model_name=%r)>' % self.model_name", "def __repr__(self):\n return '<ModelSignature(model_name=%r)>' % self.model_name", "def __str__(self):\n return str(self.get_data())", "def __str__(self):\n return f\"model {self._name}\"", "def __str__(self):\n\n return self.raw_field", "def __repr__(self):\n \n s = 'text model name: ' + self.name + '\\n' \n s += ' number of words: ' + str(len(self.words)) + '\\n'\n s += ' number of word lengths: ' + str(len(self.word_lengths)) + '\\n'\n s += ' number of sentence lengths: ' + str(len(self.sentence_lengths)) + '\\n'\n s += ' number of word stems: ' + str(len(self.stems)) + '\\n'\n s += ' number of commas counts: ' + str(len(self.commas_per_sentence)) + '\\n'\n return s", "def serialize(self):\n\n\t\treturn str(self)", "def __str__(self):\n return self.get_str()", "def serialize(self):\n\n return str(self)", "def __str__(self) -> str:\n if self.name_field:\n return str(getattr(self, self.name_field))\n # noinspection PyUnresolvedReferences\n data = [\n # Collect the string representations of related objects.\n # getattr(self, fk_field.attname) and\n # fk_field.value_from_object(self) would only return the primary\n # key of the related object.\n str(getattr(self, fk_field.name))\n for fk_field in get_model_fields(\n self._meta.model, base=False, foreign=True, m2m=False\n )\n if not fk_field.null\n ]\n if len(data) < 2:\n # Cannot build a more meaningful representation than the default.\n return super().__str__()\n else:\n template = \"{}\" + \" ({})\" * (len(data) - 1)\n return template.format(*data)", "def __str__(self):\n return self.s", "def __str__(self):\n return self.__repr__()", "def __str__(self):\n return self.__repr__()", "def __str__(self):\n return self.__repr__()", "def __str__(self):\n return self.__repr__()", "def __str__(self):\n return self.__repr__()", "def __repr__(self):\n\n # info string\n info = self.model.__repr__()\n info += \"\\n=========================\\n\"\n info += f\"Train data length:\\t\\t{ len(self.train_dataset) }\\n\"\n info += f\"Eval sata length:\\t\\t{ len(self.eval_dataset) }\\n\"\n info += f\"Optimizer:\\t\\t\\t\\t{ str(self.optimizer).split('(')[0] }\\n\"\n info += f\"Criterion:\\t\\t\\t\\t{ str(self.criterion).split('(')[0] }\\n\"\n info += f\"Training Environment:\\t{ self.device.type }\\n\"\n info += f\"Show information:\\t\\t{ 'True' if self.info else 'False' }\\n\"\n info += \"=========================\\n\"\n\n return info", "def __repr__(self):\n s = 'text model name: ' + self.name + '\\n'\n s += ' number of words: ' + str(len(self.words)) + '\\n'\n s += ' number of word lengths: ' + str(len(self.word_lengths)) + '\\n'\n s += ' number of stems: ' + str(len(self.stems)) + '\\n'\n s += ' number of sentence lengths: ' + str(len(self.sentence_lengths))\\\n + '\\n'\n s += ' number of punctuation types: ' + str(len(self.punctuation))\n return s", "def dumps(self, indent=0):\n outstr = \" \"*indent + \"MewloDbModel object '{0}' attribute values:\\n\".format(self.__class__.__name__)\n public_props = (name for name in dir(object) if not name.startswith('_'))\n for name in public_props:\n outstr += \" \"*indent + \"{0}: {1}\\n\".format(name, str(getattr(self,name)))\n return outstr", "def __str__(self):\n model = self._meta.verbose_name.title()\n title = self.title or str(_(\"Empty 
title\"))\n\n return f\"{model:s}: {title:s}\"", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()" ]
[ "0.85856134", "0.7814518", "0.77898884", "0.7751367", "0.7751367", "0.7712228", "0.76981676", "0.76700574", "0.7651133", "0.7597206", "0.75800353", "0.7568254", "0.7538184", "0.75228703", "0.7515832", "0.7498764", "0.74850684", "0.74850684", "0.7467648", "0.74488163", "0.7442643", "0.74416703", "0.7433768", "0.7411771", "0.7405439", "0.7379557", "0.7361716", "0.7361716", "0.732774", "0.7325511", "0.732528", "0.73097324", "0.73078936", "0.73001266", "0.7296789", "0.7292791", "0.7289445", "0.7287187", "0.7287187", "0.7287187", "0.7287187", "0.7287187", "0.7279803", "0.7261615", "0.7250399", "0.7244789", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068" ]
0.0
-1
For `print` and `pprint`
def __repr__(self): return self.to_str()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pprint(*args, **kwargs):\n if PRINTING:\n print(*args, **kwargs)", "def print_out():\n pass", "def custom_print(*objects):\n print(*objects, sep=OFS, end=ORS)", "def _print(self, *args):\n return _ida_hexrays.vd_printer_t__print(self, *args)", "def _printable(self):\n pass", "def _print_custom(self):\n pass", "def pypprint(*args, **kwargs): # type: ignore\n from typing import Iterable\n\n if len(args) != 1:\n print(*args, **kwargs)\n return\n x = args[0]\n if isinstance(x, dict):\n for k, v in x.items():\n print(f\"{k}:\", v, **kwargs)\n elif isinstance(x, Iterable) and not isinstance(x, str):\n for i in x:\n print(i, **kwargs)\n else:\n print(x, **kwargs)", "def pprint(self):\n # just here for defining the interface; work is done in subclasses\n pass", "def print(*args, **kwargs):\n with P_LOCK:\n __builtins__.print(*args, **kwargs)", "def print(self):\n # Your implementation here", "def p(value):\n pp.pprint(value)", "def static_print(*args, __p=print, **kwargs):\n __p(*args, **kwargs)", "def print(self, *args, **kwargs):\n print(*args, **kwargs)", "def pprint(self):\n print(self.pprint_str())", "def pprint(obj):\n for argname in sorted([x for x in dir(obj) if not x.startswith('__')]):\n # Skip callables\n if hasattr(getattr(obj, argname), '__call__'):\n continue\n print(\"{} : {}\".format(argname, getattr(obj, argname)))", "def print_(self, s: str) -> None:", "def my_pprint(obj, intend = 0):\n if isinstance(obj, dict):\n for key, value in obj.items():\n print(intend*\" \"+str(key)+\" : \")\n my_pprint(value, intend = intend + 4)\n print()\n elif isinstance(obj, list):\n for value in obj:\n my_pprint(value, intend = intend + 4)\n print()\n elif isinstance(obj, bytes):\n print(\"<binary data>\")\n \n else:\n try:\n print(intend*\" \"+str(obj))\n except UnicodeDecodeError:\n print(intend*\" \"\"<?>\")", "def test_print(chikin):\n chikin.print()", "def _print(self, text):\n\t\tif self.verbose:\n\t\t\tprint text", "def out(*args):\r\n print(*args)", "def __pprint(object, stream=None, indent=1, width=80, depth=None):\n printer = PrettyPrinterExt(\n stream=stream, indent=indent, width=width, depth=depth)\n printer.pprint(object)", "def DumpPprint(data):\n #NOTE(g): Import is done here, instead of the top of the file, to not require this module if it is not used\n import pprint\n \n text = pprint.pformat(data)\n \n return text", "def repl_print_statements():\n pass", "def test_03_pass_print(self):\n print('Hello World!')", "def p(self):\n self.printstdout = True", "def print(*args, **kwargs):\n new_args = []\n for arg in args:\n if builtins.isinstance(arg, models.Point):\n new_args.append(\"({0}, {1})\".format(arg.x, arg.y))\n else:\n new_args.append(arg)\n\n builtins.print(*new_args, **kwargs)", "def real_print(*args, **kwargs):\n\n kwargs.setdefault('file', real_stdout)\n _python_print_function(*args, **kwargs)", "def to_print_out(self):\n self.error_throw('output')\n\n if self.rank_method == methods_of_ranking[3]: #'diversified_ranking'\n self.output_div('print')\n else:\n self.output('print')", "def debug_print(debug_data):\n if DEBUG_MODE == \"true\":\n pp.pprint(debug_data)", "def print(self):\r\n self.print_avec_separateur()", "def pprint(self):\r\n for i in self.items():\r\n print '%s => %r'%i", "def pprint(self, parameter_s=''):\n ptformatter = self.shell.display_formatter.formatters['text/plain']\n ptformatter.pprint = bool(1 - ptformatter.pprint)\n print('Pretty printing has been turned',\n ['OFF','ON'][ptformatter.pprint])", "def print(self):\n print(self.pretty_str())", "def 
test_print4(self):\n writer = StringIO()\n collatz_print(writer, 1, 1, 1)\n self.assertEqual(writer.getvalue(), \"1 1 1\\n\")", "def eprint(*args, **kwargs):\n\tprint(*args, file=sys.stderr, **kwargs)", "def _print(self, *args, **kwargs) -> None:\n # Only print in verbose mode\n if self._verbose:\n arglist = list(args)\n arglist[0] = f\"[buddy-{self._experiment_name}] {args[0]}\"\n print(*arglist, **kwargs)", "def use_pypprint_for_implicit_print(self) -> None:\n if self.implicit_print is not None:\n self.implicit_print.func.id = \"pypprint\" # type: ignore\n # Make sure we import it later\n self.undefined.add(\"pypprint\")", "def test_print(self):\n writer = StringIO()\n collatz_print(writer, 1, 10, 20)\n self.assertEqual(writer.getvalue(), \"1 10 20\\n\")", "def pprint(self):\n return pformat(repr(self))", "def printer(message):\n if VERBOSITY:\n pprint(message)", "def rec_print(p):\n if len(p) == 0:\n return\n t = p.pop(0)\n print t\n rec_print(p)", "def pformat(object):\r\n return PrettyPrinter().pformat(object)", "def printc(*a, **kw):\n print(*a, **kw)", "def pr(x):\n Card.print_pretty_cards(x)", "def debug_print(self, *content):\n if self.debug:\n print(*content)", "def pprint(object, stream=None):\r\n printer = PrettyPrinter(stream=stream)\r\n printer.pprint(object)", "def safe_print(*objs, errors=\"replace\"):\n\tprint(*(to_stdout(str(o), errors) for o in objs))", "def magic_Pprint(self, parameter_s=''):\n \n self.shell.outputcache.Pprint = 1 - self.shell.outputcache.Pprint\n print 'Pretty printing has been turned', \\\n ['OFF','ON'][self.shell.outputcache.Pprint]", "def print_output(tree):\n print_value(tree)\n print_tree(tree)", "def _Print(self, t):\n self.RaiseError(t, \"Print not supported\")", "def vprint(*args, **kwargs ):\n\n forceprint = False\n for key in kwargs:\n if key == \"forceprint\":\n forceprint =kwargs[key]\n \n line = ''\n if debug or forceprint : \n for arg in args:\n line += str(arg) +\" \"\n log = open(exepath + 'pyframe.log', 'a') \n log.write(line + \"\\n\")\n log.close() \n print line", "def eprint(*pargs, **kargs):\n print('\\u001b[31m', end='', file=sys.stderr)\n print(*pargs, file=sys.stderr, **kargs)\n print('\\u001b[0m', end='', file=sys.stderr)", "def printer(obj, ident=''):\n import inspect\n print ident + obj.__class__.__name__.upper()\n ident += ' '\n lists = []\n for name in dir(obj):\n elem = getattr(obj, name)\n if isinstance(elem, list) and name != u'decoded_content':\n lists.append(elem)\n elif not inspect.ismethod(elem):\n if not name.startswith('__'):\n if name == u'data' and elem:\n print ident + u'data = '\n printer(elem, ident + ' ')\n else:\n print ident + u'%s\\t= %s' % (name, getattr(obj, name))\n for l in lists:\n for i in l:\n printer(i, ident + ' ')", "def printer(obj, ident=''):\n import inspect\n print ident + obj.__class__.__name__.upper()\n ident += ' '\n lists = []\n for name in dir(obj):\n elem = getattr(obj, name)\n if isinstance(elem, list) and name != u'decoded_content':\n lists.append(elem)\n elif not inspect.ismethod(elem):\n if not name.startswith('__'):\n if name == u'data' and elem:\n print ident + u'data = '\n printer(elem, ident + ' ')\n else:\n print ident + u'%s\\t= %s' % (name, getattr(obj, name))\n for l in lists:\n for i in l:\n printer(i, ident + ' ')", "def _print(self, *args):\n return _ida_hexrays.qstring_printer_t__print(self, *args)", "def pprint(self):\n def pprintStr(node):\n s = \"(\" + str(node.value) \n for action in node.children:\n s = s + \", \" + pprintStr(node.children[action])\n s = s + \")\"\n 
return s\n\n print pprintStr(self)", "def hook_print():\n sys.stdout = PrintHook()", "def cmdPrint( self, *args):\n return self.cmd( *args, **{ 'verbose': True } )", "def print_list(self):\r\n pass", "def debugprint(obj, depth=-1, print_type=False,\r\n file=None, ids='CHAR', stop_on_name=False):\r\n if file == 'str':\r\n _file = StringIO()\r\n elif file is None:\r\n _file = sys.stdout\r\n else:\r\n _file = file\r\n done = dict()\r\n results_to_print = []\r\n order = []\r\n if isinstance(obj, gof.Variable):\r\n results_to_print.append(obj)\r\n elif isinstance(obj, gof.Apply):\r\n results_to_print.extend(obj.outputs)\r\n elif isinstance(obj, Function):\r\n results_to_print.extend(obj.maker.fgraph.outputs)\r\n order = obj.maker.fgraph.toposort()\r\n elif isinstance(obj, (list, tuple)):\r\n results_to_print.extend(obj)\r\n elif isinstance(obj, gof.FunctionGraph):\r\n results_to_print.extend(obj.outputs)\r\n order = obj.toposort()\r\n elif isinstance(obj, (int, long, float, numpy.ndarray)):\r\n print obj\r\n else:\r\n raise TypeError(\"debugprint cannot print an object of this type\", obj)\r\n for r in results_to_print:\r\n debugmode.debugprint(r, depth=depth, done=done, print_type=print_type,\r\n file=_file, order=order, ids=ids,\r\n stop_on_name=stop_on_name)\r\n if file is _file:\r\n return file\r\n elif file == 'str':\r\n return _file.getvalue()\r\n else:\r\n _file.flush()", "def _get_print_fn(file=sys.stdout):\n def _print_fn(op, xin,):\n for attr in op.attrs:\n temp = getattr(xin, attr)\n if callable(temp):\n pmsg = temp()\n else:\n pmsg = temp\n print(op.message, attr, '=', pmsg, file=file)\n return _print_fn", "def test_print1(self):\n writer = StringIO()\n collatz_print(writer, 100, 200, 125)\n self.assertEqual(writer.getvalue(), \"100 200 125\\n\")", "def printOutput(self):\n pass", "def _print(self, *args):\n return _ida_hexrays.cnumber_t__print(self, *args)", "def setPrint():\n (e,d,sr,sw) = codecs.lookup('utf-8')\n unicode_to_utf8 = sw(sys.stdout)\n sys.stdout = unicode_to_utf8", "def pr(string, verbose):\n if(verbose):\n print(string)", "def print(*args, sep=\" \"):\n pass", "def printv(self, *arg):\n if self.verbose:\n print(*arg)", "def print(self):\n\n print(self)", "def _p(self, *args, level=2, **kwargs):\n if self._verbosity >= level:\n print(*args, **kwargs)", "def test_print2(self):\n writer = StringIO()\n collatz_print(writer, 201, 210, 89)\n self.assertEqual(writer.getvalue(), \"201 210 89\\n\")", "def print_pointers(self):\n\n ### FILL IN ###", "def foo_printer(self):\n print(\"\\nHi I'm {}\".format(self.foo))", "def printed(method):\n\t\tdef wrapper(cls, *args):\n\t\t\tif cls.verbose:\n\t\t\t\treturn method(cls, *args)\n\t\treturn wrapper", "def print_me(self, tabs=0, tab=' '):\n pre = tab*tabs\n print(pre+'Producer:')\n print(pre+' produces:', self._produces)\n print(pre+' consumes:', self._consumes)\n print(pre+' transfer:', self._transfer)\n print(pre+' capacity:', self._capacity)", "def _print(cls, quad):\n\t\tprint(\"\\nLIGHT OUTPUT:\\n<<<<{}>>>>\".format(ast.literal_eval(str(cls.get_address_value(quad.result)))))\n\t\tprint(\"END\")\n\n\t\tvar = cls.get_address_value(quad.result)\n\t\tif isinstance(var, collections.Iterable):\n\t\t\tprint(\"DEEP COPY\")\n\t\t\tcls.print_queue.enqueue(copy.deepcopy(var))\n\t\telse:\n\t\t\tcls.print_queue.enqueue(var)", "def printout(*args, **kwargs):\n console_print(sys.stdout, *args, **kwargs)", "def pprint(x):\n if is_theano_object(x):\n return _gettheano().printing.pprint(x)\n else:\n return str(x)", "def PrettyPrint(self):\r\n 
print(self.data)\r\n return", "def print(self):\n self.print_avec_separateur(\" \")", "def eprint(*args, **kwargs):\n print(*args, file=sys.stderr, **kwargs)", "def eprint(*args, **kwargs):\n print(*args, file=sys.stderr, **kwargs)", "def eprint(*args, **kwargs):\n print(*args, file=sys.stderr, **kwargs)", "def eprint(*args, **kwargs):\n print(*args, file=sys.stderr, **kwargs)", "def eprint(*args, **kwargs):\n print(*args, file=sys.stderr, **kwargs)", "def eprint(*args, **kwargs):\n print(*args, file=sys.stderr, **kwargs)", "def pprint_helper(self, angle, indent):\n # just here for defining the interface; work is done in subclasses\n pass", "def _pprint(params, offset=0, printer=repr):\n # Do a multi-line justified repr:\n param_names = [p for p in params.keys() if p is not \"cost\"]\n param_names.sort()\n\n params_list = list()\n this_line_length = offset\n line_sep = ',\\n' + (1 + offset // 2) * ' '\n for i, name in enumerate(param_names):\n value = params[name]\n if isinstance(value, float):\n this_repr = '%s=%s' % (name, str(value))\n else:\n this_repr = '%s=%s' % (name, printer(value))\n if len(this_repr) > 500:\n this_repr = this_repr[:300] + '...' + this_repr[-100:]\n if i > 0:\n if (this_line_length + len(this_repr) >= 75 or '\\n' in this_repr):\n params_list.append(line_sep)\n this_line_length = len(line_sep)\n else:\n params_list.append(', ')\n this_line_length += 2\n params_list.append(this_repr)\n this_line_length += len(this_repr)\n # options = np.get_printoptions()\n # np.set_printoptions(**options)\n lines = ''.join(params_list)\n # Strip trailing space to avoid nightmare in doctests\n lines = '\\n'.join(l.rstrip(' ') for l in lines.split('\\n'))\n return lines", "def init_printing(pretty_print=True, order=None, use_unicode=None):\n if pretty_print:\n stringify_func = lambda arg: pretty(arg, order=order, use_unicode=use_unicode)\n else:\n stringify_func = sstrrepr\n\n try:\n import IPython\n\n ip = IPython.ipapi.get()\n\n if ip is not None:\n def result_display(self, arg):\n \"\"\"IPython's pretty-printer display hook.\n\n This function was adapted from:\n\n ipython/IPython/hooks.py:155\n\n \"\"\"\n if self.rc.pprint:\n out = stringify_func(arg)\n\n if '\\n' in out:\n print\n\n print out\n else:\n print repr(arg)\n\n ip.set_hook('result_display', result_display)\n return\n except ImportError:\n pass\n\n import __builtin__, sys\n\n def displayhook(arg):\n \"\"\"Python's pretty-printer display hook.\n\n This function was adapted from:\n\n http://www.python.org/dev/peps/pep-0217/\n\n \"\"\"\n if arg is not None:\n __builtin__._ = None\n print stringify_func(arg)\n __builtin__._ = arg\n\n sys.displayhook = displayhook", "def print_verbose(self) -> None:\n print(self)\n if self.meta is not None:\n print(self.meta.__repr__())", "def _print(self, *args):\n return _ida_hexrays.cinsn_t__print(self, *args)", "def my_print(self):\n if self.__size == 0:\n print(\"\")\n return\n [print(\"\") for x in range(0, self.__position[1])]\n for i in range(0, self.__size):\n [print(\" \", end=\"\") for i in range(0, self.__position[0])]\n [print(\"#\", end=\"\") for j in range(0, self.__size)]\n print(\"\")", "def sequential_print_statements():\n pass", "def print_post():\n print('| | |'),", "def print_(*args, **kwargs):\r\n fp = kwargs.pop(\"file\", sys.stdout)\r\n if fp is None:\r\n return\r\n def write(data):\r\n if not isinstance(data, basestring):\r\n data = str(data)\r\n fp.write(data)\r\n want_unicode = False\r\n sep = kwargs.pop(\"sep\", None)\r\n if sep is not None:\r\n if isinstance(sep, 
unicode):\r\n want_unicode = True\r\n elif not isinstance(sep, str):\r\n raise TypeError(\"sep must be None or a string\")\r\n end = kwargs.pop(\"end\", None)\r\n if end is not None:\r\n if isinstance(end, unicode):\r\n want_unicode = True\r\n elif not isinstance(end, str):\r\n raise TypeError(\"end must be None or a string\")\r\n if kwargs:\r\n raise TypeError(\"invalid keyword arguments to print()\")\r\n if not want_unicode:\r\n for arg in args:\r\n if isinstance(arg, unicode):\r\n want_unicode = True\r\n break\r\n if want_unicode:\r\n newline = unicode(\"\\n\")\r\n space = unicode(\" \")\r\n else:\r\n newline = \"\\n\"\r\n space = \" \"\r\n if sep is None:\r\n sep = space\r\n if end is None:\r\n end = newline\r\n for i, arg in enumerate(args):\r\n if i:\r\n write(sep)\r\n write(arg)\r\n write(end)", "def print_(*args, **kwargs):\r\n fp = kwargs.pop(\"file\", sys.stdout)\r\n if fp is None:\r\n return\r\n def write(data):\r\n if not isinstance(data, basestring):\r\n data = str(data)\r\n fp.write(data)\r\n want_unicode = False\r\n sep = kwargs.pop(\"sep\", None)\r\n if sep is not None:\r\n if isinstance(sep, unicode):\r\n want_unicode = True\r\n elif not isinstance(sep, str):\r\n raise TypeError(\"sep must be None or a string\")\r\n end = kwargs.pop(\"end\", None)\r\n if end is not None:\r\n if isinstance(end, unicode):\r\n want_unicode = True\r\n elif not isinstance(end, str):\r\n raise TypeError(\"end must be None or a string\")\r\n if kwargs:\r\n raise TypeError(\"invalid keyword arguments to print()\")\r\n if not want_unicode:\r\n for arg in args:\r\n if isinstance(arg, unicode):\r\n want_unicode = True\r\n break\r\n if want_unicode:\r\n newline = unicode(\"\\n\")\r\n space = unicode(\" \")\r\n else:\r\n newline = \"\\n\"\r\n space = \" \"\r\n if sep is None:\r\n sep = space\r\n if end is None:\r\n end = newline\r\n for i, arg in enumerate(args):\r\n if i:\r\n write(sep)\r\n write(arg)\r\n write(end)", "def debugprint(r, prefix='', depth=-1, done=None, print_type=False,\r\n file=sys.stdout, print_destroy_map=False,\r\n print_view_map=False, order=None, ids='CHAR',\r\n stop_on_name=False, prefix_child=None):\r\n if depth == 0:\r\n return\r\n\r\n if order is None:\r\n order = []\r\n\r\n if done is None:\r\n done = dict()\r\n\r\n if print_type:\r\n type_str = ' <%s>' % r.type\r\n else:\r\n type_str = ''\r\n\r\n if prefix_child is None:\r\n prefix_child = prefix\r\n\r\n def get_id_str(obj):\r\n if obj in done:\r\n id_str = done[obj]\r\n elif ids == \"id\":\r\n id_str = \"[@%s]\" % str(id(r))\r\n elif ids == \"int\":\r\n id_str = \"[@%s]\" % str(len(done))\r\n elif ids == \"CHAR\":\r\n id_str = \"[@%s]\" % char_from_number(len(done))\r\n elif ids == \"\":\r\n id_str = \"\"\r\n done[obj] = id_str\r\n return id_str\r\n\r\n if hasattr(r.owner, 'op'):\r\n # this variable is the output of computation,\r\n # so just print out the apply\r\n a = r.owner\r\n\r\n r_name = getattr(r, 'name', '')\r\n # normally if the name isn't set, it'll be None, so\r\n # r_name is None here\r\n if r_name is None:\r\n r_name = ''\r\n\r\n if print_destroy_map:\r\n destroy_map_str = str(getattr(r.owner.op, 'destroy_map', ''))\r\n else:\r\n destroy_map_str = ''\r\n\r\n if print_view_map:\r\n view_map_str = str(getattr(r.owner.op, 'view_map', ''))\r\n else:\r\n view_map_str = ''\r\n if destroy_map_str and destroy_map_str != '{}':\r\n destroy_map_str = 'd=' + destroy_map_str\r\n if view_map_str and view_map_str != '{}':\r\n view_map_str = 'v=' + view_map_str\r\n\r\n o = ''\r\n if order:\r\n o = 
str(order.index(r.owner))\r\n already_printed = a in done # get_id_str put it in the dict\r\n id_str = get_id_str(a)\r\n\r\n if len(a.outputs) == 1:\r\n print >> file, '%s%s %s%s \\'%s\\' %s %s %s' % (prefix, a.op,\r\n id_str,\r\n type_str, r_name,\r\n destroy_map_str,\r\n view_map_str,\r\n o)\r\n else:\r\n print >> file, '%s%s.%i %s%s \\'%s\\' %s %s %s' % (prefix, a.op,\r\n a.outputs.index(r),\r\n id_str, type_str,\r\n r_name,\r\n destroy_map_str,\r\n view_map_str,\r\n o)\r\n if not already_printed:\r\n if (not stop_on_name or\r\n not (hasattr(r, 'name') and r.name is not None)):\r\n new_prefix = prefix_child + ' |'\r\n new_prefix_child = prefix_child + ' |'\r\n for idx, i in enumerate(a.inputs):\r\n if idx == len(a.inputs) - 1:\r\n new_prefix_child = prefix_child + ' '\r\n\r\n debugprint(i, new_prefix, depth=depth - 1, done=done,\r\n print_type=print_type, file=file, order=order,\r\n ids=ids, stop_on_name=stop_on_name,\r\n prefix_child=new_prefix_child)\r\n else:\r\n #this is an input variable\r\n id_str = get_id_str(r)\r\n print >> file, '%s%s %s%s' % (prefix, r, id_str, type_str)\r\n\r\n return file", "def bpprint(self, out=None):\n if out is None:\n out = sys.stdout\n print(self.bpformat(), file=out)", "def vprint(expr, **settings):\n\n outstr = vsprint(expr, **settings)\n\n import builtins\n if (outstr != 'None'):\n builtins._ = outstr\n print(outstr)", "def _pprint(params, offset=0, printer=repr):\n # Do a multi-line justified repr:\n options = numpy.get_printoptions()\n numpy.set_printoptions(precision=5, threshold=64, edgeitems=2)\n params_list = list()\n this_line_length = offset\n line_sep = ',\\n' + (1 + offset // 2) * ' '\n for i, (k, v) in enumerate(sorted(params.items())):\n if isinstance(v, float):\n # use str for representing floating point numbers\n # this way we get consistent representation across\n # architectures and versions.\n this_repr = '%s=%s' % (k, str(v))\n else:\n # use repr of the rest\n this_repr = '%s=%s' % (k, printer(v))\n if len(this_repr) > 500:\n this_repr = this_repr[:300] + '...' 
+ this_repr[-100:]\n if i > 0:\n if this_line_length + len(this_repr) >= 75 or '\\n' in this_repr:\n params_list.append(line_sep)\n this_line_length = len(line_sep)\n else:\n params_list.append(', ')\n this_line_length += 2\n params_list.append(this_repr)\n this_line_length += len(this_repr)\n\n numpy.set_printoptions(**options)\n lines = ''.join(params_list)\n # Strip trailing space to avoid nightmare in doctests\n lines = '\\n'.join(l.rstrip(' ') for l in lines.split('\\n'))\n return lines", "def print_(*args, **kwargs):\n fp = kwargs.pop(\"file\", sys.stdout)\n if fp is None:\n return\n\n def write(data):\n if not isinstance(data, basestring):\n data = str(data)\n fp.write(data)\n want_unicode = False\n sep = kwargs.pop(\"sep\", None)\n if sep is not None:\n if isinstance(sep, unicode):\n want_unicode = True\n elif not isinstance(sep, str):\n raise TypeError(\"sep must be None or a string\")\n end = kwargs.pop(\"end\", None)\n if end is not None:\n if isinstance(end, unicode):\n want_unicode = True\n elif not isinstance(end, str):\n raise TypeError(\"end must be None or a string\")\n if kwargs:\n raise TypeError(\"invalid keyword arguments to print()\")\n if not want_unicode:\n for arg in args:\n if isinstance(arg, unicode):\n want_unicode = True\n break\n if want_unicode:\n newline = unicode(\"\\n\")\n space = unicode(\" \")\n else:\n newline = \"\\n\"\n space = \" \"\n if sep is None:\n sep = space\n if end is None:\n end = newline\n for i, arg in enumerate(args):\n if i:\n write(sep)\n write(arg)\n write(end)" ]
[ "0.75577617", "0.73375154", "0.6986672", "0.698475", "0.6944995", "0.692333", "0.6899106", "0.6898902", "0.68146646", "0.6806209", "0.6753795", "0.67497987", "0.6744008", "0.6700308", "0.6691256", "0.6674591", "0.6658083", "0.66091245", "0.6606931", "0.6601862", "0.6563738", "0.6561717", "0.65549695", "0.6494838", "0.6473391", "0.64491546", "0.6411177", "0.6340302", "0.6339321", "0.6335031", "0.6332035", "0.6315847", "0.631272", "0.6297732", "0.62969106", "0.6283717", "0.6279154", "0.6271603", "0.62673396", "0.6265511", "0.62629336", "0.6258366", "0.6258278", "0.62501305", "0.6248315", "0.62459755", "0.6244254", "0.6242083", "0.62393075", "0.62156516", "0.6208198", "0.62068796", "0.62062824", "0.62062824", "0.6194123", "0.6189738", "0.6183852", "0.6183035", "0.61697906", "0.61614454", "0.6160741", "0.61544997", "0.61528033", "0.6150831", "0.6147288", "0.61380607", "0.613793", "0.61300766", "0.61278135", "0.6125416", "0.6114217", "0.61126333", "0.6100682", "0.60998785", "0.6096818", "0.6081694", "0.6076982", "0.6072701", "0.6060028", "0.60581726", "0.6055044", "0.6055044", "0.6055044", "0.6055044", "0.6055044", "0.6055044", "0.6043662", "0.6037599", "0.60336643", "0.6030174", "0.60290223", "0.60242903", "0.6016989", "0.6004274", "0.60005474", "0.60005474", "0.60003483", "0.599558", "0.59923434", "0.5979316", "0.59777945" ]
0.0
-1
Returns true if both objects are equal
def __eq__(self, other): if not isinstance(other, EscrowTransactionResponse): return False return self.__dict__ == other.__dict__
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __eq__(self, other):\n return are_equal(self, other)", "def __eq__(self, other):\n return are_equal(self, other)", "def __eq__(self,other):\n try: return self.object==other.object and isinstance(self,type(other))\n except: return False", "def __eq__(self, other):\n if isinstance(self, other.__class__):\n return self.__dict__ == other.__dict__\n return False", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def 
__eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.__dict__ == other.__dict__\n return False", "def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.__dict__ == other.__dict__\n return False", "def __eq__(self, other):\r\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n # Ensure same class and values match\n if isinstance(other, self.__class__):\n return self.__dict__ == other.__dict__\n else:\n return False", "def is_equal(self, a, b):\n return a is b", "def is_equal(self, a, b):\n return a == b", "def __eq__(self, other):\n return self is other", "def __eq__(self, other):\n return self is other", "def __eq__(self, other):\r\n if isinstance(other, self.__class__):\r\n return self.__dict__ == other.__dict__\r\n else:\r\n return False", "def is_equal(o1: object, o2: object) -> bool:\n if o1 is None and o2 is None:\n return True\n if o1 is None:\n return False\n return o1 == o2", "def __eq__(self,other):\n return self is other", "def is_equal(self, a, b):\n return a.X[0] == b.X[0]", "def __eq__(self, other):\n return type(self) == type(other) and self.id == other.id", "def __eq__(self, other) -> bool:\n if json.dumps(self.data,sort_keys=True) == json.dumps(other.data,sort_keys=True):\n return True\n else:\n return False", "def __eq__(self, other):\n if not isinstance(other, Single2HaObject):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other", "def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.__hash__() == other.__hash__()\n return False", "def __eq__(self, other):\n if self.__class__ != other.__class__:\n return False\n if self.primary != other.primary:\n return False\n return True", "def __eq__(self, other) -> bool:\n if other is None:\n return False\n return self.__hash__() == other.__hash__()", "def __eq__(self, other):\n if not isinstance(other, ObjectInfo):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self: _TT, other: object) -> bool:\n return self.eq(other) # type: ignore", "def __eq__(self, other):\n return id(self) == id(other)", "def __eq__(self, other) -> bool:\n return type(self) == type(other) and \\\n self._id == other.id and \\\n self.code == other.code and \\\n self.name == other.name and \\\n self.gender == other.gender and \\\n self.date_of_birth == other.date_of_birth", "def equals(self, other): # -> bool:\n ...", "def equals(self, obj: object) -> bool:\n ...", "def __eq__(self, other):\n for attr in self._attrs_to_save:\n try:\n if getattr(self, attr) != getattr(other, attr):\n return False\n except AttributeError:\n return False\n return True", "def __eq__(self, other):\n if type(other) is type(self):\n return (self.x == other.x and self.y == other.y and self.z == other.z)\n return False", "def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.x == other.x and self.y == other.y\n return False", "def __eq__(self, other: object) -> bool:\n if not isinstance(other, self.__class__):\n return NotImplemented\n\n return (\n self.name,\n self.submit_at,\n self.subreddit,\n self.title,\n self.body_template,\n ) == (\n other.name,\n other.submit_at,\n other.subreddit,\n other.title,\n other.body_template,\n )", "def __eq__(self, other):\n # Check that we share 
the same class as this object\n if not isinstance(other, type(self)):\n return False\n\n return hash(self) == hash(other)", "def __eq__(self, other):\n if not isinstance(other, PreviewObjectAutofill):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return equal(self, other)", "def __eq__(self, other: Any) -> bool:\n return self.__class__ is other.__class__ and self.identifier == other.identifier", "def __eq__(self, other):\n return self.__id == other.get_id()", "def __eq__ (self, other):\n if type(self) == type(other):\n return self._m == other._m\n else:\n return False", "def __eq__(self, other):\n if not isinstance(other, Referent):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.properties == other.properties", "def __eq__(self, other):\n return self.items() == other.items()", "def __eq__(self, other):\n return hash(self) == hash(other)", "def __eq__(self, other):\n return hash(self) == hash(other)", "def __eq__(self, other):\n return hash(self) == hash(other)", "def __eq__(self, other):\n return self.x == other.x and self.y == other.y", "def __eq__(self, other):\n\n if self is other:\n return True\n return hash(self) == hash(other)", "def __eq__(self, other):\n if other._field1 == self._field1:\n return True\n return False", "def same_as(self, other):\n return super().__eq__(other)", "def __eq__(self, other):\n try:\n return other and \\\n self.id == other.id\n\n except AttributeError:\n return False", "def __eq__(self, other):\r\n\t\treturn self._to_pylist() == other._to_pylist()", "def __eq__(self, other):\n if not isinstance(other, Fiddle):\n return False\n\n return self.__dict__ == other.__dict__" ]
[ "0.8088132", "0.8088132", "0.8054589", "0.7982687", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.7961088", "0.7961088", "0.79433626", "0.79303336", "0.7926563", "0.7897525", "0.78826123", "0.78826123", "0.78806067", "0.7872423", "0.7868354", "0.78668815", "0.7825702", "0.7819993", "0.78162885", "0.78078854", "0.78068274", "0.7796298", "0.7794721", "0.7784825", "0.77790844", "0.7769397", "0.77534705", "0.7746211", "0.7741107", "0.77282816", "0.7725766", "0.7719537", "0.770273", "0.7685999", "0.7677552", "0.76739407", "0.7664857", "0.76557016", "0.7655046", "0.76282835", "0.7625795", "0.76242626", "0.76237214", "0.76237214", "0.76237214", "0.7617347", "0.7600536", "0.7599156", "0.7595863", "0.75945824", "0.7594092", "0.75899327" ]
0.0
-1
Returns true if both objects are not equal
def __ne__(self, other): return not self == other
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __ne__(self, other: object) -> bool:\n if self.__eq__(other):\n return False\n return True", "def __ne__(self, other: object) -> bool:\n return not self.__eq__(other)", "def __ne__(self, other) -> bool:\n return not self.__eq__(other)", "def __eq__(self, other):\n return not self.__ne__(other)", "def __ne__(self, other):\n if self.__eq__(other):\n return False\n return True", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n # type: (object) -> bool\n return not self == other", "def __ne__(self, other):\n # type: (object) -> bool\n return not self == other", "def __ne__(self, other):\n # type: (object) -> bool\n return not self == other", "def __ne__(self, other):\r\n return not self == other", "def __ne__(self, other):\r\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__ (self, other):\n return not self == other" ]
[ "0.845611", "0.8391477", "0.8144138", "0.81410587", "0.8132492", "0.8093973", "0.80920255", "0.80920255", "0.80920255", "0.8085325", "0.8085325", "0.8076365", "0.8076365", "0.8065748" ]
0.0
-1
Helper to log the failed SQS records metric
def _log_failed(cls, count): MetricLogger.log_metric(FUNCTION_NAME, MetricLogger.SQS_FAILED_RECORDS, count)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def log_failures(self):\n for exception in self.queue_manager.failure_descriptions():\n self.logger.info(exception)", "def test_failed_deliveries_logging(self):\n sms = SMS.objects.create(to='+6280000000000', status=STATUS.queued,\n backend_alias='error')\n call_command('send_queued_sms', log_level=0)\n self.assertEqual(sms.logs.count(), 0)\n\n sms = SMS.objects.create(to='+6280000000000', status=STATUS.queued,\n backend_alias='error')\n call_command('send_queued_sms', log_level=1)\n self.assertEqual(sms.logs.count(), 1)\n\n sms = SMS.objects.create(to='+6280000000000', status=STATUS.queued,\n backend_alias='error')\n call_command('send_queued_sms', log_level=2)\n self.assertEqual(sms.logs.count(), 1)", "def append_record_failure():\n\t\tpass", "def _check_failures(self, response, batch=None):\n if not response.get('Failed'):\n return 0 # nothing to do here\n\n LOGGER.error('The following records failed to put to queue %s', self.queue.url)\n\n for failure in response['Failed']:\n # Pull out the record that matches this ID\n record = self._extract_message_by_id(batch, failure['Id']) if batch else None\n LOGGER.error(self._format_failure_message(failure, record=record))\n\n failed = len(response.get('Failed', []))\n self._log_failed(failed)\n\n # Raise an exception if this is the fault of the sender (us)\n if any(result['SenderFault'] for result in response['Failed']):\n raise SQSClientError('Failed to send records to SQS:\\n{}'.format(response))\n\n return failed", "def log_failure(self, request):\n self.log_file.write(self.TYPE_FAILURE + \",%f,,,%f,,\\n\" %\n (float(request.resources[0]['amount']),\n float(request.offer)))", "def submit_errors_metric(lambda_context):\n if not are_enhanced_metrics_enabled():\n return\n\n lambda_metric(\n \"{}.errors\".format(ENHANCED_METRICS_NAMESPACE_PREFIX),\n 1,\n tags=get_enhanced_metrics_tags(lambda_context),\n )", "def test_results_error_stacktrace(self, affiliate_items):\n updater = mock.Mock(side_effect=ValueError('Shopping'))\n batch_job = BatchJob(affiliate_items, updater)\n\n with_message = 0\n for result in batch_job.run():\n with_message += (result.is_error and 'Shopping' in result.details)\n\n assert with_message == 4", "def record_failure(self, now=None) -> None:\n logging.info('Recording failure at %r', now or int(time.time()))\n self.failure_timestamp = now or int(time.time())\n self.put()", "def test_failed_deliveries_logging(self):\n email = Email.objects.create(from_email='[email protected]',\n to=['[email protected]'], status=STATUS.queued,\n backend_alias='error')\n call_command('send_queued_mail', log_level=0)\n self.assertEqual(email.logs.count(), 0)\n\n email = Email.objects.create(from_email='[email protected]',\n to=['[email protected]'], status=STATUS.queued,\n backend_alias='error')\n call_command('send_queued_mail', log_level=1)\n self.assertEqual(email.logs.count(), 1)\n\n email = Email.objects.create(from_email='[email protected]',\n to=['[email protected]'], status=STATUS.queued,\n backend_alias='error')\n call_command('send_queued_mail', log_level=2)\n self.assertEqual(email.logs.count(), 1)", "def identify_result_error(self, record):\n return [\"error\"]", "def test_unique_buckets_invalid_record(self, mock_logging):\n self.client.received_messages = [{'Body': '{\"missing-key\": 1}'}]\n unique_buckets = self.client.unique_buckets_from_messages()\n\n assert_false(unique_buckets)\n assert_true(mock_logging.error.called)", "def manage_kafka_error(msg):\n logger.error(msg.error())", "def failure(self):\n 
self.logger.debug(\"Logging failure for %s\", self.key)\n self.failures = self.driver.failure(self.key)", "def test_unique_buckets_invalid_sqs(self, mock_logging):\n self.client.received_messages = ['wrong-format-test']\n unique_buckets = self.client.unique_buckets_from_messages()\n\n assert_false(unique_buckets)\n assert_true(mock_logging.error.called)", "def error(self, tag, message, exc_info=False):\n \n self.log(logging.error,tag, message, exc_info)", "def _log_error(self, err_msg):\n if self._on_error_action == \"raise\":\n raise InvalidDatasetError(err_msg)\n else:\n logger.warning(err_msg)", "def test_sqs_log_handler_error(self):\n try:\n extra = {\n \"test\": \"test logging\",\n \"num\": 1,\n 5: \"9\",\n \"float\": 1.75,\n \"nested\": {\"more\": \"data\"}\n }\n self.logger.error(\"test info message with properties\", extra=extra)\n body = self.retrieve_message()\n expected = (\"\"\"{\"asctime\": \"2016-01-01 00:00:00,000\", \"levelname\": \"ERROR\",\"\"\"\n \"\"\" \"message\": \"test info message with properties\",\"\"\"\n \"\"\" \"5\": \"9\", \"float\": 1.75, \"num\": 1,\"\"\"\n \"\"\" \"test\": \"test logging\", \"nested\": {\"more\": \"data\"}}\"\"\")\n except BaseException as err:\n self.fail(\"Should not raise exception, got {} instead\".format(err))\n self.assertEqual(body, expected)", "def error_handler(self, failure):\n log.error(failure)", "def log_failure(self, obj, message):\n super().log_failure(obj=obj, message=message)", "def _failed(self, msg):\n self.log(msg)\n self.result.passed = False\n self.result.add_error(msg)\n self.log(u\"Failed\")", "async def test_failed_samples(self):\n self.set_source_parameter(\"test_result\", [\"failed\"])\n response = await self.collect(get_request_json_return_value=self.JMETER_JSON)\n self.assert_measurement(response, value=\"6\", entities=[])", "def fail(msg):\n log('FAIL', msg)", "def remove_record_failure():\n\t\tpass", "def on_failed(self, status_code: int, request: Request):\n self.update_rate_limit(request)\n\n data = request.response.json()\n error = data[\"error\"]\n msg = f\"请求失败,状态码:{status_code},类型:{error['name']}, 信息:{error['message']}\"\n self.gateway.write_log(msg)", "def _handle_error(self, failure, item, spider):\n # do nothing, just log\n log.err(failure)", "def error(self, msg):\r\n self.logger.error(msg)", "def catchError(custom_message = \"\"):\n exc_type, exc_value, exc_traceback = sys.exc_info() \n traceback_details = {\n 'filename': exc_traceback.tb_frame.f_code.co_filename,\n 'lineno' : exc_traceback.tb_lineno,\n 'name' : exc_traceback.tb_frame.f_code.co_name,\n 'error_type' : exc_type.__name__,\n 'logtype' : \"error\",\n 'custom_message' : custom_message,\n 'message' : str(exc_value), # or see traceback._some_str()\n 'datetime' : str(datetime.datetime.now())\n }\n del(exc_type, exc_value, exc_traceback) # So we don't leave our local labels/objects dangling\n #print(traceback_details)\n query = {\"collection_name\" : \"toolLogs\", \"data\":traceback_details}\n dbu.insertData(query)", "def failed(self, id, err=''):\n\n records = self.db.get_table()\n index = -1\n\n for i in range(0, len(records)):\n if str(records[i][\"id\"]) == str(id):\n index = i\n \n if index == -1:\n return None\n\n records[index][\"status\"] = \"failed\"\n if 'end-time' in records[index]:\n records[index][\"end-time\"] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n if 'comments' in records[index]:\n records[index][\"comments\"] += \" failed{ \" + err + \" };\"\n\n self.db.update_row(index, records[index])\n\n _log.info('Test %s 
marked as failed with message %s.' % (str(id), str(err)))\n \n return records[index]", "def test_error_logging(self):\n # Verify nothing in the journal\n assert len(Record.objects.recent('heartbeat')) == 0\n\n data = {\n 'experiment_version': '1',\n 'response_version': 1,\n 'person_id': 'joemamma',\n 'survey_id': 'foosurvey',\n 'flow_id': '20141113',\n 'question_id': '1',\n 'updated_ts': self.timestamp(),\n 'question_text': 'how was lunch?',\n 'variation_id': '1'\n }\n\n resp = self.client.post(\n reverse('heartbeat-api'),\n content_type='application/json',\n data=json.dumps(data))\n\n assert resp.status_code == 400\n errors = json.loads(resp.content)['errors']\n assert len(errors) > 0\n\n # Verify there's one entry now.\n assert len(Record.objects.recent('heartbeat')) == 1", "def error(update, context):\n logging.warning('Update \"%s\" ', update)\n logging.exception(context.error)", "def test_error(self):\n metric = self.metric()\n measurement = self.measurement(metric, sources=[self.source(metric, parse_error=\"error\")])\n self.assertEqual(None, measurement[\"count\"][\"value\"])", "def error(self, msg, stderr=True):\n self.log(msg, level=self.ERROR, stderr=stderr)", "def _stab_log_error(self, logconf, msg):\n\t\tprint \"Error when logging %s: %s\" % (logconf.name, msg)", "def error ( self , message , *args , **kwargs ) :\n return self.logger.error ( message , *args , **kwargs )", "def fail(self, message):\n logger.warning(message)\n g.failed = True", "def test_results_errors(self, affiliate_items):\n updater = mock.Mock(side_effect=ValueError())\n batch_job = BatchJob(affiliate_items, updater)\n\n error_count = 0\n for result in batch_job.run():\n error_count += int(result.is_error)\n\n assert error_count == 4", "def error(self, _strMessage=\"\"):\n self.edLogging.error(_strMessage)", "def error(update, context):\n logger.warning(f'caused error {context.error}')", "def failed_messages(self, namespace, queue):\n failed = []\n for m in self.messages(namespace, queue):\n if m.error:\n failed.append(m)\n return failed", "def error(msg):\n return log().error(msg)", "def on_failure(self, exc: BaseException) -> None:", "def error(self, msg):\n\n self.logger.error(msg)", "def test_gcp_write_records_on_error(sdc_builder, sdc_executor, gcp,\n on_error_record, start_and_check):\n bucket_name = f'stf_{get_random_string(ascii_lowercase, 10)}'\n dataset_name = f'stf_{get_random_string(ascii_lowercase, 10)}'\n table_name = f'stf_{get_random_string(ascii_lowercase, 10)}'\n data = '\\n'.join(json.dumps(rec) for rec in ROWS_IN_DATABASE_WITH_ERROR)\n\n pipeline_builder = sdc_builder.get_pipeline_builder()\n\n # Dev raw data source\n dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')\n dev_raw_data_source.set_attributes(data_format='JSON',\n raw_data=data,\n stop_after_first_batch=True)\n\n # Google BigQuery destination stage\n bigquery = pipeline_builder.add_stage(name=DESTINATION_STAGE_NAME)\n bigquery.set_attributes(project_id=gcp.project_id,\n dataset=dataset_name,\n table=table_name,\n bucket=bucket_name,\n staging_file_format='CSV',\n enable_data_drift=False,\n create_table=False,\n on_record_error=on_error_record,\n purge_stage_file_after_ingesting=True)\n\n wiretap = pipeline_builder.add_wiretap()\n\n dev_raw_data_source >> [bigquery, wiretap.destination]\n\n pipeline = pipeline_builder.build().configure_for_environment(gcp)\n pipeline.configuration['shouldRetry'] = False\n sdc_executor.add_pipeline(pipeline)\n\n bigquery_client = gcp.bigquery_client\n dataset_ref = 
DatasetReference(gcp.project_id, dataset_name)\n try:\n logger.info(f'Creating temporary bucket {bucket_name}')\n bucket = gcp.retry_429(gcp.storage_client.create_bucket)(bucket_name)\n logger.info('Creating dataset %s and table %s using Google BigQuery client ...', dataset_name, table_name)\n bigquery_client.create_dataset(dataset_ref)\n bigquery_client.create_table(Table(dataset_ref.table(table_name), schema=SCHEMA))\n\n start_and_check(sdc_executor, pipeline, wiretap)\n finally:\n _clean_up_bigquery(bigquery_client, dataset_ref)\n _clean_up_gcs(gcp, bucket, bucket_name)", "def error(update, context):\n\n logger.warning(f'Update {update} caused error {context.error}')", "def audit_failed(self):\n\n return self.__failed", "def test_adns_get_data_error(self):\n test_sensor_error_data = {\n \"sensorid\": int(self.test_sens_data['sensorid']),\n \"error\": \"Exception\",\n \"code\": 1,\n \"message\": \"DNS sensor failed. See log for details\"\n }\n self.test_adns.get_data(self.test_sens_data, self.test_out_queue)\n assert_equal(self.test_out_queue.get(), test_sensor_error_data)", "def log_handler(event):\n try:\n if not event.get('isError') or 'failure' not in event:\n return\n\n err = event['failure']\n\n # Don't report Rollbar internal errors to ourselves\n if issubclass(err.type, ApiException):\n log.error('Rollbar internal error: %s', err.value)\n else:\n report_exc_info((err.type, err.value, err.getTracebackObject()))\n except:\n log.exception('Error while reporting to Rollbar')", "def _failed_tests(self, metric_source_id: str) -> int:\n return self.__test_count(metric_source_id, 'failed')", "def log_error(self, fmt, *args):\r\n pass\r\n # log_error\r", "def process_failure(task, err):\n msg = \"{} process failure {}\".format(task, err)\n message_kafka(\"Process Failed\", task, msg)", "def test_on_record_error(sdc_builder, sdc_executor, stage_attributes):\n\n DATA = {'name': 'Al Gore', 'birthplace': 'Washington, D.C.'}\n on_record_error = stage_attributes['on_record_error']\n\n pipeline_builder = sdc_builder.get_pipeline_builder()\n\n dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')\n dev_raw_data_source.data_format = 'JSON'\n dev_raw_data_source.raw_data = json.dumps(DATA)\n dev_raw_data_source.stop_after_first_batch = True\n\n field_replacer = pipeline_builder.add_stage('Field Replacer')\n field_replacer.set_attributes(replacement_rules=[{'setToNull': False, 'fields': '/age'}],\n field_does_not_exist='TO_ERROR',\n **stage_attributes)\n\n wiretap = pipeline_builder.add_wiretap()\n\n dev_raw_data_source >> field_replacer >> wiretap.destination\n\n pipeline = pipeline_builder.build()\n\n sdc_executor.add_pipeline(pipeline)\n\n if on_record_error == 'DISCARD':\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n assert not wiretap.error_records and not wiretap.output_records\n\n elif on_record_error == 'STOP_PIPELINE':\n try:\n sdc_executor.start_pipeline(pipeline).wait_for_status('RUN_ERROR')\n\n assert False, 'An exception should have been thrown'\n except RunError:\n\n assert not wiretap.error_records and not wiretap.output_records\n\n elif on_record_error == 'TO_ERROR':\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n record = wiretap.error_records[0]\n assert record.field == DATA and not wiretap.output_records", "def transaction_failed_before_processing(self):", "def test_failed_job(self):\n failed_job = json.loads(BASE_JSON % (FAILURE, 1433166610, 1, 1433166609))[0]\n self.assertEquals(self.query_api.get_job_status(failed_job), 
FAILURE)", "def failed_items(self) -> ItemLog:\n if self._failed_items is None:\n self._failed_items = ItemLog(self.dir / 'dropped-failed.log.csv', DROPPED_FAILED_FIELDNAMES, 'id')\n return self._failed_items", "def log(failure):\n return self._env.logger.warning('[ping] {}'.format(failure.getErrorMessage()))", "def error(self, msg):\n self.__logger.error(msg)", "def transaction_failed(self):", "def _stab_log_error(self, logconf, msg):\n print('Error when logging %s: %s' % (logconf.name, msg))", "def respond_failed(conn, logger, token, details, reason):\n try:\n out = conn.respond_activity_task_failed(token, str(details), str(reason))\n logger.info('respond_activity_task_failed returned %s' % out)\n except boto.exception.SWFResponseError as e:\n _log_swf_response_error(logger, e)", "def on_error(data):\n print('Market Data Error', data)", "def logError(e):\r\n print(e)", "def error(update, context):\n logger.error('Update \"%s\" caused error \"%s\"', update, context.error)", "def error(self, message):\n return self.log(\"ERROR\", message)", "def _handle_error(self, failure, item, spider):\n self.logger.error(\"失败原因:{}, 失败对象{}\".format(failure, item))", "def _log_err_msg(self, message):\n current_time = time.time()\n if current_time - self._last_warning_time > 600:\n logging.warning(message)\n self._last_warning_time = current_time", "def log_error(self, msg):\n self.log(msg, level=LOG_ERROR)", "def glue_failure_handler(event, context) -> None:\n\n # get lambda variables\n environment_level = os.environ[\"ENVIRONMENT\"]\n sns_topic_name = os.environ[\"PROCESSING_NOTIFICATION_SNS\"]\n\n # fetches the response in dict received from eventbridge rule when 'event pattern' matches\n # here failing a glue job is an event pattern. this is already stated in event rule 'glue_failure_rule'\n # the output from below will be like --> {\"version\": \"0\", \"id\": \"26c9301e-6aec-872d-4739-1aa6ebc66853\", \"detail-type\": \"Glue Job State Change\", \"source\": \"aws.glue\", \"account\": \"795038802291\", \"time\": \"2021-03-24T07:37:07Z\", \"region\": \"us-east-1\", \"resources\": [], \"detail\": {\"jobName\": \"rahul_create_warm_pool\", \"severity\": \"ERROR\", \"state\": \"FAILED\", \"jobRunId\": \"jr_bdb3f0d56b0f842c7337be54927deb6ddb78a1f37956d9817b0f11710602139f\", \"message\": \"Command failed with exit code 1\"}}\n event_response = json.dumps(event)\n\n # calling GlueFailureEvent class\n glue_failure_event = GlueFailureEvent(event_response)\n\n # get failure time (event occurrence)\n event_occurrence_time = glue_failure_event.time # returns ISO UTC time (ex: '2021-03-24T15:52:37Z')\n # convert ISO UTC time to regular UTC time and then to ET local time\n converted_time = event_occurrence_time.replace(\"T\",\" \").replace(\"Z\",\".0\")\n _converted_time = datetime.strptime(converted_time, \"%Y-%m-%d %H:%M:%S.%f\")\n local_time = convert_utc_to_local(_converted_time, DEFAULT_TIMEZONE)\n\n if glue_failure_event.detail['jobRunId'].endswith(\"attempt_1\"):\n # we dont want get notified for attempt1 run failure because this will always be failure in real time.\n # so its an attempt to avoid 2 failure alerts for each failed glue job event\n return None\n # form subject, message required for publishing SNS\n subject = f\"{environment_level}: Glue job Failure\"\n message = (\n f\"Glue job '{glue_failure_event.detail['jobName']}' has {glue_failure_event.detail['state']} at {local_time}. 
Below are the details:{os.linesep}{os.linesep}\"\n f\"Error : {glue_failure_event.detail['message']}{os.linesep}\"\n f\"{os.linesep}\"\n f\"JobRunId : {glue_failure_event.detail['jobRunId']}\"\n )\n\n send_ahub_email_notification(sns_topic_name, subject, message)", "def logFailure(failure, msg='Unhandled exception in deferred:'):\n logging.error('%s\\n%s', msg, failure.getTraceback())", "def stream_error(self,err):\n self.__logger.debug(\"Stream error: condition: %s %r\"\n % (err.get_condition().name,err.serialize()))", "def error(self, *args):\n self.mylog.error(*args)", "def error(msg):\n log('ERROR', msg)", "def test_value_error(self, mock_logger, mock_hana_range, mock_format_query):\n\n self._collector._manage_gauge = mock.Mock()\n\n self._collector._manage_gauge.side_effect = ValueError('test')\n mock_hana_range.return_value = True\n\n metrics1_1 = mock.Mock(type='gauge')\n metrics1 = [metrics1_1]\n query1 = mock.Mock(enabled=True, query='query1', metrics=metrics1, hana_version_range=['1.0'])\n\n self._collector._metrics_config.queries = [query1]\n\n for _ in self._collector.collect():\n continue\n\n mock_logger.assert_called_once_with('test')", "def error(self, msg: str):\n self._logger.error(msg)", "def logFailure(self, *args):\n return _libsbml.SBMLValidator_logFailure(self, *args)", "def error(update: Update, context: CallbackContext):\n logging.warning('Update \"%s\" caused error \"%s\"', update, context.error)", "def log_error(e):\n\tprint(e)", "def log_error(e):\n\tprint(e)", "def log_error(err):\n print(err)", "def log_error(e):\r\n print(e)", "def log_error(e):\r\n print(e)", "def error(self, update, context):\n self.logger.error('Update \"%s\" caused error \"%s\"' % (update, context.error))", "def _reportError(self, failure):\r\n self._connection.reportError(failure.getErrorMessage())", "def callback_fail_message(request):\n msg = 'Form storing has failed :('\n logger.error(msg)\n messages.error(request._request, msg)", "def logerror(self, msg):\n self.logger.error(msg)", "def error(self, *args, **kwargs):\n self.msg(logging.ERROR, *args, **kwargs)", "def error(update: Update, context: CallbackContext):\n logger.warning('Update \"%s\" caused error \"%s\"', update, context.error)", "def error(update, context):\n\tlogger.warning('Update \"%s\" caused error \"%s\"', update, context.error)", "def error(update, context):\n\tlogger.warning('Update \"%s\" caused error \"%s\"', update, context.error)", "def ERROR(self, _strMessage=\"\"):\n self.edLogging.ERROR(_strMessage)", "def exception_handler(exctype, val, trace):\n logger.info(\n ''.join(traceback.format_exception(exctype, val, trace)))", "def log_message(self, build_id, record):\n # Todo: provide \"shortcut\" methods to convert the traceback\n # (from exc_info) to a serializable object, and to clean\n # up the record object for decent serialization in the\n # database.\n pass", "def on_error(ws, error):\n logging.error(\"Error:-\", error)", "def log_error(self, line):\n logging.error(\"Telemetry Logger - %s\" % line)", "def test_unique_buckets_non_s3_notification(self, mock_logging):\n self.client.received_messages = [{'Body': '{\"Records\": [{\"kinesis\": 1}]}'}]\n unique_buckets = self.client.unique_buckets_from_messages()\n\n assert_false(unique_buckets)\n assert_true(mock_logging.info.called)\n assert_true(mock_logging.debug.called)", "def _finalize(self, response, batch):\n if not response:\n return # Could happen in the case of backoff failing enitrely\n\n # Check for failures that occurred in PutRecordBatch after 
several backoff attempts\n # And log the actual record from the batch\n failed = self._check_failures(response, batch=batch)\n\n # Remove the failed messages in this batch for an accurate metric\n successful_records = len(batch) - failed\n\n MetricLogger.log_metric(FUNCTION_NAME, MetricLogger.SQS_RECORDS_SENT, successful_records)\n LOGGER.info(\n 'Successfully sent %d message(s) to queue %s',\n successful_records,\n self.queue.url\n )", "def test_error_no_json(self, app, data_queues, metricsmock):\n res = self._call(app, \"\\xae\", method=\"post\", status=400)\n detail = \"JSONDecodeError('Expecting value: line 1 column 1 (char 0)')\"\n self.check_response(data_queues, res, \"parse_error\", details={\"decode\": detail})\n metricsmock.assert_incr_once(\n self.metric_type + \".request\", tags=[self.metric_path, \"key:test\"]\n )", "def log_error(self, msg):\n self.logger.error(msg)", "def error(update, context):\n\tlogger.warning('Update \"%s\" caused error \"%s\"', update, context.error)\n\t#TODO find out how this works", "def _message_failed_job(self):\n self.ensure_one()\n return _(\"Something bad happened during the execution of the job. \"\n \"More details in the 'Exception Information' section.\")", "def error(self, msg, *args):\n if self.lvl<=logging.ERROR: return self._log(msg, *args)" ]
[ "0.65698266", "0.6314558", "0.6210403", "0.6180184", "0.61691684", "0.6075002", "0.6070457", "0.6059748", "0.6040478", "0.598686", "0.5963177", "0.5926306", "0.5920489", "0.5919631", "0.58492374", "0.5753404", "0.5748351", "0.5638719", "0.5619237", "0.56000847", "0.55767053", "0.554524", "0.55307674", "0.55232894", "0.54596126", "0.54188305", "0.54008555", "0.5392786", "0.53883153", "0.5387347", "0.5385078", "0.5369263", "0.5366223", "0.5363425", "0.5362841", "0.5354783", "0.53504014", "0.5347535", "0.53391343", "0.53371394", "0.53368676", "0.533367", "0.53251964", "0.53171813", "0.5306061", "0.52970684", "0.529545", "0.5292553", "0.52880114", "0.52828544", "0.5273267", "0.526898", "0.5252318", "0.5248071", "0.5244282", "0.5243638", "0.5243399", "0.52346325", "0.5231983", "0.5228258", "0.52211773", "0.52209496", "0.5219444", "0.5219335", "0.52175367", "0.5216033", "0.5213052", "0.5210433", "0.5209423", "0.52047044", "0.51993555", "0.519669", "0.5182809", "0.5162832", "0.5151078", "0.51490134", "0.51490134", "0.5142219", "0.51376414", "0.51376414", "0.5135178", "0.5133059", "0.5124866", "0.51105815", "0.51077414", "0.50972784", "0.50950426", "0.50950426", "0.509454", "0.5091449", "0.5091372", "0.50894004", "0.50868607", "0.50804937", "0.507886", "0.50710154", "0.5066392", "0.5060358", "0.5059505", "0.5054727" ]
0.8446626
0
Segment the records into batches that conform to SQS restrictions. This will log any single record that is too large to send, and skip it.
def _message_batches(cls, records): # Dump the records to a list of minimal json records_json = [ json.dumps(record, separators=(',', ':')) for record in records ] current_batch_size = 0 current_batch = [] for record in records_json: line_len = len(record) # Check if the max size of the batch has been reached or if the current # record will exceed the max batch size and start a new batch if ((len(current_batch) == cls.MAX_BATCH_COUNT) or (current_batch_size + line_len > cls.MAX_BATCH_SIZE)): yield current_batch[:] current_batch_size = 0 del current_batch[:] if line_len > cls.MAX_BATCH_SIZE: LOGGER.error('Record too large (%d) to send to SQS:\n%s', line_len, record) cls._log_failed(1) continue # Add the record to the batch current_batch_size += line_len current_batch.append(record) # yield the result of the last batch (no need to copy via slicing) if current_batch: yield current_batch
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _log_process(self, log_req):\n rq_size = log_req.multipart_size\n with self._lock:\n if self._payload_size + rq_size >= self.max_payload_size:\n if len(self._batch) > 0:\n self._send_batch()\n self._batch.append(log_req)\n self._payload_size += rq_size\n if len(self._batch) >= self.max_entry_number:\n self._send_batch()", "def _send_batch(self):\n batch = RPLogBatch(self._batch)\n http_request = HttpRequest(\n self.session.post, self._log_endpoint, files=batch.payload,\n verify_ssl=self.verify_ssl)\n batch.http_request = http_request\n self._worker.send(batch)\n self._batch = []\n self._payload_size = helpers.TYPICAL_MULTIPART_FOOTER_LENGTH", "def put_records_batch(\n client, stream_name: str, records: list, max_retries: int, max_batch_size: int = 500\n) -> None or List[dict]:\n\n retry_list = []\n\n for batch_index, batch in enumerate(split_list(records, max_batch_size)):\n records_to_send = create_records(batch)\n retries_left = max_retries\n\n while len(records_to_send) > 0:\n kinesis_response = client.put_records(\n Records=records_to_send, StreamName=stream_name,\n )\n\n if kinesis_response[\"FailedRecordCount\"] == 0:\n break\n else:\n index: int\n record: dict\n for index, record in enumerate(kinesis_response[\"Records\"]):\n if \"ErrorCode\" in record:\n # original records list and response record list have same order, guaranteed:\n # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/kinesis.html#Kinesis.Client.put_records\n logger.error(\n f\"A record failed with error: {record['ErrorCode']} {record['ErrorMessage']}\"\n )\n retry_list.append(records_to_send[index])\n\n records_to_send = retry_list\n retry_list = []\n\n if retries_left == 0:\n error_msg = (\n f\"No retries left, giving up on records: {records_to_send}\"\n )\n logger.error(error_msg)\n return records_to_send\n\n retries_left -= 1\n\n logger.info(f\"Waiting 500 ms before retrying\")\n time.sleep(0.5)\n\n return None", "def _send_messages(self, batched_messages):\n @backoff.on_predicate(backoff.fibo,\n lambda resp: len(resp.get('Failed', [])) > 0,\n max_tries=self.MAX_BACKOFF_ATTEMPTS,\n max_value=self.MAX_BACKOFF_FIBO_VALUE,\n on_backoff=backoff_handler(debug_only=False),\n on_success=success_handler(),\n on_giveup=giveup_handler())\n @backoff.on_exception(backoff.expo, self.EXCEPTIONS_TO_BACKOFF,\n max_tries=self.MAX_BACKOFF_ATTEMPTS,\n on_backoff=backoff_handler(debug_only=False),\n on_success=success_handler(),\n on_giveup=giveup_handler())\n def _send_messages_helper(entries):\n \"\"\"Inner helper function for sending messages with backoff_handler\n\n Args:\n entries (list<dict>): List of SQS SendMessageBatchRequestEntry items\n \"\"\"\n LOGGER.info('Sending %d message(s) to %s', len(entries), self.queue.url)\n\n response = self.queue.send_messages(Entries=entries)\n\n if response.get('Successful'):\n LOGGER.info(\n 'Successfully sent %d message(s) to %s with MessageIds %s',\n len(response['Successful']),\n self.queue.url,\n ', '.join(\n '\\'{}\\''.format(resp['MessageId'])\n for resp in response['Successful']\n )\n )\n\n if response.get('Failed'):\n self._check_failures(response) # Raise an exception if this is our fault\n self._strip_successful_records(entries, response)\n\n return response\n\n message_entries = [\n {\n 'Id': str(idx),\n 'MessageBody': message\n } for idx, message in enumerate(batched_messages)\n ]\n\n # The try/except here is to catch any raised errors at the end of the backoff\n try:\n return _send_messages_helper(message_entries)\n except 
self.EXCEPTIONS_TO_BACKOFF:\n LOGGER.exception('SQS request failed')\n # Use the current length of the message_entries in case some records were\n # successful but others were not\n self._log_failed(len(message_entries))\n return", "def record_batch_size(self):\n return 10000", "def _finalize(self, response, batch):\n if not response:\n return # Could happen in the case of backoff failing enitrely\n\n # Check for failures that occurred in PutRecordBatch after several backoff attempts\n # And log the actual record from the batch\n failed = self._check_failures(response, batch=batch)\n\n # Remove the failed messages in this batch for an accurate metric\n successful_records = len(batch) - failed\n\n MetricLogger.log_metric(FUNCTION_NAME, MetricLogger.SQS_RECORDS_SENT, successful_records)\n LOGGER.info(\n 'Successfully sent %d message(s) to queue %s',\n successful_records,\n self.queue.url\n )", "def _flush_batch(self) -> None:\n batch_len = len(self._current_batch)\n if batch_len == 0:\n self.logger.debug('Nothing to flush.')\n return\n\n self.logger.debug(f'Flushing batch size {batch_len}')\n\n with self.LOCK:\n to_process_batch = list(self._current_batch)\n self._current_batch = list()\n\n log_event = EventFactory.create_log_event(to_process_batch, self.logger)\n\n self.notification_center.send_notifications(enums.NotificationTypes.LOG_EVENT, log_event)\n\n if log_event is None:\n self.logger.exception('Error dispatching event: Cannot dispatch None event.')\n return\n\n try:\n self.event_dispatcher.dispatch_event(log_event)\n except Exception as e:\n self.logger.error(f'Error dispatching event: {log_event} {e}')", "def prepare_batches(self, data):\n batches = []\n start, end = 0, 100\n if len(data) > 100:\n while True:\n data_batch = data[start:end]\n if not data_batch:\n break\n temp = end + 100\n start, end = end, temp\n if data_batch:\n batches.append(data_batch)\n else:\n batches.append(data)\n return batches", "def beat_inbox_sms_bulk():\n receipt_id_sms, list_of_sms_notifications = sms_bulk.poll()\n\n while list_of_sms_notifications:\n save_smss.apply_async((None, list_of_sms_notifications, receipt_id_sms), queue=QueueNames.BULK_DATABASE)\n current_app.logger.info(f\"Batch saving with Bulk Priority: SMS receipt {receipt_id_sms} sent to in-flight.\")\n receipt_id_sms, list_of_sms_notifications = sms_bulk.poll()", "def test_exceed_limit_request(self):\n actions.login(ADMIN_EMAIL)\n ids_list = list(range(SkillAggregateRestHandler.MAX_REQUEST_SIZE))\n get_url = '%s?%s' % (self.URL, urllib.urlencode({\n 'ids': ids_list}, True))\n\n response = transforms.loads(self.get(get_url).body)\n self.assertEqual(412, response['status'])", "def process_data():\n for message in get_messages_from_sqs():\n try:\n message_content = json.loads(message.body)\n input_file = urllib.unquote_plus(message_content\n ['Records'][0]['s3']['object']\n ['key']).encode('utf-8')\n s3.download_file(input_bucket_name, input_file, input_file)\n output_file = os.path.join(output_dir, os.path.splitext(input_file)[0]+'.csv')\n parse_patient_data(input_file, output_file)\n upload_data(output_file)\n cleanup_files(input_file, output_file)\n except:\n message.change_visibility(VisibilityTimeout=0)\n continue\n else:\n message.delete()", "def get_records(self, limit=None):\n #OK_TO_SEND_RECORD = {'state': 'oktosend', 'volume': 'oktosend'}\n checkpoint = self.checkpoint\n where_clauses = []\n # DB uses local time -> checkpoint and all timestamps are in local time\n start_time = None\n # initialized anyway below - end_time = 
datetime.datetime.now()\n ok_to_send_time = None\n end_by_limit = False\n\n if checkpoint:\n start_time = checkpoint.date()\n retrieved_previous_end_time = checkpoint.aux()\n checkpoint_interval = checkpoint.transaction()\n if checkpoint_interval is None:\n checkpoint_interval = 0\n else:\n checkpoint_interval = int(checkpoint_interval)\n if checkpoint_interval == 0:\n # NO interval, OK to send from the beginning\n DebugPrint(4, \"Sending OK_TO_SEND - no interval in checkpoint (%s/%s)\" %\n (start_time, checkpoint_interval))\n yield EnstoreTapeDriveInput.OK_TO_SEND_RECORD\n else:\n estimated_previous_end_time = timeutil.wind_time(start_time, seconds=checkpoint_interval,\n backward=False)\n if retrieved_previous_end_time is not None and \\\n retrieved_previous_end_time != estimated_previous_end_time:\n DebugPrint(4, \"End_time in checkpoint not matching: estimated:%s, retrieved:%s\" %\n (estimated_previous_end_time, retrieved_previous_end_time))\n ok_to_send_time = estimated_previous_end_time\n DebugPrint(4, \"Loaded checkpoint: %s (-%s), %s (%s - %s)\" %\n (start_time, self.rollback, ok_to_send_time, checkpoint_interval, retrieved_previous_end_time))\n if self.rollback > 0:\n start_time = timeutil.wind_time(start_time, seconds=self.rollback)\n where_clauses.append(\"start >= '%s'\" % timeutil.format_datetime(start_time, iso8601=False))\n else:\n # NO Checkpoint - OK to send from the beginning\n DebugPrint(4, \"Sending OK_TO_SEND - no checkpoint\")\n yield EnstoreTapeDriveInput.OK_TO_SEND_RECORD\n\n if limit > 0:\n end_time = timeutil.wind_time(start_time, hours=limit, backward=False)\n # If input_delay is 0, check that the end_time is not in the future\n delay_time = timeutil.wind_time(datetime.datetime.now(), seconds=self.input_delay)\n if end_time > delay_time:\n end_time = delay_time\n else:\n end_by_limit = True\n #end_time = min(end_time, timeutil.wind_time(datetime.datetime.now(), seconds=self.input_delay))\n else:\n end_time = timeutil.wind_time(datetime.datetime.now(), seconds=self.input_delay)\n if checkpoint or limit or self.input_delay>0:\n end_time = timeutil.at_minute(end_time)\n where_clauses.append(\"start < '%s'\" % timeutil.format_datetime(end_time, iso8601=False))\n if ok_to_send_time is not None and ok_to_send_time >= timeutil.wind_time(end_time, seconds=60):\n # If ok_to_send_time is not None, then checkpoint_interval was assigned (in checkpoint block)\n DebugPrint(2, \"End time comes before new records are encountered (%s - %s <= 60sec)\" %\n (end_time, ok_to_send_time))\n if end_by_limit:\n DebugPrint(2, \"To avoid misinterpreting records DataLengthMax in the config file must be > %s\" %\n (checkpoint_interval/3600))\n else:\n DebugPrint(2, \"Either the probe runs too frequently or DataLengthMax may be too short. 
\"\n \"Current interval (hours):\" %\n (checkpoint_interval/3600))\n if start_time is not None:\n if self.input_min_interval > 0:\n if start_time > timeutil.wind_time(end_time, seconds=self.input_min_interval):\n return\n else:\n if start_time >= end_time:\n return\n if where_clauses:\n where_sql = \"WHERE %s\" % \" AND \".join(where_clauses)\n else:\n where_sql = \"\"\n\n sql = '''SELECT\n node,\n volume,\n type,\n logname,\n start,\n finish,\n state,\n storage_group,\n reads,\n writes\n FROM tape_mounts\n %s\n ORDER BY start, storage_group\n ''' % (where_sql, )\n\n DebugPrint(4, \"Requesting new EnstoreTapeDrive records %s\" % sql)\n last_record_start_time = None\n first_record_start_time = None\n first_record = None\n mount_checkpoint = {}\n for r in self.query(sql):\n # Filter out values that are not acceptable\n if r['storage_group'] is None:\n continue\n if r['state'] not in ('M', 'D'):\n continue\n if ok_to_send_time is not None:\n # if ok_to_send_time is not None, checkpoint is True\n if r['start'] >= ok_to_send_time:\n # send also the current record (yield is after)\n # Time intervals are closed on the left (start) and open on the right (end)\n yield EnstoreTapeDriveInput.OK_TO_SEND_RECORD\n # to send the record OK_TO_SEND_RECORD only once\n ok_to_send_time = None\n yield r\n if checkpoint:\n state = r['state']\n last_record_start_time = r['start'] # using start because finish could be NULL\n if first_record is None:\n first_record = r\n first_record_start_time = last_record_start_time\n if state == 'M':\n mount_checkpoint[r['volume']] = last_record_start_time\n elif state == 'D':\n mount_checkpoint[r['volume']] = None\n\n if last_record_start_time is not None and end_time is not None:\n # Looking 6mo before 4.27.2015 there are an average of 167 M od R records per hour\n if timeutil.wind_time(end_time, minutes=10) > last_record_start_time:\n DebugPrint(3, \"Warning, no records in the last 10 min of EnstoreTapeDrive probe (%s - %s)\" %\n (end_time, last_record_start_time))\n\n if checkpoint:\n # first_unresolved_mount if any should be before end_time\n first_unresolved_mount = end_time\n for i in mount_checkpoint.values():\n if i is not None and i < first_unresolved_mount:\n # If there are records (exist a i not None), first record variables are guaranteed not None\n if i > first_record_start_time or not end_by_limit:\n # it is past the first record or the time span is shorter than DataLengthMax\n # this guarantees that the new invocation will may have more records\n first_unresolved_mount = i\n else:\n # skip i for checkpoint consideration\n DebugPrint(2,\n \"Warning, reached DataLengthMax while the first mount record is still not matched.\\n\"\n \"Sending oktosend, a fake dismount record and advancing the checkpoint past it\")\n yield EnstoreTapeDriveInput.OK_TO_SEND_RECORD\n yield self.get_dismount_mount_record(first_record, end_time)\n\n if first_unresolved_mount == end_time:\n checkpoint_interval = 0\n else:\n checkpoint_interval = timeutil.total_seconds(end_time-first_unresolved_mount)\n DebugPrint(4, \"Saving new EnstoreTapeDrive checkpoint: %s - %s (%s)\" %\n (first_unresolved_mount, end_time, checkpoint_interval))\n checkpoint.set_date_transaction_aux(first_unresolved_mount, checkpoint_interval, end_time)", "def test_block_bad_batch(self):\n pass", "def _buff_split(self, upload_buffer):\n if upload_buffer.intent_count() == 0:\n return\n tail_buffer = upload_buffer\n while True:\n if tail_buffer.length < self.recommended_upload_part_size + self.min_part_size:\n # 
`EmergePlanner_buff_partition` can split in such way that tail part\n # can be smaller than `min_part_size` - to avoid unnecessary download of possible\n # incoming copy intent, we don't split further\n yield tail_buffer\n return\n head_buff, tail_buffer = self._buff_partition(tail_buffer)\n yield head_buff", "def yield_chunked_events(self, events):\n for i in range(0, len(events), 5000):\n yield events[i:i + 5000]", "def queue_handler(self):\n work_queue = []\n query_count = 0\n\n while query_count < self.count:\n work_queue.append(self.build_packet(self.record))\n query_count += 1\n\n self.send_queries(work_queue)", "def _strip_successful_records(cls, messages, response):\n success_ids = {\n item['Id'] for item in response['Successful']\n }\n\n LOGGER.info('Removing sucessful message indices from batch: %s', success_ids)\n\n for success_id in success_ids:\n # Get the successful message by ID and remove it\n message = cls._extract_message_by_id(messages, success_id)\n if not message:\n continue\n messages.remove(message)", "def process( self, message ) :\n try:\n spot_request_msg = SpotRequestMsg( raw_json=message.get_body() )\n spot_request_uuid = spot_request_msg.spot_request_uuid\n spot_master_uuid = spot_request_msg.spot_master_uuid\n logger.info( fmt_request_uuid_msg_hdr( spot_request_uuid ) + 'process() for spot_master_uuid: ' + spot_master_uuid )\n spot_request_item = get_spot_request_item( self.spot_request_table_name, spot_request_msg.spot_request_uuid, region_name=self.region_name, profile_name=self.profile_name )\n ts_cmd_complete = spot_request_msg.name_value_pairs[ SpotRequestMsg.PAIR_NAME_BATCH_PROCESS_COMPLETE_TIMESTAMP]\n cmd_exception_message = spot_request_msg.name_value_pairs[ SpotRequestMsg.PAIR_NAME_INSTANCE_BATCH_PROCESS_START_EXCEPTION_MESSAGE]\n cmd_exception_traceback = spot_request_msg.name_value_pairs[ SpotRequestMsg.PAIR_NAME_INSTANCE_BATCH_PROCESS_START_EXCEPTION_TRACEBACK]\n key_value_pairs = {\n TableSpotRequest.is_open:0,\n TableSpotRequest.spot_request_state_code:SpotRequestStateCode.instance_complete_exception,\n TableSpotRequest.ts_cmd_complete:ts_cmd_complete,\n TableSpotRequest.cmd_exception_message:cmd_exception_message,\n TableSpotRequest.cmd_exception_traceback:cmd_exception_traceback,\n }\n spot_request_row_partial_save( self.spot_request_table_name, spot_request_item, key_value_pairs, region_name=self.region_name, profile_name=self.profile_name )\n self.spot_request_sqs_message_durable.delete_message(message) \n\n except StandardError as e:\n logger.error( fmt_request_uuid_msg_hdr( spot_request_uuid ) + 'Exiting SpotRequestDispatcher due to exception' )\n logger.error( fmt_request_uuid_msg_hdr( spot_request_uuid ) + str(e) )\n logger.error( fmt_request_uuid_msg_hdr( spot_request_uuid ) + traceback.format_exc() )", "def push_bq_records(client, dataset, table, records, sleep = 300, max_batch = 100, print_failed_records = True, retry_on_fail = True):\n if len(records) == 0:\n return\n if len(records) > max_batch:\n split = len(records) // 2\n push_bq_records(client, dataset, table, records[0:split], sleep, max_batch)\n push_bq_records(client, dataset, table, records[split:], sleep, max_batch)\n else:\n try:\n succ = client.push_rows(dataset, table, records)\n if not succ:\n if retry_on_fail:\n print(\"Push to BigQuery table was unsuccessful. 
Waiting %s seconds and trying one more time.\" % sleep)\n time.sleep(sleep)\n push_bq_records(client, dataset, table, records, sleep, max_batch, print_failed_records, False)\n else:\n if print_failed_records:\n print(\"\\nRecord 0:\")\n print(records[0])\n if len(records) > 1:\n print(\"\\nRecord %s:\" % (len(records) - 1))\n print(records[len(records)-1])\n raise RuntimeError('Push to BigQuery table was unsuccessful. See above for sample record(s) if requested.')\n except BrokenPipeError:\n print(\"BrokenPipeError while pushing %s records. Waiting %s seconds and trying again.\" % (len(records), sleep)) \n time.sleep(sleep)\n push_bq_records(client, dataset, table, records, sleep, max_batch)", "def split_records(self, data):\n byte_array = bytearray(data)\n size = len(byte_array)\n split_data = [bytearray()]\n for index, byte in enumerate(byte_array):\n if index != size-1 and byte == 143 and byte_array[index+1] == 142:\n print(\"found delimeter byte 143,142 b'8f8e'\")\n split_data[-1].append(byte)\n split_data.append(bytearray())\n print(\"start new record\")\n else:\n split_data[-1].append(byte)\n return split_data", "def getBatchSize(self, context, obj):\n return 100", "def static_batch(data, batch_size=16):\n buf = []\n for sample in data:\n buf.append(sample)\n if len(buf) >= batch_size:\n yield buf\n buf = []\n if len(buf) > 0:\n yield buf", "def test_block_missing_batch(self):\n pass", "def _send_batch(self, base_url, endpoint, batch, dataset_id=None, dataset_version=None, retries=0):\n try:\n params = {'data': base64.b64encode(json.dumps(batch).encode()).decode()}\n if dataset_id:\n params['dataset_id'] = dataset_id\n params['token'] = self.token\n if dataset_version:\n params['dataset_version'] = dataset_version\n response = self.request(base_url, [endpoint], params, 'POST')\n msg = \"Sent \" + str(len(batch)) + \" items on \" + time.strftime(\"%Y-%m-%d %H:%M:%S\") + \"!\"\n Mixpanel.LOGGER.debug(msg)\n return response\n except BaseException as be:\n Mixpanel.LOGGER.debug('Exception in _send_batch')\n Mixpanel.LOGGER.debug(be)\n Mixpanel.LOGGER.warning(\"Failed to import batch, dumping to file import_backup.txt\")\n with open('import_backup.txt', 'a+') as backup:\n json.dump(batch, backup)\n backup.write('\\n')", "def pack_data_into_batches(self, ids):\n\n # create buckets sorted by the number of src tokens\n # each bucket is also sorted by the number of tgt tokens\n buckets = {}\n for i, line_ids in enumerate(ids):\n len_ = len(line_ids)\n if len_ not in buckets:\n buckets[len_] = [i]\n else:\n buckets[len_].append(i)\n\n for b_idx in buckets:\n buckets[b_idx] = sorted(buckets[b_idx])\n\n buckets = OrderedDict(sorted(buckets.items()))\n\n batches = []\n batch_elem_lengths = []\n curr_batch = []\n len_of_longest_sent = 0\n for sent_len, bucket in buckets.items():\n for sent_i in bucket:\n if sent_len * (len(curr_batch) + 1) > self.tokens_in_batch:\n if not curr_batch:\n raise ValueError(\n f\"The limitation on number of tokens in batch {self.tokens_in_batch} is too strong.\"\n f\"Several sentences contain {sent_len} tokens.\"\n )\n batches.append(curr_batch)\n batch_elem_lengths.append(sent_len)\n curr_batch = []\n curr_batch.append(sent_i)\n len_of_longest_sent = sent_len\n if curr_batch:\n batches.append(curr_batch)\n batch_elem_lengths.append(len_of_longest_sent)\n return batches, batch_elem_lengths", "def test_batch_size(self):\n\n class A(Document):\n s = StringField()\n\n A.drop_collection()\n\n for i in range(100):\n A.objects.create(s=str(i))\n\n # test iterating over the 
result set\n cnt = 0\n for _ in A.objects.batch_size(10):\n cnt += 1\n assert cnt == 100\n\n # test chaining\n qs = A.objects.all()\n qs = qs.limit(10).batch_size(20).skip(91)\n cnt = 0\n for _ in qs:\n cnt += 1\n assert cnt == 9\n\n # test invalid batch size\n qs = A.objects.batch_size(-1)\n with pytest.raises(ValueError):\n list(qs)", "def midbatch_hook(self, progress, logging_epoch):\n pass", "def _on_too_many_orders(self, msg):\r\n self.debug(\"### Server said: '%s\" % msg[\"message\"])\r\n self.count_submitted -= 1\r\n self.signal_order_too_fast(self, msg)", "def wifi_scanner_batch_scan_full(self, scan_setting):\n self.dut.ed.clear_all_events()\n data = wutils.start_wifi_background_scan(self.dut, scan_setting)\n idx = data[\"Index\"]\n scan_rt = data[\"ScanElapsedRealtime\"]\n self.log.info(\"Wifi batch shot scan started with index: %s\", idx)\n #generating event wait time from scan setting plus leeway\n scan_time, scan_channels = wutils.get_scan_time_and_channels(\n self.wifi_chs, scan_setting, self.stime_channel)\n # multiply scan period by two to account for scheduler changing period\n scan_time += scan_setting[\n 'periodInMs'] * 2 #add scan period delay for next cycle\n wait_time = scan_time / 1000 + self.leeway\n validity = False\n try:\n for snumber in range(1, 3):\n results = []\n event_name = \"%s%sonResults\" % (EVENT_TAG, idx)\n self.log.debug(\"Waiting for event: %s for time %s\", event_name,\n wait_time)\n event = self.dut.ed.pop_event(event_name, wait_time)\n self.log.debug(\"Event received: %s\", event)\n bssids, validity = self.proces_and_valid_batch_scan_result(\n event[\"data\"][\"Results\"], scan_rt, event[\"data\"][KEY_RET],\n scan_setting)\n event_name = \"%s%sonFullResult\" % (EVENT_TAG, idx)\n results = self.pop_scan_result_events(event_name)\n asserts.assert_true(\n len(results) >= bssids,\n \"Full single shot result don't match %s\" % len(results))\n asserts.assert_true(bssids > 0, EMPTY_RESULT)\n asserts.assert_true(validity, INVALID_RESULT)\n except queue.Empty as error:\n raise AssertionError(\"Event did not triggered for batch scan %s\" %\n error)\n finally:\n self.dut.droid.wifiScannerStopBackgroundScan(idx)\n self.dut.ed.clear_all_events()", "def _get_message_groups(\n self, messages: Iterator[AirbyteMessage], schema_inferrer: SchemaInferrer, limit: int\n ) -> Iterable[Union[StreamReadPages, AirbyteLogMessage]]:\n records_count = 0\n at_least_one_page_in_group = False\n current_page_records = []\n current_slice_pages = []\n current_page_request: Optional[HttpRequest] = None\n current_page_response: Optional[HttpResponse] = None\n\n while records_count < limit and (message := next(messages, None)):\n if self._need_to_close_page(at_least_one_page_in_group, message):\n self._close_page(current_page_request, current_page_response, current_slice_pages, current_page_records)\n current_page_request = None\n current_page_response = None\n\n if at_least_one_page_in_group and message.type == Type.LOG and message.log.message.startswith(\"slice:\"):\n yield StreamReadSlices(pages=current_slice_pages)\n current_slice_pages = []\n at_least_one_page_in_group = False\n elif message.type == Type.LOG and message.log.message.startswith(\"request:\"):\n if not at_least_one_page_in_group:\n at_least_one_page_in_group = True\n current_page_request = self._create_request_from_log_message(message.log)\n elif message.type == Type.LOG and message.log.message.startswith(\"response:\"):\n current_page_response = self._create_response_from_log_message(message.log)\n elif message.type 
== Type.LOG:\n yield message.log\n elif message.type == Type.RECORD:\n current_page_records.append(message.record.data)\n records_count += 1\n schema_inferrer.accumulate(message.record)\n else:\n self._close_page(current_page_request, current_page_response, current_slice_pages, current_page_records)\n yield StreamReadSlices(pages=current_slice_pages)", "def _export_batch(self) -> int:\n idx = 0\n # currently only a single thread acts as consumer, so queue.pop() will\n # not raise an exception\n while idx < self.max_export_batch_size and self.queue:\n self.spans_list[idx] = self.queue.pop()\n idx += 1\n token = attach(set_value(_SUPPRESS_INSTRUMENTATION_KEY, True))\n try:\n # Ignore type b/c the Optional[None]+slicing is too \"clever\"\n # for mypy\n self.span_exporter.export(self.spans_list[:idx]) # type: ignore\n except Exception: # pylint: disable=broad-except\n logger.exception(\"Exception while exporting Span batch.\")\n detach(token)\n\n # clean up list\n for index in range(idx):\n self.spans_list[index] = None\n return idx", "def get_batch(self, data, batch_size):\n minibatch, size_so_far = [], 0\n for ex in data:\n minibatch.append(ex)\n size_so_far = self.simple_batch_size_fn(ex, len(minibatch))\n if size_so_far == batch_size:\n yield minibatch\n minibatch, size_so_far = [], 0\n elif size_so_far > batch_size:\n yield minibatch[:-1]\n minibatch, size_so_far = minibatch[-1:], self.simple_batch_size_fn(ex, 1)\n if minibatch:\n yield minibatch", "def test_block_extra_batch(self):\n pass", "def getFilteredSyncChunk(self, authenticationToken, afterUSN, maxEntries, filter):\r\n pass", "def produce_query_batches(self):\n pass", "def import_queued_submissions(conn, limit=50):\n query = schema.submission.select(schema.submission.c.handled == False).limit(limit)\n count = 0\n for submission in conn.execute(query):\n import_submission(conn, submission)\n count += 1\n logger.debug(\"Imported %d submissions\", count)", "def process_batch(sm_account_id, graph, interactions, batch_requests, p_session, processed_interactions=None,\n cutoff=None):\n with transaction.manager:\n for interaction in interactions:\n p_session.merge(interaction)\n\n if len(batch_requests) == 0 or (processed_interactions and processed_interactions >= cutoff):\n return\n\n # process batch requests\n # Number of max items in a batch request is 50\n MAX_BATCH_SIZE = 50\n batch_requests_p = [{'method': req.get('method'), 'relative_url': req.get('relative_url')} for req in\n batch_requests]\n batch_data = []\n\n interactions_new = set()\n batch_requests_new = []\n\n for i in range(math.ceil(len(batch_requests_p) / MAX_BATCH_SIZE)):\n # TODO handle connection error. 
attempt retries\n try:\n batch_req = json.dumps(batch_requests_p[i * MAX_BATCH_SIZE:(i * MAX_BATCH_SIZE) + (MAX_BATCH_SIZE - 1)],\n indent=1)\n batch_data += graph.request(\"\", post_args={\n 'batch': batch_req})\n\n except ConnectionError as e:\n logger.exception('unable to process batch request \\n:{}'.format(batch_req))\n for req, batch_response in zip(batch_requests, batch_data):\n parent_id = req.get('parent_id')\n if 'body' in batch_response:\n batch_response_data = json.loads(batch_response['body'])\n if 'error' in batch_response_data and batch_response_data['error'].get('code') == 1:\n # handle request failure - 'Please reduce the amount of data you are asking for, then retry your request'\n error_url = req.get('relative_url')\n parse_result = urlparse(error_url)\n query_data = urlparse.parse_qs(parse_result.query)\n old_limit = query_data.get('limit')[0]\n sm_account_id = parse_result.path.split(\"/\")[2]\n new_limit = int(float(old_limit) / 2)\n new_req = get_feed_request(sm_account_id, limit=new_limit)\n batch_requests_new.append(new_req)\n\n if 'data' in batch_response_data:\n for interaction_raw in batch_response_data['data']:\n Interactions.get_nested_interactions(sm_account_id, interaction_raw, interactions_new,\n batch_requests_new, parent_id)\n if 'paging' in batch_response_data and 'next' in batch_response_data['paging']:\n next_url = urlparse(batch_response_data['paging']['next'])\n relative_url = next_url.path + '?' + next_url.query + '&include_headers=false'\n req = {'method': 'GET', 'relative_url': relative_url, 'parent_id': parent_id}\n batch_requests_new.append(req)\n else:\n logger.info('Exception occurred while collecting posts for {} skipping this..'.format(sm_account_id))\n\n process_batch(sm_account_id, graph, interactions_new, batch_requests_new, p_session,\n processed_interactions + len(interactions), cutoff)", "def batch_size(self) -> int:\n ...", "def generate(\n stream_name, field, hotspot_size, hotspot_weight, batch_size, kinesis_client):\n points_generated = 0\n hotspot = None\n while True:\n if points_generated % 1000 == 0:\n hotspot = get_hotspot(field, hotspot_size)\n records = [\n get_record(field, hotspot, hotspot_weight) for _ in range(batch_size)]\n points_generated += len(records)\n pprint(records)\n kinesis_client.put_records(StreamName=stream_name, Records=records)\n\n time.sleep(0.1)", "def _batcher(self, rows):\n row_count = 0\n batch = []\n batch_count = 1\n\n total_rows_modified = 0\n throttle_count = 0\n\n i = 0\n for row in rows:\n if row_count > self.batch_size - 1:\n logger.debug(f\"row_count={row_count} batch_size={self.batch_size} and batch={len(batch)}\")\n # Yield the previous batch\n yield batch\n\n # Start the new batch\n batch = []\n batch.append(row)\n row_count = 1\n\n batch_count += 1\n # break # toggle to load one batch only\n else:\n row_count += 1\n batch.append(row)\n\n # Put in a sleep timer to throttle how hard we hit the database\n if self.throttle_time and self.throttle_size and (throttle_count > self.throttle_size - 1):\n logger.info(f\"Sleeping for {self.throttle_time} seconds... 
row: {i}\")\n time.sleep(int(self.throttle_time))\n throttle_count = 0\n elif self.throttle_time and self.throttle_size:\n throttle_count += 1\n i += 1\n\n yield batch", "def getBatchSize(self, context, obj):\n return 10", "def beat_inbox_email_bulk():\n receipt_id_email, list_of_email_notifications = email_bulk.poll()\n\n while list_of_email_notifications:\n save_emails.apply_async((None, list_of_email_notifications, receipt_id_email), queue=QueueNames.BULK_DATABASE)\n current_app.logger.info(f\"Batch saving with Bulk Priority: email receipt {receipt_id_email} sent to in-flight.\")\n receipt_id_email, list_of_email_notifications = email_bulk.poll()", "def batches(data, batch_size) -> list:\n rv = []\n for idx, line in enumerate(data):\n if idx != 0 and idx % batch_size == 0:\n yield rv\n rv = []\n rv.append(line)\n yield rv", "def test_wifi_scanner_batch_scan_period_too_short(self):\n scan_setting = {\"band\": wutils.WifiEnums.WIFI_BAND_BOTH_WITH_DFS,\n \"periodInMs\": 5000,\n \"reportEvents\":\n wutils.WifiEnums.REPORT_EVENT_AFTER_BUFFER_FULL}\n self.start_wifi_scanner_background_scan_expect_failure(scan_setting)", "def logRecordHandler(self, logrecord):\n logrecords = self._logrecords\n logrecords.append(logrecord)\n if len(logrecords) > self._queuesize:\n logrecords.pop(0)\n self._logRecordsTotal += 1", "def _buffer(self, n=None):\n if self._out_of_scope:\n raise ResultConsumedError(self, _RESULT_OUT_OF_SCOPE_ERROR)\n if self._consumed:\n raise ResultConsumedError(self, _RESULT_CONSUMED_ERROR)\n if n is not None and len(self._record_buffer) >= n:\n return\n record_buffer = deque()\n for record in self:\n record_buffer.append(record)\n if n is not None and len(record_buffer) >= n:\n break\n if n is None:\n self._record_buffer = record_buffer\n else:\n self._record_buffer.extend(record_buffer)\n self._exhausted = not self._record_buffer", "def next_batch(self, batch_size=8):\n raise NotImplementedError()", "def test_bound_size_of_output_queue_size_reader(synthetic_dataset):\n TIME_TO_GET_TO_STATIONARY_STATE = 0.5\n\n with make_reader(synthetic_dataset.url, reader_pool_type='process', workers_count=1) as reader:\n assert 0 == reader.diagnostics['items_produced']\n next(reader)\n # Verify that we did not consume all rowgroups (should be 10) and ventilator throttles number of ventilated\n # items\n sleep(TIME_TO_GET_TO_STATIONARY_STATE)\n assert reader.diagnostics['items_consumed'] < 5\n assert reader.diagnostics['items_inprocess'] < 5", "def send(self, payloads):\n records = self._payload_messages(payloads)\n\n # SQS only supports up to 10 messages so do the send in batches\n for message_batch in self._message_batches(records):\n response = self._send_messages(message_batch)\n self._finalize(response, message_batch)", "def write_batch(self, batch):\n for item in batch:\n self.write_buffer.buffer(item)\n key = self.write_buffer.get_key_from_item(item)\n if self.write_buffer.should_write_buffer(key):\n self._write_current_buffer_for_group_key(key)\n self.increment_written_items()\n self._check_items_limit()", "def calculateChunkSize(size, record_count, splits):\n avg_record_size = size / record_count\n logging.info(\n \"Avg record size: %0.02f=%d/%d\" %\n (avg_record_size, size, record_count))\n chunk = floor(ceil(size / (splits * avg_record_size)) * avg_record_size)\n\n logging.info(\n \"Setting chunk to: %d=floor(ceil(%d/(%d*%0.02f))*%0.02d)\" %\n (chunk, size, splits, avg_record_size, avg_record_size))\n return chunk", "def consume_data(self, data):\n # Get parameters\n logger_manager = 
data['logger_manager']\n doc_m = data['document_manager']\n message_id = data['message_id']\n documents = data['documents']\n to_remove_queue = data['to_remove_queue']\n duplicates = no_requestInTs = 0\n hash_set = set()\n\n for current_document in documents:\n\n # Mark to removal documents without requestInTs immediately (as of bug in xRoad software ver 6.22.0)\n if current_document['requestInTs'] is None and current_document['securityServerType'] is None:\n to_remove_queue.put(current_document['_id'])\n no_requestInTs += 1\n self.db_m.mark_as_corrected(current_document)\n \"\"\"\n :logger_manager.log_warning('no_requestInTs',\n :'_id : ObjectId(\\'' + str(current_document['_id']) + '\\'),\n :messageId : ' + str(current_document['messageId']))\n \"\"\"\n continue\n\n # Check if is batch duplicated\n current_document_hash = doc_m.calculate_hash(current_document)\n if current_document_hash in hash_set:\n # If yes, mark to removal\n to_remove_queue.put(current_document['_id'])\n duplicates += 1\n self.db_m.mark_as_corrected(current_document)\n \"\"\"\n :logger_manager.log_warning('batch_duplicated',\n :'_id : ObjectId(\\'' + str(current_document['_id']) + '\\'),\n :messageId : ' + str(current_document['messageId']))\n \"\"\"\n continue\n\n # Check if is database duplicated\n if self.db_m.check_if_hash_exists(current_document_hash):\n # If here, add to batch duplicate cache\n hash_set.add(current_document_hash)\n duplicates += 1\n self.db_m.mark_as_corrected(current_document)\n \"\"\"\n :logger_manager.log_warning('database_duplicated',\n :'_id : ObjectId(\\'' + str(current_document['_id']) + '\\'),\n :messageId : ' + str(current_document['messageId']))\n \"\"\"\n continue\n\n # Mark hash as seen\n hash_set.add(current_document_hash)\n # Find possible matching documents\n matching_documents = self.db_m.find_by_message_id(current_document)\n # Try to match the current document with possible pairs (regular)\n merged_document = doc_m.find_match(current_document, matching_documents)\n matching_type = ''\n\n if merged_document is None:\n # Try to match the current document with orphan-matching\n merged_document = doc_m.find_orphan_match(current_document, matching_documents)\n if merged_document is not None:\n matching_type = 'orphan_pair'\n else:\n matching_type = 'regular_pair'\n\n if merged_document is None:\n matching_type = 'orphan'\n if current_document['securityServerType'] == 'Producer':\n new_document = doc_m.create_json(None, current_document, None, current_document_hash, message_id)\n else:\n if current_document['securityServerType'] != 'Client':\n current_document['securityServerType'] = 'Client'\n new_document = doc_m.create_json(current_document, None, current_document_hash, None, message_id)\n\n new_document = doc_m.apply_calculations(new_document)\n new_document['correctorTime'] = database_manager.get_timestamp()\n new_document['correctorStatus'] = 'processing'\n new_document['matchingType'] = matching_type\n\n # Mark non-xRoad queries as 'done' instantly. 
No reason to wait matching pair\n if 'client' in new_document and new_document['client'] is not None and 'clientXRoadInstance' in new_document['client'] \\\n and new_document['client']['clientXRoadInstance'] is None:\n new_document['correctorStatus'] = 'done'\n new_document['matchingType'] = 'orphan'\n\n self.db_m.add_to_clean_data(new_document)\n\n else:\n\n if current_document['securityServerType'] == 'Client':\n\n if merged_document['client'] is None:\n merged_document['client'] = current_document\n merged_document = doc_m.apply_calculations(merged_document)\n merged_document['clientHash'] = current_document_hash\n merged_document['correctorTime'] = database_manager.get_timestamp()\n merged_document['correctorStatus'] = 'done'\n merged_document['matchingType'] = matching_type\n self.db_m.update_document_clean_data(merged_document)\n else:\n # This should never-ever happen in >= v0.4.\n msg = '[{0}] 2 matching clients for 1 producer: {1}'.format(self.worker_name, current_document)\n logger_manager.log_warning('corrector_merging', msg)\n\n else:\n\n if merged_document['producer'] is None:\n merged_document['producer'] = current_document\n merged_document = doc_m.apply_calculations(merged_document)\n merged_document['producerHash'] = current_document_hash\n merged_document['correctorTime'] = database_manager.get_timestamp()\n merged_document['correctorStatus'] = 'done'\n merged_document['matchingType'] = matching_type\n self.db_m.update_document_clean_data(merged_document)\n else:\n # This should never-ever happen in >= v0.4.\n msg = '[{0}] 2 matching producers for 1 client: {1}'.format(self.worker_name, current_document)\n logger_manager.log_error('corrector_merging', msg)\n\n self.db_m.mark_as_corrected(current_document)\n\n if no_requestInTs:\n msg = '[{0}] {1} document(s) without requestInTs present'.format(self.worker_name, no_requestInTs)\n logger_manager.log_warning('corrector_no_requestInTs', msg)\n\n return duplicates", "def _export(self, flush_request: typing.Optional[_FlushRequest]):\n if not flush_request:\n self._export_batch()\n return\n\n num_spans = flush_request.num_spans\n while self.queue:\n num_exported = self._export_batch()\n num_spans -= num_exported\n\n if num_spans <= 0:\n break", "def _one_mini_batch(self, data, indices, pad_id):\n batch_data = {'raw_data': [data[i] for i in indices],\n 'question_token_ids': [],\n 'question_length': [],\n 'passage_token_ids': [],\n 'passage_length': [],\n 'start_id': [],\n 'end_id': []}\n max_passage_num = max([len(sample['passages']) for sample in batch_data['raw_data']])\n max_passage_num = min(self.max_p_num, max_passage_num)\n for sidx, sample in enumerate(batch_data['raw_data']):\n for pidx in range(max_passage_num):\n if pidx < len(sample['passages']):\n batch_data['question_token_ids'].append(sample['question_token_ids'])\n batch_data['question_length'].append(len(sample['question_token_ids']))\n passage_token_ids = sample['passages'][pidx]['passage_token_ids']\n batch_data['passage_token_ids'].append(passage_token_ids)\n batch_data['passage_length'].append(min(len(passage_token_ids), self.max_p_len))\n else:\n batch_data['question_token_ids'].append([])\n batch_data['question_length'].append(0)\n batch_data['passage_token_ids'].append([])\n batch_data['passage_length'].append(0)\n batch_data, padded_p_len, padded_q_len = self._dynamic_padding(batch_data, pad_id)\n for sample in batch_data['raw_data']:\n if 'answer_passages' in sample and len(sample['answer_passages']):\n gold_passage_offset = padded_p_len * 
sample['answer_passages'][0]\n batch_data['start_id'].append(gold_passage_offset + sample['answer_spans'][0][0])\n batch_data['end_id'].append(gold_passage_offset + sample['answer_spans'][0][1])\n else:\n # fake span for some samples, only valid for testing\n batch_data['start_id'].append(0)\n batch_data['end_id'].append(0)\n return batch_data", "def chunks(sequence, chunk_size):\r\n\r\n # YOUR CODE HERE\r", "def test_batch_upload(\n large_upload_collection: UploadCollection,\n fake_session: HexpySession,\n caplog: CaptureFixture,\n) -> None:\n responses.add(\n responses.POST, HexpySession.ROOT + \"content/upload\", json={}, status=200\n )\n\n client = ContentUploadAPI(fake_session)\n\n with caplog.at_level(logging.INFO):\n response = client.upload(\n document_type=123456789, items=large_upload_collection, request_usage=True\n )\n\n assert (\n caplog.records[0].msg\n == \"More than 1000 items found. Uploading in batches of 1000.\"\n )\n\n assert response == {\"Batch 0\": {}, \"Batch 1\": {}, \"Batch 2\": {}, \"Batch 3\": {}}", "def queue_emails(spoofs, message, count):\n\t\tqueues = []\n\t\tnumber_of_spoofs = len(spoofs)\n\t\tmessages_per_queue = count // number_of_spoofs\n\t\textra_to_distribute = count - (messages_per_queue * number_of_spoofs)\n\t\tbatch = Batch(size=count, complete=0)\n\t\tbatch.save()\n\t\tpk = batch.pk\n\n\t\t# going deep into each queue\n\t\tfor x in range(number_of_spoofs):\n\n\t\t\tspoof = spoofs[x]\n\t\t\tmessage['From'] = spoof.username\n\t\t\tqueue = Queue(spoof.username, connection=Redis())\n\t\t\tqueues.append(queue)\n\n\t\t\tfor y in range(messages_per_queue):\n\t\t\t\tqueue.enqueue_call(func=send, args=spoof.task_arguments + (message, pk))\n\n\t\t# panning across each queue\n\t\tfor x in range(extra_to_distribute):\n\t\t\tspoof = spoofs[x]\n\t\t\tmessage['From'] = spoof.username\n\t\t\tqueue = queues[x]\n\t\t\tqueue.enqueue_call(func=send ,args=(spoof.task_arguments + (message, pk)))\n\n\t\treturn pk", "def _drain_queue(self):\n while self.queue:\n self._export_batch()", "def next_batch(self, batch_size):\r\n raise NotImplementedError", "def adjust_batch_size(self, adjustment_factor):\n target_batch_size = adjustment_factor + self.batch_size\n if target_batch_size < 1: # Don't go below one page.\n self.batch_size = 1\n elif target_batch_size > self.max_threads: # Don't exceed max_threads.\n self.batch_size = self.max_threads\n else:\n self.batch_size = target_batch_size\n return", "def test_bulk_round_trip_with_backoff(self):\n self._test_bulk_round_trip(nodes=3, partitioner=\"murmur3\", num_operations=250000,\n copy_from_options={'MAXINFLIGHTMESSAGES': 64, 'MAXPENDINGCHUNKS': 1})", "def get_batch_save_limit(session_key):\n limits = get_limits('kvstore', session_key)\n NotableEventUpdate.BATCH_SAVE_LIMIT = limits.max_documents_per_batch_save", "def _paging_msg_sent_callback(self, future):\n err = future.exception()\n if err:\n if isinstance(err, grpc.RpcError) and indicates_connection_error(err):\n self.logger.error(\n \"Couldn't send flow records to sessiond, connection error\",\n extra=EXCLUDE_FROM_ERROR_MONITORING,\n )\n else:\n self.logger.error(\"Couldn't send flow records to sessiond: %s\", err)\n return", "def _check_batch_size(self, data_list):\n if self.api_info is None:\n self.get_info() # sets the image size and other such info from server.\n MAX_BATCH_SIZE = self.api_info['max_batch_size']\n if len(data_list) > MAX_BATCH_SIZE:\n raise ApiError((\"Number of images provided in bach %d is greater than maximum allowed per \"\n \"request %d\") % 
(len(data_list), MAX_BATCH_SIZE))", "def delete_messages(self):\n if not self.processed_messages:\n LOGGER.error('No processed messages to delete')\n return\n\n while self.processed_messages:\n len_processed_messages = len(self.processed_messages)\n batch = len_processed_messages if len_processed_messages < 10 else 10\n\n # Delete_batch can only process up to 10 messages\n message_batch = [self.processed_messages.pop() for _ in range(batch)]\n\n resp = self.sqs_client.delete_message_batch(\n QueueUrl=self.athena_sqs_url,\n Entries=[{'Id': message['MessageId'],\n 'ReceiptHandle': message['ReceiptHandle']}\n for message in message_batch]\n )\n LOGGER.info('Successfully deleted %s messages from the queue',\n len(resp['Successful']))", "def run(self, batch_size=20):\n logging.info('%s: Starting.'% (self.__class__.__name__))\n deferred.defer(self._continue, None, batch_size, _queue=self.QUEUE)", "def test_super_chunk(self):\n chunksize = MAX_SINGLE_UPLOAD_SIZE + 1\n size = MAX_SINGLE_UPLOAD_SIZE * 2\n self.assertEqual(find_chunksize(size, chunksize),\n MAX_SINGLE_UPLOAD_SIZE)", "def handler(message):\n records = message.collect()\n list_collect = []\n for record in records:\n # Parse record\n read = json.loads(record[1].decode('utf-8'))\n list_collect.append((read['text'],read['tags']))\n data = (clean(read['text']),read['tags'])\n job = read['index']\n\n data = spark.createDataFrame([data],['cleaned_body','tags'])\n data = model.transform(data)\n d = data.select('features','tags').collect()\n\n keys = retrieve_keys(d[0]['tags'])\n # look to optimize slice length based on keys and throughput\n slice_length = max(len(keys)//10000,min(len(keys)//49,200))\n print(slice_length)\n keys_sliced = [','.join(keys[i:i+slice_length]) for i in range(0,len(keys),slice_length)]\n keys = spark.createDataFrame(keys_sliced, StringType())\n score_udf = udf(lambda r: get_features(r,d[0]['features']), FloatType())\n keys = keys.withColumn('features', score_udf(keys['value'])).collect()\n # need to get top result from zadd\n report_to_redis(job)\n return", "def on_limit(self, track):\n log.debug(\"Received limit notice: %d\", track)", "def on_batch_end(self, batch, logs=None):", "def split(self, bytes):\n # '''Split by lines heroku payload and apply filters.'''\n\n # lines = []\n\n lines = []\n while len(bytes) > 0:\n # find first space character\n i = 0\n while bytes[i] != 32: # 32 is white space in unicode\n i += 1\n msg_len = int(bytes[0:i].decode('utf-8'))\n msg = bytes[i + 1:i + msg_len + 1]\n\n # remove \\n at the end of the line if found\n eol = msg[len(msg)-1]\n if eol == 10 or eol == 13: # \\n or \\r in unicode\n msg = msg[:-1]\n\n decoded_msg = msg.decode('utf-8', 'replace')\n if self.truncate_to > -1:\n # replace token by __TOKEN_REPLACED__\n decoded_msg = self.patternToken.sub(lambda x:\n '{}__TOKEN_REPLACED__{}'\n .format(x.group(1),\n x.group(3)),\n decoded_msg)\n\n max_ = self.truncate_to\n # TRUNCATE Big logs except stack traces\n if not self.patternStackTrace.search(\n decoded_msg) and len(decoded_msg) > max_:\n decoded_msg = '%s __TRUNCATED__ %s' % (\n decoded_msg[:max_//2], decoded_msg[-max_//2:])\n\n lines.append(decoded_msg)\n\n bytes = bytes[i + 1 + msg_len:]\n return lines", "def uploade_how_many_rows_we_want(self, df):\r\n try:\r\n if len(df) > 300000 or df.memory_usage(deep=True).sum() > self.memory:\r\n raise Exception(\"batch request\")\r\n try:\r\n self.insert(df)\r\n \r\n except Exception as ex:\r\n if 'string contains an untranslatable character' in str(ex):\r\n for i in 
np.where(df.dtypes != np.float)[0]:\r\n df['drop'] = df[df.columns[i]].apply(lambda x: self.is_ascii(x))\r\n l_tmp = (df['drop'][df['drop']].index)\r\n if len(l_tmp) > 0:\r\n print(\"rows remove: \" + str(list(l_tmp)))\r\n df.drop(l_tmp, inplace=True)\r\n df.drop('drop', axis=1, inplace=True)\r\n elif 'batch request' in str(ex) or 'LAN message' in str(ex):\r\n raise Exception(\"batch request\")\r\n else:\r\n print('error')\r\n print(ex)\r\n raise error\r\n self.rows += len(df)\r\n\r\n\r\n except Exception as ex:\r\n if \"batch request\" in str(ex):\r\n \r\n # split the data to 2 dataframes\r\n len_data = math.ceil(len(df)/2)\r\n df1 = df.iloc[:len_data]\r\n df2 = df.iloc[len_data:]\r\n\r\n self.uploade_how_many_rows_we_want(df1)\r\n self.uploade_how_many_rows_we_want(df2)\r\n\r\n\r\n else:\r\n print (ex)\r\n raise error", "def trimstatic_se(records, args):\n for record in records:\n record = record[args.f:len(record) - args.t]\n if args.r:\n record = record.reverse_complement(name=True,id=True,description=True)\n if args.d:\n record.name += '/1'\n record.id += '/1'\n record.description += '/1'\n if len(record) >= args.m and numpy.mean(record.letter_annotations['phred_quality']) >= args.a:\n yield record", "def send_chunked(self, chunks, payload, trailers):\r\n\r\n chunk_list = chunks.split(',')\r\n pointer = 0\r\n for cwidth in chunk_list:\r\n cwidth = int(cwidth)\r\n # send chunk length indicator\r\n self.wfile.write(format(cwidth, 'x').upper() + \"\\r\\n\")\r\n # send chunk payload\r\n self.wfile.write(payload[pointer:pointer + cwidth] + \"\\r\\n\")\r\n pointer += cwidth\r\n\r\n # is there another chunk that has not been configured? Send it anyway for the sake of completeness..\r\n if len(payload) > pointer:\r\n # send chunk length indicator\r\n self.wfile.write(format(len(payload) - pointer, 'x').upper() + \"\\r\\n\")\r\n # send chunk payload\r\n self.wfile.write(payload[pointer:] + \"\\r\\n\")\r\n\r\n # we're done with the payload. 
Send a zero chunk as EOF indicator\r\n self.wfile.write('0'+\"\\r\\n\")\r\n\r\n # if there are trailing headers :-) we send them now..\r\n for trailer in trailers:\r\n self.wfile.write(\"%s: %s\\r\\n\" % (trailer[0], trailer[1]))\r\n\r\n # and finally, the closing ceremony...\r\n self.wfile.write(\"\\r\\n\")", "def process_sentence_chunks(\n ds: MMapRetrievalIndexedDataset,\n tokenizer,\n chunk_size: int,\n stage: int,\n workers: int,\n shard_id: int,\n total_shards: int,\n):\n total_chunks = ds.chunks\n start = 0\n threshold = 0\n\n if stage == 1:\n start, total_chunks = calculate_start_end(\n total_chunks=total_chunks, total_shards=total_shards, shard_id=shard_id\n )\n logging.info(f'shard_id {shard_id}, create index from chunk {start} to {total_chunks}')\n\n with Pool(workers) as p:\n while start < total_chunks:\n if start / total_chunks > threshold:\n logging.info(f\"sentence processing {start / total_chunks} is done\")\n threshold += 0.1\n slice_id = (start, min(start + chunk_size, total_chunks))\n beg = time.time()\n id_slices = ds.get_chunk(slice(*slice_id), force_no_cont_ids=True)\n end = time.time()\n logging.info(f\"load {chunk_size} chunks takes {end-beg}\")\n start = min(start + chunk_size, total_chunks)\n sentences = p.map(tokenizer.ids_to_text, id_slices)\n end2 = time.time()\n logging.info(f\"tokenize {chunk_size} chunks takes {end2-end}\")\n queue.put((sentences, slice_id))\n queue.put((None, None))", "def _send_pending_messages():\n\n queryset = models.Message.objects.filter(status=models.STATUS_PENDING)\\\n .order_by(\"-priority\", \"created_at\")\n\n connection = _get_real_backend()\n paginator = Paginator(list(queryset), getattr(settings, \"DJMAIL_MAX_BULK_RETRY_SEND\", 10))\n\n for page_index in paginator.page_range:\n connection.open()\n for message_model in paginator.page(page_index).object_list:\n email = message_model.get_email_message()\n sended = connection.send_messages([email])\n\n if sended == 1:\n message_model.status = models.STATUS_SENT\n message_model.sent_at = timezone.now()\n else:\n message_model.retry_count += 1\n\n message_model.save()\n connection.close()", "def batch_process(self, message_list, action, userId='me'):\n\n list_of_ids = []\n\n for key, value in message_list.items():\n list_of_ids.append(value)\n\n chunks = [list_of_ids[x:x+1000] for x in range(0, len(list_of_ids), 1000)]\n\n for page in range(0, len(chunks)):\n if action.lower() == 'archive':\n resource = getattr(self.connection.users().messages(), 'batchModify')\n body = { \n \"ids\": chunks[page],\n \"removeLabelIds\": [\"INBOX\"],\n }\n else:\n resource = getattr(self.connection.users().messages(), 'batchDelete')\n body = { \n \"ids\": chunks[page],\n }\n\n dynamic_request = resource(userId=userId, body=body)\n response = dynamic_request.execute()\n print(f'[√] Bulk Action: SUCCESS {len(chunks[page])} Messages have been {action}d! 
- {page}')\n print(f'[√] Bulk Action: SUCCESS Total Number of Processed Messages: {len(list_of_ids)}')\n return True", "def RecordBatches(\n self, options: dataset_options.RecordBatchesOptions\n ) -> Iterator[pa.RecordBatch]:", "def _extract_batch(self, data, batch_size):\n\n batch_size = batch_size or BATCH_SIZE\n\n batch = []\n try:\n for i in range(batch_size):\n batch.append(data.next())\n except StopIteration:\n pass\n\n return batch", "def _trunc_lines(self):\n\t\tif self._appendMessages:\n\t\t\tself._trunc_lines_append()\n\t\telse:\n\t\t\tself._trunc_lines_prepend()", "def batch_split(self, batch_text, threads=8):\n pass", "def test_google_storage_small_batch_size(sdc_builder, sdc_executor, gcp):\n\n gcp_file_name = 'test_9_records.xls'\n\n pipeline_builder = sdc_builder.get_pipeline_builder()\n\n bucket_name = get_random_string(ascii_lowercase, 10)\n\n storage_client = gcp.storage_client\n\n google_cloud_storage = pipeline_builder.add_stage('Google Cloud Storage', type='origin')\n\n google_cloud_storage.set_attributes(bucket=bucket_name,\n common_prefix='gcs-test',\n prefix_pattern=gcp_file_name,\n data_format='EXCEL',\n max_batch_size_in_records=1,\n excel_header_option='WITH_HEADER')\n\n pipeline_finisher_executor = pipeline_builder.add_stage('Pipeline Finisher Executor')\n pipeline_finisher_executor.set_attributes(preconditions=['${record:eventType() == \\'no-more-data\\'}'],\n on_record_error='DISCARD')\n\n google_cloud_storage >= pipeline_finisher_executor\n\n wiretap = pipeline_builder.add_wiretap()\n google_cloud_storage >> wiretap.destination\n\n pipeline = pipeline_builder.build(title='Google Cloud Storage').configure_for_environment(gcp)\n sdc_executor.add_pipeline(pipeline)\n\n created_bucket = gcp.retry_429(storage_client.create_bucket)(bucket_name)\n try:\n blob = created_bucket.blob('gcs-test/' + gcp_file_name)\n blob.upload_from_filename('resources/gcp/' + gcp_file_name)\n\n # Start the pipeline and make sure the timeout is defined, since the original bug involved\n # an infinite loop reading the same file over and over again\n sdc_executor.start_pipeline(pipeline).wait_for_finished(timeout_sec=300)\n\n # Check that the file has been read through exactly once\n output_records = [record.field for record in wiretap.output_records]\n assert len(output_records) == 9\n # Check that no error records were generated\n error_records = [record.field for record in wiretap.error_records]\n assert len(error_records) == 0\n\n finally:\n logger.info('Deleting bucket %s ...', created_bucket.name)\n gcp.retry_429(created_bucket.delete)(force=True)", "def start_chunking(self) -> None:\n self._finished_chunking.clear()\n self._chunks_left = ceil(self.member_count / 1000)", "def _send_data(self):\n \n # Do not send more than 100 datasets each time (totally arbitrary)\n MAX_DATA_SETS_PER_POST = 100\n data_to_send = self._data_buffer[0:MAX_DATA_SETS_PER_POST]\n data_to_keep = self._data_buffer[MAX_DATA_SETS_PER_POST:]\n\n # Prepare data string with the values in data buffer\n now = time.time()\n data_string = '[' \n for (timestamp, data) in data_to_send:\n data_string += '['\n data_string += str(round(timestamp-now,2))\n for sample in data:\n data_string += ','\n data_string += str(sample)\n data_string += '],'\n # Remove trailing comma and close bracket\n data_string = data_string[0:-1]+']'\n\n self._log.debug(\"Data string: \" + data_string)\n \n # Prepare URL string of the form\n # 'http://domain.tld/emoncms/input/bulk.json?apikey=\n # 
12345&data=[[-10,10,1806],[-5,10,1806],[0,10,1806]]'\n url_string = self._settings['protocol'] + self._settings['domain'] + \\\n self._settings['path'] + \"/input/bulk_json?apikey=\" + \\\n self._settings['apikey'] + \"&data=\" + data_string\n self._log.debug(\"URL string: \" + url_string)\n\n # Send data to server\n self._log.info(\"Sending to \" + \n self._settings['domain'] + self._settings['path'])\n try:\n result = urllib2.urlopen(url_string, timeout=60)\n except urllib2.HTTPError as e:\n self._log.warning(\"Couldn't send to server, HTTPError: \" + \n str(e.code))\n except urllib2.URLError as e:\n self._log.warning(\"Couldn't send to server, URLError: \" + \n str(e.reason))\n except httplib.HTTPException:\n self._log.warning(\"Couldn't send to server, HTTPException\")\n except Exception:\n import traceback\n self._log.warning(\"Couldn't send to server, Exception: \" + \n traceback.format_exc())\n else:\n if (result.readline() == 'ok'):\n self._log.debug(\"Send ok\")\n # Send ok -> empty buffer\n self._data_buffer = data_to_keep\n return True\n else:\n self._log.warning(\"Send failure\")", "def _retry_send_messages():\n\n max_retry_value = getattr(settings, \"DJMAIL_MAX_RETRY_NUMBER\", 3)\n queryset = models.Message.objects.filter(status=models.STATUS_FAILED)\\\n .filter(retry_count__lte=max_retry_value)\\\n .order_by(\"-priority\", \"created_at\")\n\n connection = _get_real_backend()\n paginator = Paginator(list(queryset), getattr(settings, \"DJMAIL_MAX_BULK_RETRY_SEND\", 10))\n\n for page_index in paginator.page_range:\n connection.open()\n for message_model in paginator.page(page_index).object_list:\n email = message_model.get_email_message()\n sended = connection.send_messages([email])\n\n if sended == 1:\n message_model.status = models.STATUS_SENT\n message_model.sent_at = timezone.now()\n else:\n message_model.retry_count += 1\n\n message_model.save()\n\n connection.close()", "def _flush(self):\n buffer_len = len(self._buffer)\n\n if buffer_len == 0:\n _log.info('No pending records to index; URI: %s; index: %s',\n self._uri, self._index)\n return\n\n _log.info('Indexing %d records; URI: %s; index: %s ...',\n buffer_len, self._uri, self._index)\n\n headers = {'Authorization': 'Splunk ' + self._token}\n\n try:\n response = self._session.post(self._uri,\n headers=headers,\n data=json.dumps(self._buffer),\n verify=self._ca_cert)\n\n log_data = ('URI: {}; index: {}; response status: {}; '\n 'response content: {}'\n .format(self._uri, self._index,\n response.status_code, response.text))\n\n if response.status_code != 200:\n _log.error('Failed to index %d records; HTTP status '\n 'code indicates error; %s',\n buffer_len, log_data)\n return\n\n try:\n j = response.json()\n except Exception as e:\n _log.error('Failed to get JSON from response; %s; '\n 'error: %s; %s', log_data, type(e).__name__, e)\n return\n\n if j['code'] != 0:\n _log.error('Failed to index %d records; Splunk status '\n 'code in JSON indicates error; %s',\n buffer_len, log_data)\n return\n\n _log.info('Indexed %d records; %s', buffer_len, log_data)\n del self._buffer[:]\n\n except requests.ConnectionError as e:\n _log.error('Failed to index %d records; connection error; '\n 'URI: %s; index: %s; error: %s: %s; ',\n buffer_len, self._uri, self._index,\n type(e).__name__, e)\n\n except Exception as e:\n _log.error('Failed to index %d records; unexpected error; '\n 'URI: %s; index: %s; error: %s: %s',\n buffer_len, self._uri, self._index,\n type(e).__name__, e)", "async def _clean_up_batch_of_old_cache_invalidations(\n 
self, delete_up_to_millisec: int\n ) -> bool:\n\n def _clean_up_batch_of_old_cache_invalidations_txn(\n txn: LoggingTransaction,\n ) -> bool:\n # First get the earliest stream ID\n txn.execute(\n \"\"\"\n SELECT stream_id FROM cache_invalidation_stream_by_instance\n ORDER BY stream_id ASC\n LIMIT 1\n \"\"\"\n )\n row = txn.fetchone()\n if row is None:\n return False\n earliest_stream_id: int = row[0]\n\n # Then find the last stream ID of the range we will delete\n txn.execute(\n \"\"\"\n SELECT stream_id FROM cache_invalidation_stream_by_instance\n WHERE stream_id <= ? AND invalidation_ts <= ?\n ORDER BY stream_id DESC\n LIMIT 1\n \"\"\",\n (earliest_stream_id + CLEAN_UP_MAX_BATCH_SIZE, delete_up_to_millisec),\n )\n row = txn.fetchone()\n if row is None:\n return False\n cutoff_stream_id: int = row[0]\n\n # Determine whether we are caught up or still catching up\n txn.execute(\n \"\"\"\n SELECT invalidation_ts FROM cache_invalidation_stream_by_instance\n WHERE stream_id > ?\n ORDER BY stream_id ASC\n LIMIT 1\n \"\"\",\n (cutoff_stream_id,),\n )\n row = txn.fetchone()\n if row is None:\n in_backlog = False\n else:\n # We are in backlog if the next row could have been deleted\n # if we didn't have such a small batch size\n in_backlog = row[0] <= delete_up_to_millisec\n\n txn.execute(\n \"\"\"\n DELETE FROM cache_invalidation_stream_by_instance\n WHERE ? <= stream_id AND stream_id <= ?\n \"\"\",\n (earliest_stream_id, cutoff_stream_id),\n )\n\n return in_backlog\n\n return await self.db_pool.runInteraction(\n \"clean_up_old_cache_invalidations\",\n _clean_up_batch_of_old_cache_invalidations_txn,\n )", "def batch(self, data, size):\n\n return [data[x : x + size] for x in range(0, len(data), size)]", "def valid_minibatches(self):\n batch_size = self.params['batch_size']\n start_index = 0\n while start_index + batch_size < 500:\n end_index = start_index + batch_size\n yield self.input[start_index:end_index], self.y[start_index:end_index]\n start_index = end_index", "def put_record(self, tag, json_str):\n a = 0\n while a < 2000:\n if a % 100 == 0 and a != 0:\n logger.info(\"A batch of 100 simple json records have been sent\")\n self.firehose_client.put_record(DeliveryStreamName=self.get_stream_name(tag),\n Record={\n 'Data': json_str\n }\n )\n a = a + 1\n logger.info(\"Records were placed successfully!!\")", "def create_batches(self, batch_size: int, repeat: bool, drop_last: bool, device: Device) -> None:\n self.repeat = repeat\n\n # Work out how cleanly we can divide the dataset into batch-sized parts\n num_batched_steps = self.indexed_corpus.shape[0] // batch_size\n\n # Trim off any extra elements that wouldn't cleanly fit (remainders)\n self.indexed_corpus = self.indexed_corpus.narrow(0, 0, num_batched_steps * batch_size)\n\n # Evenly divide the data across the bsz batches.\n raw_batches = self.indexed_corpus.view(batch_size, -1).t().contiguous().to(device)\n\n # If the last batch would be too short and drop_last is true, remove it\n if num_batched_steps % self.seq_len > 0 and drop_last:\n num_batched_steps -= num_batched_steps % self.seq_len\n\n self.num_batches = math.ceil(num_batched_steps / self.seq_len)\n\n self.batches = [raw_batches[n * self.seq_len: (n + 1) * self.seq_len + 1, :] for n in range(self.num_batches)]", "def on_batch_begin(self, batch, logs=None):", "def send_logs(self):\n for i in range(30):\n with open('{}-{}.log'.format(self._log_file_path, i), 'a') as log_file:\n for _ in range(self._log_rate):\n log_file.write(self._log_record + '\\n')", "def _Dynamic_Flush(self, 
request, unused_response, request_id):\n rl = self._pending_requests.get(request_id, None)\n if rl is None:\n return\n group = log_service_pb.UserAppLogGroup(request.logs())\n logs = group.log_line_list()\n for log in logs:\n al = self._pending_requests_applogs[request_id].add()\n al.time = log.timestamp_usec()\n al.level = log.level()\n al.message = log.message()", "def copy_in_chunks(self, chunk_size=None, throttle=None, start=None, limit=None):\n # On restart, foreign_keys exist, don't remake them\n self.create_triggers()\n\n self.chunk_size = chunk_size if chunk_size else self.db.config['DEFAULT_CHUNK_SIZE']\n throttle = throttle if throttle else self.db.config['DEFAULT_THROTTLE']\n\n if self.count == 0 or self.count != self.source.count:\n if not start:\n start = self.source.min_pk\n if not limit:\n limit = self.source.max_pk\n\n self.start_time = datetime.datetime.now()\n\n pointer = start\n if not (pointer and limit):\n pass\n else:\n while pointer < limit:\n self._copy_chunk(pointer)\n pointer = self._get_next_pk(pointer)\n self.log(start, pointer, limit)\n time.sleep(throttle)\n if pointer == limit:\n self._copy_chunk(pointer)\n self.log(start, pointer, limit)\n\n print('Copy complete! Adding referenced foreign keys')\n referenced_fks = [x for x in self.source.foreign_keys if x.referenced]\n self.add_foreign_keys(referenced_fks, override_table=self.name)\n return True", "def bulkupload_entitie_records(self, entity_upload_parameters, tmp_file, progress=None):\n records = self.service_client.factory.create(\"ns2:ArrayOfstring\")\n tmp_csv_file = io.open(tmp_file, encoding='utf-8-sig')\n\n records.string = [x.strip() for x in tmp_csv_file.readlines()]\n \n try:\n #print(self.service_client)\n response = self.service_client.UploadEntityRecords(\n AccountId=self._authorization_data.account_id,\n EntityRecords=records,\n ResponseMode=entity_upload_parameters.response_mode\n )\n if self.need_to_fall_back_to_async(response):\n headers = self.service_client.get_response_header()\n operation = BulkUploadOperation(\n request_id=response.RequestId,\n authorization_data=self._authorization_data,\n poll_interval_in_milliseconds=self._poll_interval_in_milliseconds,\n environment=self._environment,\n tracking_id=headers['TrackingId'] if 'TrackingId' in headers else None,\n **self.suds_options\n )\n file_path = self.download_upload_result(operation, entity_upload_parameters, progress)\n return self.read_result_from_bulk_file(file_path)\n else:\n return self.read_bulkupsert_response(response) \n except Exception as ex:\n if 'OperationNotSupported' == operation_errorcode_of_exception(ex):\n return self.bulkupload_entities(entity_upload_parameters, tmp_file, progress)\n else:\n raise ex", "def prepare_batch(data,BATCH_SIZE, filename):\n ### select the last BATCH_SIZE rows from batch dataset\n batch = data.iloc[-BATCH_SIZE:].values.tolist()\n batch_data = []\n \n ### remove white spaces in the list because predtion server expects no white spaces between elements\n for i in batch:\n str_row = str(i)\n str_row = str_row.replace(' ','')\n batch_data.append(str_row)\n \n ### write values in a file called filename\n with open(filename, 'w') as f:\n f.write(','.join(str(i) for i in batch_data))", "def _check_failures(self, response, batch=None):\n if not response.get('Failed'):\n return 0 # nothing to do here\n\n LOGGER.error('The following records failed to put to queue %s', self.queue.url)\n\n for failure in response['Failed']:\n # Pull out the record that matches this ID\n record = 
self._extract_message_by_id(batch, failure['Id']) if batch else None\n LOGGER.error(self._format_failure_message(failure, record=record))\n\n failed = len(response.get('Failed', []))\n self._log_failed(failed)\n\n # Raise an exception if this is the fault of the sender (us)\n if any(result['SenderFault'] for result in response['Failed']):\n raise SQSClientError('Failed to send records to SQS:\\n{}'.format(response))\n\n return failed", "def chunked_insert(model, items, chunk_size=150):\n # https://www.sqlite.org/limits.html#max_compound_select\n with db.atomic():\n for idx in range(0, len(items), chunk_size):\n model.insert_many(items[idx:idx+chunk_size]).execute()", "def on_limit(self, track):\n print ('Got Rate limit Message', str(track))\n return True # Don't kill the stream" ]
[ "0.61672634", "0.61136776", "0.55916774", "0.55586743", "0.55274945", "0.5349872", "0.5317542", "0.5314152", "0.51939815", "0.519289", "0.5187157", "0.51492345", "0.5112248", "0.50969017", "0.5063732", "0.5059225", "0.5043429", "0.5042894", "0.5032821", "0.49991766", "0.49933842", "0.49830627", "0.49705377", "0.4968449", "0.49657884", "0.49600086", "0.49515098", "0.49494943", "0.493872", "0.4938432", "0.49352816", "0.49345276", "0.49338925", "0.4928618", "0.49257973", "0.4922577", "0.49209362", "0.49153018", "0.49132475", "0.49115464", "0.4910887", "0.48955932", "0.48949468", "0.48927057", "0.4888395", "0.48880577", "0.48764712", "0.4874775", "0.48672694", "0.48636353", "0.48622143", "0.48613197", "0.48608592", "0.48559925", "0.4853976", "0.4850613", "0.48325717", "0.48324427", "0.48304957", "0.4829426", "0.48038167", "0.48010024", "0.4799141", "0.47982842", "0.47901815", "0.47832665", "0.47776806", "0.47772938", "0.4774183", "0.4773827", "0.47731042", "0.47691506", "0.4758725", "0.47544676", "0.47539383", "0.47538492", "0.4753197", "0.47366056", "0.47344705", "0.47214273", "0.47178593", "0.47147372", "0.4714686", "0.47082597", "0.47079474", "0.47040933", "0.4704016", "0.47028545", "0.46984693", "0.46967286", "0.46829346", "0.4681848", "0.46814826", "0.46771106", "0.4666679", "0.4665068", "0.46615016", "0.46601325", "0.4652519", "0.46518305" ]
0.62689286
0
Perform any final operations for this response, such as metric logging, etc
def _finalize(self, response, batch): if not response: return # Could happen in the case of backoff failing enitrely # Check for failures that occurred in PutRecordBatch after several backoff attempts # And log the actual record from the batch failed = self._check_failures(response, batch=batch) # Remove the failed messages in this batch for an accurate metric successful_records = len(batch) - failed MetricLogger.log_metric(FUNCTION_NAME, MetricLogger.SQS_RECORDS_SENT, successful_records) LOGGER.info( 'Successfully sent %d message(s) to queue %s', successful_records, self.queue.url )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main_response(self, data):", "def main_response(self, data):", "def finalize_response(self, request, response, *args, **kwargs):\n if response.status_code == HTTP_201_CREATED:\n diagnosis = Diagnosis.objects.get(pk=response.data.get('id'))\n serializer = calculate_percentage(diagnosis)\n response.data['score'] = serializer.data\n return super(DiagnosisViewset, self).finalize_response(request, response, *args, **kwargs)", "def on_endpoint_final(self, data):\n logger.debug('Final data from endpoint')", "def after_request(response):\n # TODO: Send log reports to a monitor service such as DataDog?\n return response", "def setup_response_collector(self):\n pass", "def process_response(self, response: response_domain_model):\n ...", "def finalize_response(self, request, response, *args, **kwargs):\n\t\t# Make the error obvious if a proper response is not returned\n\t\tassert isinstance(response, BaseResponse), (\n\t\t\t'Expected a `Response` object '\n\t\t\t'to be returned from the view, but received a `%s`'\n\t\t\t% type(response)\n\t\t)\n\t\treturn response", "def __call__(self, request):\n request.start_time = time.time()\n\n response = self.get_response(request)\n\n log_data = self.extract_log_info(request=request, response=response)\n logger.info(log_data)\n\n return response", "def __refresh(self):\n\n\t\tself.status_code = self.__response.status_code\n\n\t\t# Handle errors. Sometimes there may not be a response body (which is\n\t\t# why ValueError) must be caught.\n\t\ttry:\n\t\t\tif (self.__response.status_code not in [200,204]) or \"error\" in self.__response.json():\n\t\t\t\terror = self.__response.json()\n\t\t\t\traise ResponseError(error[\"error\"][\"message\"], error[\"error\"][\"code\"])\n\t\texcept ValueError:\n\t\t\t# Sometimes DELETE returns nothing. When this is the case, it will\n\t\t\t# have a status code 204\n\t\t\tif self.__response.request.method is not \"DELETE\" and self.__response.status_code is not 204:\n\t\t\t\traise ResponseError(\"Unknown error occured.\", self.__response.status_code)\n\n\t\t# Get information on paging if response is paginated\n\t\tif \"X-Resultset-Total\" in self.__response.headers and self.__response.headers[\"X-Resultset-Total\"] > self.__response.headers[\"X-Resultset-Limit\"]:\n\t\t\tself.is_paginated = True\n\t\t\tself.records = int(self.__response.headers.get(\"X-Resultset-Total\"))\n\t\t\tself.page_size = int(self.__response.headers[\"X-Resultset-Limit\"])\n\t\t\tself.current_page = int(self.__response.headers[\"X-Resultset-Page\"])\n\t\t\tself.max_page = int(math.ceil(float(self.records)/int(self.page_size)))\n\n\t\t# Save the content of the request\n\t\ttry:\n\t\t\tself.json = self.__response.json()\n\t\texcept ValueError:\n\t\t\t# Sometimes DELETE returns nothing. 
When this is the case, it will have a status code 204\n\t\t\tif self.__response.request.method == \"DELETE\" and self.__response.status_code is 204:\n\t\t\t\tself.json = {\"success\":True}\n\t\t\telse:\n\t\t\t\traise ValueError(\"No JSON object could be decoded\" + self.__response.request.method)", "async def _response_handler(self):", "def on_response(self, response):\n pass", "def get_final_response(self,request,response):\n return response", "def after_request(response):\n if hasattr(request, \"_prometheus_metrics_request_start_time\"):\n request_latency = max(\n default_timer() - request._prometheus_metrics_request_start_time, 0\n )\n REQUEST_DURATION_HISTOGRAM.labels(\n APP_NAME,\n request.method,\n request.endpoint,\n response.status_code,\n ).observe(request_latency)\n REQUESTS_COUNT.labels(\n APP_NAME,\n request.method,\n request.endpoint,\n response.status_code,\n ).inc()\n return response", "def RequestHandler_finish(self):\n if self.request._oboe_finish_ev and self.request._oboe_ctx and self.request._oboe_ctx.is_valid():\n ev = self.request._oboe_finish_ev\n ctx = self.request._oboe_ctx\n if hasattr(self, 'get_status'): # recent Tornado\n ev.add_info(\"Status\", self.get_status())\n elif hasattr(self, '_status_code'): # older Tornado\n ev.add_info(\"Status\", self._status_code)\n\n ev.add_edge(oboe.Context.get_default())\n ctx.report(ev)\n\n # clear the stored oboe event/metadata from the request object\n self.request._oboe_ctx = None\n self.request._oboe_finish_ev = None", "def after_request(response):\n request_latency = time.time() - request._prometheus_metrics_request_start_time\n METRICS_REQUEST_LATENCY.labels(request.method, request.path).observe(\n request_latency\n )\n METRICS_REQUEST_COUNT.labels(\n request.method, request.path, response.status_code\n ).inc()\n return response", "def handle_finished (self):\n\n print self.in_headers\n print self.in_cookies\n print self.content_type\n print self.content_encoding\n print self.response_code\n print self.is_allowing_persistence\n print self.content", "def process_response(self, request, response):\n return response", "def process_response(self, request, response):\n return response", "def compute_response(self, items_to_process):\n pass", "def _process_request(self, request, response):\n ...", "def process_response(self,response):\n return self.action.process_response(response)", "def response_handling(self) -> global___Snippet.SimpleResponseHandling:", "def response_handling(self) -> global___Snippet.SimpleResponseHandling:", "def process_response(self, response):\n return response", "def handle_filter_operations_response(self, response):\n\n if self.resource['operation'] == 'find':\n self.resource['metadata']['Results'] = \\\n [response[i] for i in range(response.count())]\n\n elif self.resource['operation'] == 'update_one':\n self.resource['metadata']['matched_count'] = \\\n response.matched_count\n self.resource['metadata']['modified_count'] = \\\n response.modified_count\n\n elif self.resource['operation'] == 'delete_many':\n self.resource['metadata']['deleted_count'] = \\\n response.deleted_count", "def after_request(self, response):\n # only track data for specified blueprints\n if self.blueprints:\n if request.blueprint not in self.blueprints:\n return response\n\n t_0 = getattr(g, 'start_time', dt.datetime.now())\n\n visit = dict(\n session_id=session.get('UUID', 0),\n timestamp=timestamp(),\n url=request.url,\n view_args=request.view_args,\n status_code=response.status_code,\n path=request.path,\n 
latency=(dt.datetime.now() - t_0).microseconds / 100000,\n content_length=response.content_length,\n referer=request.referrer,\n values=request.values\n )\n self.store_visit(visit)\n self.update_top_list(request.path)\n return response", "def perform(self):\n pass", "def processing(self):\n pass", "def onFinish(self):\n self.finalizeStats()", "def finalize_integration(self, **kwargs):", "def _processGETResp(self, output, request):\r\n msg = {'key' : output}\r\n\r\n self._render_GET(request, httplib.OK,\r\n 'application/json; charset=utf-8', json.dumps(msg))", "def fetch(self):\n\n\n # Update Prometheus metrics with application metrics\n self.current_requests.set(get_current_requests())\n self.total_uptime.set(get_uptime())\n self.health.state(get_health())", "def process_server_response(self, server_response):", "def _request_finished(self):\n\n self._next_request = self._next_request_ts()\n\n self._logger.debug(\"next call at %s\" % (time.strftime(\"%H:%M:%S\", time.localtime(self._next_request))))", "def get_post_stats(self):\n stats = self.stats\n stats.results = self.job.result().get_counts(stats.iteration)\n stats.datetime = str(datetime.now())", "def finalize_response(self, request, response, status, checksum=None, error_msg=None, exception=False):\n if error_msg:\n response[\"error_msg\"] = error_msg\n if exception:\n response[\"exception\"] = traceback.format_exc()\n self.log.exception(error_msg)\n else:\n self.log.error(error_msg)\n\n self.log.info(\"Finalizing response\", status=status, id=request[\"id\"], uuid=request[\"uuid\"])\n response[\"id\"] = request[\"id\"]\n response[\"uuid\"] = request[\"uuid\"]\n response[\"status\"] = status\n response[\"operation\"] = request[\"operation\"]\n response[\"file_details\"] = request[\"file_details\"]\n\n # Date formatted to be consistent with DBMS implementation\n response[\"effective_date\"] = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S.%f+00\")\n\n if checksum:\n response[\"file_details\"][\"checksum\"] = checksum\n\n fn = os.path.join(self.backup_response_dir, request[\"request_fn\"] + \"_response\")\n with open(fn, \"w\") as resp_f:\n resp_f.write(json.dumps(response))\n os.remove(request[\"request_pathname\"])\n return status", "def finish(self):\n self.logger.debug(\"%s -> finish()\" % self)\n self.lines = ''.join(self.response_data).split(CRLF)\n\n if len(self.lines) < 1:\n raise nntplib.NNTPDataError(\"No data received\")\n\n self.response_code, self.response_message = self.lines[0][:3], \\\n self.lines[0][3:].strip()\n\n self.logger.debug(\"code = %s\" % self.response_code)\n self.logger.debug(\"msg = %s\" % self.response_message)", "def handleResponseEnd(self):\r\n try:\r\n if not self._finished:\r\n reactor.callInThread(\r\n self.resource.cacheContent,\r\n self.father,\r\n self._response,\r\n self.buffer\r\n )\r\n proxy.ProxyClient.handleResponseEnd(self)\r\n except RuntimeError:\r\n # because we don't care if the user hits\r\n # refresh before the request is done\r\n pass", "def process_resp(self, msg, operation, status, index):\n metric = \"%s.%d.%s\" % (METRIC_NAME, index, operation)\n self.results.append(Event(TIMESTAMP_MILLIS(), \"opentsdb\", metric, msg, status))\n if status == \"0\":\n self.cause.extend(msg)\n metric = \"%s.%d.%s\" % (METRIC_NAME, index, \"health\")\n analyse_status = MonitorStatus[\"red\"]\n self.results.append(Event(TIMESTAMP_MILLIS(), \"opentsdb\", metric, msg, analyse_status))", "def __init__(self):\n self._results = {}\n self._logs = {}", "def send_final_data(self):\n self.ctx['total'] = 
self.total_issues\n self.ctx['past_day'] = self.past_day\n self.ctx['past_week'] = self.past_week\n self.ctx['more_than_a_week'] = self.more_than_a_week\n\n return self.ctx", "def _postproc(self, request):\n if request.status_code != 200: raise Exception('wrong error code: {0}'.format(request.status_code))\n data = request.json()\n self.data = self._finalize_data(data)", "def final(self):\n pass", "def measure(self):\n self.completed = False\n self.message = ''\n for res in self.results:\n self.results[res] = None\n return self", "def finalise(self):", "def process(self, request):\n pass", "def writeResponse(response):", "def post_process(self):\n pass", "def post_process(self):\n pass", "def post_process(self):\n pass", "def post_process(self):\n pass", "def post_process(self):\n pass", "def _last_data_hook(self):\n data_dictionary = {}\n\n if self._return_code == self.OK:\n data_dictionary['status'] = 'Finished'\n elif self._return_code == self.ERROR:\n data_dictionary['status'] = 'Error'\n data_dictionary['speed'] = ''\n data_dictionary['eta'] = ''\n elif self._return_code == self.WARNING:\n data_dictionary['status'] = 'Warning'\n data_dictionary['speed'] = ''\n data_dictionary['eta'] = ''\n elif self._return_code == self.STOPPED:\n data_dictionary['status'] = 'Stopped'\n data_dictionary['speed'] = ''\n data_dictionary['eta'] = ''\n elif self._return_code == self.ALREADY:\n data_dictionary['status'] = 'Already Downloaded'\n else:\n data_dictionary['status'] = 'Filesize Abort'\n\n self._hook_data(data_dictionary)", "def finish(self):\n sev = 'info'\n desc = 'successful'\n col = 0\n if not self.derive_values and not self.gauge_values:\n sev = 'error'\n desc = 'unsuccessful'\n else:\n col = len(self.derive_values) + len(self.gauge_values)\n\n dur = time.time() - self.poll_start_time\n getattr(self.logger, sev)('%s poll %s' % (self.__class__.__name__, desc),\n extra={\"duration\": \"%.3f\" % dur, \"collected\": col})", "def perform(self):\n return", "def perform(self):\n return", "def _collect_data(self) -> None:\n self.set_websocket_data()\n self.set_stratum_data()\n self.set_cache_data()\n self.collect_peer_connection_metrics()\n self.set_tx_storage_data()", "def update_response(self, response):\n\n if self.resource['operation'] in PyMongoEvent.INSERT_OPERATIONS:\n self.handle_insert_operations_response(response)\n\n elif self.resource['operation'] in PyMongoEvent.FILTER_OPERATIONS:\n self.handle_filter_operations_response(response)", "def parse_response(self):\n pass", "def handle_response(self, response):\n\n self._tmp_request_args = {}\n self.cache_response(response)", "def response_handling(self) -> global___Snippet.StreamingResponseHandling:", "def response_handling(self) -> global___Snippet.StreamingResponseHandling:", "def _record_result(self, action, data, tags=None):\r\n if tags is None:\r\n tags = []\r\n\r\n tags.append(u'result:{}'.format(data.get('success', False)))\r\n tags.append(u'action:{}'.format(action))\r\n dog_stats_api.increment(self._metric_name('request.count'), tags=tags)", "def _process(self):\n self.kwargs[\"collect\"].change_status(self.kwargs[\"collect\"].FINALIZED)", "def after_request(response):\n # This avoids the duplication of registry in the log,\n # since that 500 is already logged via @app.errorhandler.\n if response.status_code != 500:\n ts = strftime('[%Y-%b-%d %H:%M]')\n message = '{0} {1} {2} {3} {4} {5}'.format(\n ts,\n request.remote_addr,\n request.method,\n request.scheme,\n request.full_path,\n response.status)\n print(message)\n return 
response", "def _send(self):\n executor_id = self.status['executor_id']\n job_id = self.status['job_id']\n call_id = self.status['call_id']\n act_id = self.status['activation_id']\n\n if self.status['type'] == '__init__':\n init_key = create_init_key(executor_id, job_id, call_id, act_id)\n self.internal_storage.put_data(init_key, '')\n\n elif self.status['type'] == '__end__':\n status_key = create_status_key(executor_id, job_id, call_id)\n dmpd_response_status = json.dumps(self.status)\n drs = sizeof_fmt(len(dmpd_response_status))\n logger.info(\"Storing execution stats - Size: {}\".format(drs))\n self.internal_storage.put_data(status_key, dmpd_response_status)", "def results(self):\r\n pass", "def execute(self, response):\n raise NotImplementedError()", "def Results(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def after(response):\n app.logger.info(\"Local Timestamp: {}\".format(str(datetime.now())))\n app.logger.info(\"Response Code: {}\".format(response.status))\n app.logger.info(\"Response Headers:{}\\n{}\\n{}\".format(\"-\"*43,str(response.headers)[:-3], \"-\"*60))\n # hide password from logs\n body = response.json\n if type(body) is dict and \"password\" in body:\n body['password'] = \"[provided]\"\n if type(body) is dict and \"access_token\" in body:\n body['access_token'] = \"[provided]\"\n app.logger.info(\"Response Body: {}\\n\".format(body))\n return response", "def finish(self, result: Dict):", "def post(self):\n raise NotImplementedError()", "def process(self):\n self.output_info = self.attributes.copy()", "def process_results(self, response, results):\n return results", "def process_results(self, response, results):\n return results", "def finalize(self):\n return", "def process(self):\n pass", "def perform_final_actions(self):\n for finalizer_function in self._finalizer_functions:\n finalizer_function()", "def process(self, payload, status_code=0):", "def process_response(self, response):\n json = response.json()\n for resp in json[\"responses\"]:\n sub_qry = self._current_query.get(int(resp[\"id\"]))\n self.context.pending_request().map_json(resp[\"body\"], sub_qry.return_type)", "def finalize(self):\n pass", "def finalize(self):\n pass", "def finalize(self):\n pass", "def finalize(self):\n pass", "def finalize(self):\n pass", "def finalize(self):\n pass", "def __response__(self) -> requests.Response:\n return self._self_response", "def process(self, results):\n raise NotImplementedError", "def results(self):\n pass", "def handle_response(self, response):\n self.__log(f'Received response from server. 
The code is: \"{response}\"')\n if not response.status_code == 200:\n self.handle_api_error(response)\n self.to_output_file(response.text)", "def __call__(self, request: HttpRequest) -> HttpResponse:\n ip_address = remote_addr(request)\n request.geo_data = self.geo_data(ip_address)\n response = self.get_response(request)\n if self.add_response_headers(request):\n annotate_response(response, request.geo_data)\n return response", "def process_after_request_hooks(self, resp):\n\n hooks = []\n meth_hooks = getattr(\n self,\n 'after_{method}_hooks'.format(method=self.meth),\n []\n )\n\n hooks.extend(meth_hooks)\n hooks.extend(self.after_all_hooks)\n\n if self.resource:\n hooks.extend(self.resource.after_all_hooks)\n hooks.extend(self.resource.api.after_all_hooks)\n\n for hook in chain(hooks):\n resp = hook(self, resp)\n\n return resp", "def simulate_response(self, documents):", "def _init_(self):\n self.res = {}", "def event_handler(self, response):\n pass", "def process(self):\n\n # validate processing\n if self.is_acceptable():\n # handle data and write log\n self.handle()", "async def _receive_updated_response(self, data):\n serialized_text_responses = await serialize_text_algo_api_response(data)\n await self.send_serialized_data(serialized_text_responses)", "def do_GET(self):\n # nothing more to do before handle_data()\n self.body = {}\n self.handle_data()", "def process_response(self, resp):\n status_code = self.get_status_code(resp)\n if not 200 <= status_code < 300:\n return resp\n\n try:\n (subject_id, method, version) = self._fetch_request_info(\n resp.request)\n except TypeError:\n return resp\n\n if method == 'GET' and status_code == 204:\n # Bugfix:1251055 - Don't cache non-existent subject files.\n # NOTE: Both GET for an subject without locations and DELETE return\n # 204 but DELETE should be processed.\n return resp\n\n method_str = '_process_%s_response' % method\n try:\n process_response_method = getattr(self, method_str)\n except AttributeError:\n LOG.error(_LE('could not find %s') % method_str)\n # Nothing to do here, move along\n return resp\n else:\n return process_response_method(resp, subject_id, version=version)" ]
[ "0.63405913", "0.63405913", "0.6215644", "0.61835784", "0.61431", "0.61020917", "0.6028117", "0.6016297", "0.5995891", "0.5948878", "0.5946144", "0.59437275", "0.59169286", "0.5887103", "0.5878732", "0.58776665", "0.58697927", "0.5867941", "0.5867941", "0.5843703", "0.58230263", "0.5812737", "0.57915246", "0.57915246", "0.5786817", "0.57814777", "0.5759068", "0.5758125", "0.5753212", "0.57487625", "0.5740849", "0.57364345", "0.5732983", "0.57293624", "0.57217234", "0.5720464", "0.5718075", "0.57164097", "0.5712789", "0.5703103", "0.5694426", "0.56882864", "0.5686702", "0.5684542", "0.56827754", "0.5682106", "0.56803536", "0.56683034", "0.56611633", "0.56611633", "0.56611633", "0.56611633", "0.56611633", "0.565728", "0.5653874", "0.5649313", "0.5649313", "0.5647752", "0.5644203", "0.5632481", "0.56306934", "0.56025624", "0.56025624", "0.55966866", "0.5589376", "0.558748", "0.5577691", "0.5571867", "0.555898", "0.55581206", "0.5544642", "0.5539987", "0.5532054", "0.5524914", "0.55204755", "0.55204755", "0.55163455", "0.551516", "0.5505215", "0.5501735", "0.54944503", "0.5489526", "0.5489526", "0.5489526", "0.5489526", "0.5489526", "0.5489526", "0.5482999", "0.54827994", "0.5481607", "0.54792935", "0.54783523", "0.5477041", "0.54748774", "0.54743373", "0.54687405", "0.5456455", "0.545552", "0.5453174", "0.5449082" ]
0.6289759
2
Inspect the response and remove any records that have been successfully sent. For each record, the index of the response element is the same as the index used in the request array.
def _strip_successful_records(cls, messages, response):
    success_ids = {
        item['Id'] for item in response['Successful']
    }

    LOGGER.info('Removing successful message indices from batch: %s', success_ids)

    for success_id in success_ids:
        # Get the successful message by ID and remove it
        message = cls._extract_message_by_id(messages, success_id)
        if not message:
            continue
        messages.remove(message)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean_none_response(self):\n\n print(\"# Rows before non response are removed: {} \".format(len(self.data)))\n self.data = self.data[self.data['names'].map(lambda d: len(d) > 0)]\n print(\"# Rows after non response are removed: {} \".format(len(self.data)))", "def _finalize(self, response, batch):\n if not response:\n return # Could happen in the case of backoff failing enitrely\n\n # Check for failures that occurred in PutRecordBatch after several backoff attempts\n # And log the actual record from the batch\n failed = self._check_failures(response, batch=batch)\n\n # Remove the failed messages in this batch for an accurate metric\n successful_records = len(batch) - failed\n\n MetricLogger.log_metric(FUNCTION_NAME, MetricLogger.SQS_RECORDS_SENT, successful_records)\n LOGGER.info(\n 'Successfully sent %d message(s) to queue %s',\n successful_records,\n self.queue.url\n )", "def cleanupRequests(n=10):\n\n # formula for filtering data from airtable\n formula = 'AND(DATETIME_DIFF(NOW(), {Last Modified}, \"days\") > 30, Status = \"Request Complete\")'\n\n # airtable query\n headers = {\"Authorization\": \"Bearer {}\".format(os.environ['AIRTABLE_AUTH_TOKEN'])}\n params = params = {\n 'maxRecords': 10,\n 'view': 'All Requests + Data',\n 'sortField':'Last Modified',\n 'sortDirection': 'asc',\n 'filterByFormula': formula\n\n }\n\n\n r = requests.get(os.environ['PROD_URL'], headers=headers, params=params)\n\n # if status code is good ...\n if r.status_code == 200:\n\n # instantiate twilio client\n client = Client(os.environ['ACCOUNT_SID'], os.environ['TWILIO_AUTH_TOKEN'])\n\n # iterate through records\n for record in r.json()['records']:\n\n data = {\n 'fields':\n {'Message': \"\",\n 'First Name': \"\"\n }\n }\n\n # patch the requisite fields\n r = requests.patch(\n os.environ['PROD_URL'] + record['id'] , headers=headers, json=data\n )\n\n # erase the recordings associated with the call SID\n call_sid = record['fields']['Twilio Call Sid']\n call = client.calls(call_sid).fetch()\n\n for recording_sid in call.recordings.list():\n client.recordings(recording_sid).delete()\n\n # confirm deletion\n successfully_deleted = 0\n r = requests.get(os.environ['PROD_URL'] + record['id'], headers=headers)\n call = client.calls(call_sid).fetch()\n\n if all([r.status_code == 200, \n 'Message' not in r.json().keys(), \n 'First Name' not in r.json().keys(),\n len(call.recordings.list()) == 0]):\n print('succesfully deleted')\n successfully_deleted += 1\n \n else:\n print('error')\n\n return str(successfully_deleted)", "def remove_record_failure():\n\t\tpass", "def sanitize_reply_buffer(self): \n for i in self.async_reply_buffer:\n\n if not i.endswith('\\n'):\n \n i = self.async_reply_buffer.index(i)\n temp = self.async_reply_buffer\n #with suppress(IndexError):\n if i+1 == len(temp):\n return 'SANFAIL'\n if i < len(temp):\n #print(i)\n #print(len(temp))\n #print(temp)\n #print(temp[i])\n #print(temp[i+1])\n temp[i] = temp[i] + temp[i+1]\n temp.pop(i+1)\n self.async_reply_buffer = temp\n\n\n #print(self.async_reply_buffer)", "def test_handle_response_remove_request_from_pending(self):\n lookup = Lookup(FindNode, self.target, self.node, self.event_loop)\n uuid = [uuid for uuid in lookup.pending_requests.keys()][0]\n contact = lookup.shortlist[0]\n msg = Value(uuid, self.node.network_id, self.node.network_id,\n self.reply_port, self.version, self.seal, self.target,\n 'value', time.time(), time.time() + 99999, self.version,\n PUBLIC_KEY, 'name', 'signature')\n response = asyncio.Future()\n 
response.set_result(msg)\n lookup._handle_response(uuid, contact, response)\n self.assertNotIn(uuid, lookup.pending_requests.keys())", "def _flush(self):\n buffer_len = len(self._buffer)\n\n if buffer_len == 0:\n _log.info('No pending records to index; URI: %s; index: %s',\n self._uri, self._index)\n return\n\n _log.info('Indexing %d records; URI: %s; index: %s ...',\n buffer_len, self._uri, self._index)\n\n headers = {'Authorization': 'Splunk ' + self._token}\n\n try:\n response = self._session.post(self._uri,\n headers=headers,\n data=json.dumps(self._buffer),\n verify=self._ca_cert)\n\n log_data = ('URI: {}; index: {}; response status: {}; '\n 'response content: {}'\n .format(self._uri, self._index,\n response.status_code, response.text))\n\n if response.status_code != 200:\n _log.error('Failed to index %d records; HTTP status '\n 'code indicates error; %s',\n buffer_len, log_data)\n return\n\n try:\n j = response.json()\n except Exception as e:\n _log.error('Failed to get JSON from response; %s; '\n 'error: %s; %s', log_data, type(e).__name__, e)\n return\n\n if j['code'] != 0:\n _log.error('Failed to index %d records; Splunk status '\n 'code in JSON indicates error; %s',\n buffer_len, log_data)\n return\n\n _log.info('Indexed %d records; %s', buffer_len, log_data)\n del self._buffer[:]\n\n except requests.ConnectionError as e:\n _log.error('Failed to index %d records; connection error; '\n 'URI: %s; index: %s; error: %s: %s; ',\n buffer_len, self._uri, self._index,\n type(e).__name__, e)\n\n except Exception as e:\n _log.error('Failed to index %d records; unexpected error; '\n 'URI: %s; index: %s; error: %s: %s',\n buffer_len, self._uri, self._index,\n type(e).__name__, e)", "def __send_responses(self):\n # create a copy of the responses\n responses = self.__responses\n # for every response\n for response in responses:\n # send the response\n self.__send(response)\n # remove the response from the responses' list\n if response in self.__responses:\n self.__responses.remove(response)", "def _check_failures(self, response, batch=None):\n if not response.get('Failed'):\n return 0 # nothing to do here\n\n LOGGER.error('The following records failed to put to queue %s', self.queue.url)\n\n for failure in response['Failed']:\n # Pull out the record that matches this ID\n record = self._extract_message_by_id(batch, failure['Id']) if batch else None\n LOGGER.error(self._format_failure_message(failure, record=record))\n\n failed = len(response.get('Failed', []))\n self._log_failed(failed)\n\n # Raise an exception if this is the fault of the sender (us)\n if any(result['SenderFault'] for result in response['Failed']):\n raise SQSClientError('Failed to send records to SQS:\\n{}'.format(response))\n\n return failed", "def clearList(self):\n\n if not RequestsDAO().getRequests():\n return jsonify(Error=\"No requests found\"), 404\n else:\n\n RequestsDAO().truncateTurnTable()\n return jsonify(TURN=\"Table content was deleted\"), 200", "def _recordsToResponse(self, records):\n fieldsList = []\n count = 0\n if records:\n size = 0\n while size < self._maxSize:\n try:\n record = records.pop()\n except (KeyError, IndexError):\n # We're done.\n # Note: because records is an iterable (list or set)\n # we're catching both KeyError and IndexError.\n break\n pickled = pickle.dumps(self.recordToDict(record))\n size = size + len(pickled)\n fieldsList.append(pickled)\n count += 1\n\n response = {\"items\": fieldsList}\n\n if records:\n response[\"continuation\"] = self._storeContinuation(records, \"records\")\n\n 
return response", "def drop_matching_records(self, check):\n matches = self._match(check)\n for rec in matches:\n self._drop_bytes(rec)\n del self._records[rec['msg_id']]", "def _on_tracking_failure(self, response, data):\n try:\n response = json.loads(response)\n except:\n # the response should be in JSON, but in case it can't be parsed just try another attempt\n logging.debug(\"cannot parse tracker response, should be valid JSON\")\n return response\n\n # remove the successfully tracked hits from payload\n tracked = response['tracked']\n data['requests'] = data['requests'][tracked:]\n\n return response['message']", "def test_response(self):\n for i, response in enumerate(RESPONSES):\n with self.subTest(i=i):\n self.assertDictContainsSubset(response, dict(self.responses[i].data))", "def reset(self):\n # Remove all successful action records\n to_remove = []\n for action_record, (p_valid, result_text) in self.action_records.items():\n if p_valid > .5:\n to_remove.append(action_record)\n for action_record in to_remove:\n del self.action_records[action_record]", "def response( self, request, error_code, data ):\n array = []\n if request == b'CAUTH' and data != self.__null_byte:\n # process differently\n data_array = self.ds_document.break_data(data)\n # print('after data is broken: {}'.format(data_array))\n for item in data_array: # for all the items we have to generate a different timestamp and checkum\n timestamp = self.get_time()\n checksum = self.get_checksum(timestamp, item)\n array.append([request, checksum, timestamp, error_code, item])\n # print(array)\n # print(array)\n return array\n\n else: # if we are sending a generic response, then\n timestamp = self.get_time()\n checksum = self.get_checksum(timestamp, data)\n\n array = [request, checksum, timestamp, error_code, data]\n return array", "def test_ten_results_returned(delete_previous_db_record):\n request = create_client().gateway.getResults(\n search=\"some string\").response()\n\n # Assert sucessful request\n assert_that(request.result.status, equal_to('200'))\n\n \"\"\"\n I'm assuming the json object uses a list to contain\n the results\n \"\"\"\n assert_that(len(request.result.results, equal_to(10)))", "def test_remove_expired(self):\n req1 = FakeRequest(1, True)\n req2 = FakeRequest(2, False)\n req3 = FakeRequest(3, True)\n req4 = FakeRequest(4, True)\n req5 = FakeRequest(5, False)\n self.request_buffer.append(req1)\n self.request_buffer.append(req2)\n self.request_buffer.append(req3)\n self.request_buffer.append(req4)\n self.request_buffer.append(req5)\n\n self.request_buffer.remove_expired()\n\n self.assertTrue(\n req2 in self.request_buffer.requests and\n req5 in self.request_buffer.requests\n )", "def consume_data(self, data):\n # Get parameters\n logger_manager = data['logger_manager']\n doc_m = data['document_manager']\n message_id = data['message_id']\n documents = data['documents']\n to_remove_queue = data['to_remove_queue']\n duplicates = no_requestInTs = 0\n hash_set = set()\n\n for current_document in documents:\n\n # Mark to removal documents without requestInTs immediately (as of bug in xRoad software ver 6.22.0)\n if current_document['requestInTs'] is None and current_document['securityServerType'] is None:\n to_remove_queue.put(current_document['_id'])\n no_requestInTs += 1\n self.db_m.mark_as_corrected(current_document)\n \"\"\"\n :logger_manager.log_warning('no_requestInTs',\n :'_id : ObjectId(\\'' + str(current_document['_id']) + '\\'),\n :messageId : ' + str(current_document['messageId']))\n \"\"\"\n 
continue\n\n # Check if is batch duplicated\n current_document_hash = doc_m.calculate_hash(current_document)\n if current_document_hash in hash_set:\n # If yes, mark to removal\n to_remove_queue.put(current_document['_id'])\n duplicates += 1\n self.db_m.mark_as_corrected(current_document)\n \"\"\"\n :logger_manager.log_warning('batch_duplicated',\n :'_id : ObjectId(\\'' + str(current_document['_id']) + '\\'),\n :messageId : ' + str(current_document['messageId']))\n \"\"\"\n continue\n\n # Check if is database duplicated\n if self.db_m.check_if_hash_exists(current_document_hash):\n # If here, add to batch duplicate cache\n hash_set.add(current_document_hash)\n duplicates += 1\n self.db_m.mark_as_corrected(current_document)\n \"\"\"\n :logger_manager.log_warning('database_duplicated',\n :'_id : ObjectId(\\'' + str(current_document['_id']) + '\\'),\n :messageId : ' + str(current_document['messageId']))\n \"\"\"\n continue\n\n # Mark hash as seen\n hash_set.add(current_document_hash)\n # Find possible matching documents\n matching_documents = self.db_m.find_by_message_id(current_document)\n # Try to match the current document with possible pairs (regular)\n merged_document = doc_m.find_match(current_document, matching_documents)\n matching_type = ''\n\n if merged_document is None:\n # Try to match the current document with orphan-matching\n merged_document = doc_m.find_orphan_match(current_document, matching_documents)\n if merged_document is not None:\n matching_type = 'orphan_pair'\n else:\n matching_type = 'regular_pair'\n\n if merged_document is None:\n matching_type = 'orphan'\n if current_document['securityServerType'] == 'Producer':\n new_document = doc_m.create_json(None, current_document, None, current_document_hash, message_id)\n else:\n if current_document['securityServerType'] != 'Client':\n current_document['securityServerType'] = 'Client'\n new_document = doc_m.create_json(current_document, None, current_document_hash, None, message_id)\n\n new_document = doc_m.apply_calculations(new_document)\n new_document['correctorTime'] = database_manager.get_timestamp()\n new_document['correctorStatus'] = 'processing'\n new_document['matchingType'] = matching_type\n\n # Mark non-xRoad queries as 'done' instantly. 
No reason to wait matching pair\n if 'client' in new_document and new_document['client'] is not None and 'clientXRoadInstance' in new_document['client'] \\\n and new_document['client']['clientXRoadInstance'] is None:\n new_document['correctorStatus'] = 'done'\n new_document['matchingType'] = 'orphan'\n\n self.db_m.add_to_clean_data(new_document)\n\n else:\n\n if current_document['securityServerType'] == 'Client':\n\n if merged_document['client'] is None:\n merged_document['client'] = current_document\n merged_document = doc_m.apply_calculations(merged_document)\n merged_document['clientHash'] = current_document_hash\n merged_document['correctorTime'] = database_manager.get_timestamp()\n merged_document['correctorStatus'] = 'done'\n merged_document['matchingType'] = matching_type\n self.db_m.update_document_clean_data(merged_document)\n else:\n # This should never-ever happen in >= v0.4.\n msg = '[{0}] 2 matching clients for 1 producer: {1}'.format(self.worker_name, current_document)\n logger_manager.log_warning('corrector_merging', msg)\n\n else:\n\n if merged_document['producer'] is None:\n merged_document['producer'] = current_document\n merged_document = doc_m.apply_calculations(merged_document)\n merged_document['producerHash'] = current_document_hash\n merged_document['correctorTime'] = database_manager.get_timestamp()\n merged_document['correctorStatus'] = 'done'\n merged_document['matchingType'] = matching_type\n self.db_m.update_document_clean_data(merged_document)\n else:\n # This should never-ever happen in >= v0.4.\n msg = '[{0}] 2 matching producers for 1 client: {1}'.format(self.worker_name, current_document)\n logger_manager.log_error('corrector_merging', msg)\n\n self.db_m.mark_as_corrected(current_document)\n\n if no_requestInTs:\n msg = '[{0}] {1} document(s) without requestInTs present'.format(self.worker_name, no_requestInTs)\n logger_manager.log_warning('corrector_no_requestInTs', msg)\n\n return duplicates", "def curent_sesion_cleanup(self):\r\n\r\n for key,value in self.curent_sesion.items():\r\n for idx in value:\r\n requests.delete(key + str(idx), headers=self.headers)\r\n for check in requests.get(key,headers=self.headers).json()['results']:\r\n if idx in check.values():\r\n return False\r\n self.curent_sesion[key].clear()\r\n return True", "def forget(self, request):\n return []", "def forget(self, request):\n return []", "def check_for_requests(self):\n while True:\n doc = self.cc.requests_coll.find_one_and_delete(\n {'receiver': 'validator'}, sort=[('_id', pymongo.ASCENDING)]\n )\n if doc is None:\n break\n\n if doc['action'] == 'validate_upload':\n print(\"fulfil request: set valid: {} for upload_id {}\".format(doc['valid'], doc['upload_id']))\n self.validate_upload(ObjectId(doc['upload_id']), doc['valid'])", "def remove_deleted_dos_records():\n count = 0\n dos = DirectlyObservedSprayingForm.objects.last()\n formid = dos.data.get(\"_xform_id\") if dos else DIRECTLY_OBSERVED_FORM_ID\n if formid:\n data = fetch_form_data(formid, dataids_only=True)\n if not data:\n return count\n\n pks = [i[\"_id\"] for i in data]\n deleted_submissions = DirectlyObservedSprayingForm.objects.exclude(\n submission_id__in=pks\n )\n count = deleted_submissions.count()\n deleted_submissions.delete()\n\n return count", "def DeleteResponseHeader(self, name):\n assert name.islower()\n self._wpr_response.original_headers = \\\n [x for x in self._wpr_response.original_headers if x[0].lower() != name]", "def process_response(request, response):\n # A higher middleware layer may return a 
request which does not contain\n # messages storage, so make no assumption that it will be there.\n if hasattr(request, '_events'):\n # noinspection PyProtectedMember\n unstored_events = request._events.update(response)\n if unstored_events and settings.DEBUG:\n raise ValueError('Not all temporary events could be stored.')\n return response", "def simulate_response(self, documents):", "def end(response):\n if isinstance(response.response, ClosingIterator):\n return response\n\n diff = time.time() - request.start\n del request.start\n\n if response.response:\n response.response[0] = response.response[0].replace('__EXECUTION_TIME__', '{:.3}'.format(diff))\n response.headers[\"content-length\"] = len(response.response[0])\n\n return response", "def delete_session_records(self):\n self._session_records.reverse()\n self.builtin.log(\"Deleting {} records\".format(len(self._session_records)))\n for record in self._session_records[:]:\n self.builtin.log(\" Deleting {type} {id}\".format(**record))\n try:\n self.salesforce_delete(record[\"type\"], record[\"id\"])\n except SalesforceResourceNotFound:\n self.builtin.log(\" {type} {id} is already deleted\".format(**record))\n except Exception as e:\n self.builtin.log(\n \" {type} {id} could not be deleted:\".format(**record),\n level=\"WARN\",\n )\n self.builtin.log(\" {}\".format(e), level=\"WARN\")", "def process_incoming_response(self, response):\n # Validate the response.\n if not {\"__id\", \"__data\", \"__error\"}.issubset(iterkeys(response)):\n self.disconnect(\"Bad response received\")\n logger.warning(\"Response is missing some fields, ignoring.\")\n return\n\n # Determine the ID.\n id_ = response[\"__id\"]\n\n if id_ not in self.pending_outgoing_requests:\n logger.warning(\"No pending request with id %s found.\", id_)\n return\n\n request = self.pending_outgoing_requests.pop(id_)\n result = self.pending_outgoing_requests_results.pop(id_)\n error = response[\"__error\"]\n\n if error is not None:\n err_msg = \"%s signaled RPC for method %s was unsuccessful: %s.\" % (\n self.remote_service_coord, request[\"__method\"], error)\n logger.error(err_msg)\n result.set_exception(RPCError(error))\n else:\n result.set(response[\"__data\"])", "def verify_response_block_list(self, response):\n self.assertSetEqual(\n {block['id'] for block in response.data},\n self.non_orphaned_block_usage_keys,\n )", "def cleanup(self, batch: PayloadDictList, need_retry: bool) -> None:\n if not need_retry:\n return\n\n for event in batch:\n if not event in self.event_buffer:\n if not self.add_event(event):\n return", "def test_view_response(self, mock_mailgun_client, mock_prepare_exec_search):\n email_results = ['[email protected]', '[email protected]']\n mock_prepare_exec_search.return_value = email_results\n mock_mailgun_client.send_batch.return_value = [\n Mock(spec=Response, status_code=HTTP_200_OK, json=mocked_json()),\n Mock(spec=Response, status_code=HTTP_400_BAD_REQUEST, json=mocked_json()),\n ]\n resp_post = self.client.post(self.mail_url, data=self.request_data, format='json')\n assert resp_post.status_code == HTTP_200_OK\n assert len(resp_post.data.keys()) == 2\n for num in range(2):\n batch = 'batch_{0}'.format(num)\n assert batch in resp_post.data\n assert 'status_code' in resp_post.data[batch]\n assert 'data' in resp_post.data[batch]\n assert resp_post.data['batch_0']['status_code'] == HTTP_200_OK\n assert resp_post.data['batch_1']['status_code'] == HTTP_400_BAD_REQUEST", "def mock_api_stage_success_delete_schedule() -> List[bytes]:\n return 
create_standard_packets_list(DUMMY_DELETE_SCHEDULE_RESPONSE)", "def deletion_requests(_):\n return set()", "def _clear_results(self):\n self.sentence_data = []", "def _clean_outdated(self):\n now = _now()\n outdated = []\n for request_no, request_info in self._current_requests.items():\n if now - request_info.start_time > self._force_clean_after:\n outdated.append(request_no)\n if outdated:\n logging.error(\"There are {} requests which were started but haven't \"\n \"been finished in more than {}s.\"\n .format(len(outdated), self._force_clean_after))\n for request_no in outdated:\n del self._current_requests[request_no]\n self._last_autoclean_time = now", "def send_resp(self):\n self.n_send_resp += 1", "def WriteFlowResponses(self, responses):\n status_available = {}\n requests_updated = set()\n task_ids_by_request = {}\n\n for response in responses:\n flow_key = (response.client_id, response.flow_id)\n if flow_key not in self.flows:\n logging.error(\"Received response for unknown flow %s, %s.\",\n response.client_id, response.flow_id)\n continue\n\n request_dict = self.flow_requests.get(flow_key, {})\n if response.request_id not in request_dict:\n logging.error(\"Received response for unknown request %s, %s, %d.\",\n response.client_id, response.flow_id, response.request_id)\n continue\n\n response_dict = self.flow_responses.setdefault(flow_key, {})\n clone = response.Copy()\n clone.timestamp = rdfvalue.RDFDatetime.Now()\n\n response_dict.setdefault(response.request_id,\n {})[response.response_id] = clone\n\n if isinstance(response, rdf_flow_objects.FlowStatus):\n status_available[(response.client_id, response.flow_id,\n response.request_id, response.response_id)] = response\n\n request_key = (response.client_id, response.flow_id, response.request_id)\n requests_updated.add(request_key)\n try:\n task_ids_by_request[request_key] = response.task_id\n except AttributeError:\n pass\n\n # Every time we get a status we store how many responses are expected.\n for status in status_available.values():\n request_dict = self.flow_requests[(status.client_id, status.flow_id)]\n request = request_dict[status.request_id]\n request.nr_responses_expected = status.response_id\n\n # And we check for all updated requests if we need to process them.\n needs_processing = []\n for client_id, flow_id, request_id in requests_updated:\n flow_key = (client_id, flow_id)\n flow = self.flows[flow_key]\n request_dict = self.flow_requests[flow_key]\n request = request_dict[request_id]\n\n added_for_processing = False\n if request.nr_responses_expected and not request.needs_processing:\n response_dict = self.flow_responses.setdefault(flow_key, {})\n responses = response_dict.get(request_id, {})\n\n if len(responses) == request.nr_responses_expected:\n request.needs_processing = True\n self._DeleteClientActionRequest(client_id, flow_id, request_id)\n\n if flow.next_request_to_process == request_id:\n added_for_processing = True\n needs_processing.append(\n rdf_flows.FlowProcessingRequest(\n client_id=client_id, flow_id=flow_id))\n\n if (request.callback_state and\n flow.next_request_to_process == request_id and\n not added_for_processing):\n needs_processing.append(\n rdf_flows.FlowProcessingRequest(\n client_id=client_id, flow_id=flow_id))\n\n if needs_processing:\n self.WriteFlowProcessingRequests(needs_processing)\n\n return needs_processing", "def remove_field_from_every_item_in_response_copy(context, name):\n items = context.response_copy['items']\n for item in items:\n print(item)\n if item['owner']['user_type'] == 
'does_not_exist':\n continue\n if name in item:\n del(item[name])\n logging.debug(\n 'Successfully removed field \"%s\" from item: %s', name,\n item['question_id']) \n else:\n logging.debug(\n 'Item %s does not contain field \"%s', name, item) \n logging.debug(\n 'Response copy after removing \"%s\" field:\\n%s', name,\n pformat(context.response_copy))", "def _decrease_counter(self, response):\n response_id = response.meta['__id']\n spot = self._request_registry[response_id]\n spot['counter'] = spot.get('counter', 0) - 1", "def cleanup(batch: PayloadDictList, need_retry: bool) -> None:\n ...", "def _itemsToResponse(self, items):\n itemsToSend = []\n count = 0\n if items:\n size = 0\n while size < self._maxSize:\n try:\n item = items.pop()\n except (KeyError, IndexError):\n # We're done.\n # Note: because records is an iterable (list or set)\n # we're catching both KeyError and IndexError.\n break\n size = size + len(item)\n itemsToSend.append(item)\n count += 1\n\n response = {\"items\": itemsToSend}\n\n if items:\n response[\"continuation\"] = self._storeContinuation(items, \"items\")\n\n return response", "def _process_response(self, response, marker_elems=None):\r\n body = response.read()\r\n #print body\r\n if '<Errors>' not in body:\r\n rs = ResultSet(marker_elems)\r\n h = handler.XmlHandler(rs, self)\r\n xml.sax.parseString(body, h)\r\n return rs\r\n else:\r\n raise MTurkRequestError(response.status, response.reason, body)", "def StopRecording( self ): \r\n\r\n self._socket.write( 'E' ) \r\n \r\n return self.GetServerResponse()", "def recv_resp(self):\n self.n_recv_resp += 1", "def test_delete_data(self):\n response = self.client.delete(self.url + str(self.current_data[-1]['id']) + '/')\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n list_response_new = sorted(response.json(), key=operator.itemgetter('id'))\n self.assertLess(len(list_response_new), len(self.current_data))\n self.assertListEqual(list_response_new, self.current_data[:-1])", "def remove_all(self):\n if self._processed:\n res, messages = self._mailconn.search(None, 'ALL')\n if res == 'OK':\n for msg in messages[0].split():\n res, data = self._mailconn.store(msg.decode('utf-8'), '+FLAGS', '\\\\Deleted')\n print(res)", "def test_no_position(self):\n data = self.valid_payload\n data['position'] = \"\"\n response1 = self.client.post(\n reverse('contacts'),\n data=json.dumps(data),\n content_type='application/json'\n )\n self.assertEqual(response1.status_code, status.HTTP_400_BAD_REQUEST)\n del data[\"position\"]\n response = self.client.post(\n reverse('contacts'),\n data=json.dumps(data),\n content_type='application/json'\n )\n self.assertEqual(response1.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def forget(self, response, request):\n pass", "def _process_response(self, resp):\n signals = []\n resp = resp.json()\n fresh_posts = posts = resp['data']\n paging = resp.get(self._paging_field) is not None\n self.logger.debug(\"Facebook response contains %d posts\" % len(posts))\n\n # we shouldn't see empty responses, but we'll protect our necks.\n if len(posts) > 0:\n self.update_freshness(posts)\n fresh_posts = self.find_fresh_posts(posts)\n paging = len(fresh_posts) == self.limit()\n\n # store the timestamp of the oldest fresh post for use in url\n # preparation later.\n if 
len(fresh_posts) > 0:\n self.prev_stalest = self.created_epoch(fresh_posts[-1])\n\n signals = [FacebookSignal(p) for p in fresh_posts]\n self.logger.debug(\"Found %d fresh posts\" % len(signals))\n\n return signals, paging", "def transform_misses(record):\n \n response = {}\n response[\"datasetId\"] = dict(record).get(\"stableId\") \n response[\"internalId\"] = dict(record).get(\"datasetId\")\n response[\"exists\"] = False\n # response[\"datasetId\"] = '' \n response[\"variantCount\"] = 0\n response[\"callCount\"] = 0\n response[\"sampleCount\"] = 0\n response[\"frequency\"] = 0 \n response[\"numVariants\"] = 0 \n response[\"info\"] = {\"access_type\": dict(record).get(\"accessType\")}\n\n return response", "def _get_resends(self):\n if not self.has_error():\n return []\n\n errors = []\n i = 0\n for item in self.my_json['results']:\n if item.has_key('error') and item['error'] == 'Unavailable':\n errors.append((i, item['error']))\n i += 1\n return errors", "def clear(self):\n self.failures.clear()", "def reset(self):\n self.final_result = Milter.ACCEPT\n self.new_headers = []", "def handle_delete_response(self, response):\n\n self.handle_response(response)", "def remove_record():\n # could use .../record/<name> in URL or as in this case as an argument .../record?name=bob\n if 'name' not in request.args:\n return \"need a name to delete a record!\", 400\n with RECORD_LOCK:\n if len([r for r in RECORDS if r.get('name') == request.args.get('name')]) == 0:\n return \"no such record found!\", 409\n RECORDS[:] = [r for r in RECORDS if r.get( # copy all but name matches\n 'name') != request.args.get('name')]\n return \"OK\"", "def delete_record(self):\n for record in self.records:\n if self.date_str == record[\"date\"]:\n self.records.remove(record)\n if len(self.records) > 0:\n self.write_json_file(self.records_file, self.records)\n else:\n os.remove(self.records_file)\n return True\n return False", "def test_expunge(self):\n d = self._expunge()\n self._response([3, 3, 5, 8])\n self.assertEqual(self.successResultOf(d), [3, 3, 5, 8])", "def process_response(self, request, response):\r\n microsite.clear()\r\n return response", "def run(self, batch):\n response = self.post(batch)\n log.info(\"< Discarding batch response\")\n response.close()", "def AckFlowProcessingRequests(self, requests):\n for r in requests:\n key = (r.client_id, r.flow_id)\n if key in self.flow_processing_requests:\n del self.flow_processing_requests[key]", "def delete_response(self):\n deriva_ctx.deriva_response.status = '204 No Content'\n return deriva_ctx.deriva_response", "def deep_processing_rerun_all(self):\r\n sql = \"\"\"SELECT * FROM emails \r\n WHERE email_status = 'processing' \r\n AND clean_type = 1\"\"\"\r\n df = self.db.read_sql(sql)\r\n\r\n for i in range(df.index.size):\r\n rec = df.loc[i, :]\r\n self.deep_clean_one(rec[EMAIL], dealno=rec['dealno'])\r\n self.db.con.commit()\r\n print('Reprocessed {} records that were stuck in the processing status'.format(df.index.size))", "def cleanup(self):\n deletes = []\n for item in self._collect.find({'status': 'started'}, {'_id': True}):\n deletes.append(pymongo.DeleteOne(item))\n # Remove them\n if len(deletes):\n print(\"Delete\", self._collect.bulk_write(deletes).deleted_count)", "def clean_collection(previous_records, collection):\n for previous_record in previous_records:\n collection.delete_one({\"_ref\": previous_record})", "def scrub_response(self, data):\n return self.__response_scrubber(data)", "def 
test_handle_response_still_nodes_uncontacted_in_shortlist(self):\n lookup = Lookup(FindNode, self.target, self.node, self.event_loop)\n lookup._lookup = mock.MagicMock()\n uuids = [uuid for uuid in lookup.pending_requests.keys()]\n uuid = uuids[0]\n contact = lookup.shortlist[0]\n # Only one item in pending_requests\n for i in range(1, len(uuids)):\n del lookup.pending_requests[uuids[i]]\n self.assertEqual(1, len(lookup.pending_requests))\n # Add K-1 items from shortlist to the contacted set.\n for i in range(K - 1):\n lookup.contacted.add(lookup.shortlist[i])\n # Ensure lookup is called with the 20th (uncontacted) contact.\n not_contacted = lookup.shortlist[K - 1]\n self.assertNotIn(not_contacted, lookup.contacted)\n msg = Nodes(uuid, self.node.network_id, self.node.network_id,\n self.reply_port, self.version, self.seal,\n self.contacts)\n response = asyncio.Future()\n response.set_result(msg)\n lookup._handle_response(uuid, contact, response)\n self.assertEqual(lookup._lookup.call_count, 1)\n self.node.send_find.called_once_with(not_contacted, self.target,\n FindNode)", "def process_response(self, request, response):\n return response", "def process_response(self, request, response):\n return response", "def process_response(response):\n # Print it and exit with 1 if operation wasn't successful\n print(response['message'])\n if response['status'] != 'success':\n sys.exit(1)", "def test_transform_misses(self):\n response = {\"referenceBases\": '', \"alternateBases\": '', \"variantType\": \"\",\n \"frequency\": 0, \"callCount\": 0, \"sampleCount\": 0, \"variantCount\": 0,\n \"start\": 0, \"end\": 0, \"info\": {\"accessType\": \"PUBLIC\"}}\n record = Record(\"PUBLIC\")\n result = transform_misses(record)\n self.assertEqual(result, response)", "def test_process_response(self):\n t = self.create_request_object()\n response_content = u\"\"\" <Response ReferenceNumber=\"82e942b0-48e8-4cf4-b299-51e2b6a89a1b\"\n InboundODMFileOID=\"\"\n IsTransactionSuccessful=\"1\"\n SuccessStatistics=\"Rave objects touched: Subjects=0; Folders=0; Forms=0; Fields=0; LogLines=0\" NewRecords=\"\">\n </Response>\n \"\"\"\n req = mock.Mock(requests.Request, text=response_content)\n response = t.result(req)\n self.assertTrue(isinstance(response, RWSResponse))", "def resend_buffer_packets():\n global BUFFER\n for seq in BUFFER.keys():\n packet_info = BUFFER[seq]\n msg_obj = packet_info.msg\n new_start = time.time()\n handle_packet_send(msg_obj)\n new_packet_info = PacketInfo(msg_obj, new_start)\n # Update the packet in the buffer with the new time sent at\n BUFFER[seq] = new_packet_info", "def test_response_reusage(self):\n\n post1 = self._create_db_post(content=\"@test I need a foo.\",\n channel=self.sc.inbound,\n demand_matchables=True,\n user_profile={'screen_name': 'customer'})\n self.assertTrue(self.sc.inbound_channel.is_assigned(post1))\n\n conv1 = self.sc.upsert_conversation(post1)\n post2 = self._create_db_post(content=\"I still need a foo!\",\n channel=self.sc.inbound,\n demand_matchables=True,\n user_profile={'screen_name': 'customer'})\n conv2 = self.sc.upsert_conversation(post2)\n\n resp1 = Response.objects.upsert_from_post(post1)\n resp2 = Response.objects.upsert_from_post(post2)\n self.assertEqual(conv1.id, conv2.id)\n self.assertEqual(resp1.id, resp2.id)\n self.assertTrue(resp2.post_date > resp1.post_date)", "def clear_unsuccessful_results(self):\n results = [x for x in self.get_results() if x.return_code == 0]\n self._write_results(results)\n logger.info(\"Cleared failed results from %s\", 
self._filename)", "def cancel_tasks(self) -> None:\n for group in self._queue.values():\n for expected_response in group.values():\n expected_response.set(None)\n self._queue = defaultdict(OrderedDict)", "def response_document( self, request, error_code, data_array ):\n\n if (request == b'CAUTH' or request == b'S_DATA') and type(data_array) is list:\n array = []\n for item in data_array: # for all the items we have to generate a different timestamp and checkum\n timestamp = self.get_time()\n checksum = self.get_checksum(timestamp, item)\n array.append([request, checksum, timestamp, error_code, item])\n # print(array)\n # print(array)\n return array", "def handle_response(self):\r\n call_if_not_none_and_callable(self._eventHandlers.popleft(),\r\n response=self.response)", "def push_transis_response_to_kinesis(self, transis_response, di_framework_client):\n detector_count_messages = transis_response.detector_count_messages.detector_count_message_list\n records = [e.to_dict() for e in detector_count_messages]\n self.kinesis_producer.push_transis_detector_count_records(records, di_framework_client)\n return {\n \"records_in_xml_doc\": len(records),\n \"collectionendtimestamp_plus_3_mins\": detector_count_messages[0].collectionendtimestamp_plus_3_mins,\n \"response_received_timestamp\": transis_response.response_received_timestamp\n }", "def process_response(self, response):\n return response", "def modify_requests(rf):\n post_url = '/docs/modify/'\n request_succ = [{\n \"request\": gen_request(rf, post_url, {\n \"user_id\": 1,\n \"type\": \"fetch\",\n \"document_id\": 1,\n }),\n \"response\": {\n \"code\": 200,\n }\n },\n {\n \"request\": gen_request(rf, post_url, {\n \"user_id\": 2,\n \"type\": \"modify\",\n \"document_id\": 2,\n \"content\": \"I will modify content!\"\n }),\n \"response\": {\n \"code\": 200,\n }\n }, {\n \"request\": gen_request(rf, post_url, {\n \"user_id\": 3,\n \"type\": \"delete\",\n \"document_id\": 3,\n }),\n \"response\": {\n \"code\": 200,\n }\n }]\n request_fields_absent = [{\n \"request\": gen_request(rf, post_url, {\n \"type\": \"fetch\",\n \"document_id\": 3\n }),\n \"response\": {\n \"code\": 400,\n }\n }, {\n \"request\": gen_request(rf, post_url, {\n \"user_id\": 1,\n \"document_id\": 3\n }),\n \"response\": {\n \"code\": 400,\n }\n }, {\n \"request\": gen_request(rf, post_url, {\n \"user_id\": 1,\n \"type\": \"delete\",\n }),\n \"response\": {\n \"code\": 400,\n \"data\": \"It seems silly, but it can pass code smell test\"\n }\n }]\n request_user_invalid = [{\n \"request\": gen_request(rf, post_url, {\n \"user_id\": \"\",\n \"type\": \"fetch\",\n \"document_id\": 3\n }),\n \"response\": {\n \"code\": 400\n }\n }, {\n \"request\": gen_request(rf, post_url, {\n \"user_id\": \"qs\",\n \"type\": \"delete\",\n \"document_id\": 1\n }),\n \"response\": {\n \"code\": 400\n }\n }, {\n \"request\": gen_request(rf, post_url, {\n \"user_id\": 5,\n \"type\": \"delete\",\n \"document_id\": 3\n }),\n \"response\": {\n \"code\": 400,\n }\n }, {\n \"request\": gen_request(rf, post_url, {\n \"user_id\": \"10\",\n \"type\": \"delete\",\n \"document_id\": 3\n }),\n \"response\": {\n \"code\": 400,\n }\n }, {\n \"request\": gen_request(rf, post_url, {\n \"user_id\": {},\n \"type\": \"fetch\",\n \"document_id\": 3\n }),\n \"response\": {\n \"code\": 400,\n }\n }]\n\n request_type_invalid = [{\n \"request\": gen_request(rf, post_url, {\n \"user_id\": 1,\n \"type\": \"deleted\",\n \"document_id\": 3\n }),\n \"response\": {\n \"code\": 400,\n }\n }, {\n \"request\": gen_request(rf, 
post_url, {\n \"user_id\": 1,\n \"type\": \"yysy\",\n \"document_id\": 3\n }),\n \"response\": {\n \"code\": 400,\n }\n }]\n\n request_document_invalid = [{\n \"request\": gen_request(rf, post_url, {\n \"user_id\": 1,\n \"type\": \"fetch\",\n \"document_id\": \"\"\n }),\n \"response\": {\n \"code\": 400,\n }\n }, {\n \"request\": gen_request(rf, post_url, {\n \"user_id\": 1,\n \"type\": \"fetch\",\n \"document_id\": \"happy\"\n }),\n \"response\": {\n \"code\": 400,\n }\n }, {\n \"request\": gen_request(rf, post_url, {\n \"user_id\": 1,\n \"type\": \"delete\",\n \"document_id\": 1000\n }),\n \"response\": {\n \"code\": 400,\n }\n }, {\n \"request\": gen_request(rf, post_url, {\n \"user_id\": \"1\",\n \"type\": \"fetch\",\n \"document_id\": {}\n }),\n \"response\": {\n \"code\": 400,\n }\n }]\n\n request_operation_on_deleted_file = [{\n \"request\": gen_request(rf, post_url, {\n \"user_id\": \"1\",\n \"type\": \"modify\",\n \"document_id\": 5,\n \"content\": \"\"\n }),\n \"response\": {\n \"code\": 400,\n }\n }]\n\n request_content = [{\n \"request\": gen_request(rf, post_url, {\n \"user_id\": \"1\",\n \"type\": \"modify\",\n \"document_id\": 2,\n }),\n \"response\": {\n \"code\": 400,\n }\n }, {\n \"request\": gen_request(rf, post_url, {\n \"user_id\": \"1\",\n \"type\": \"modify\",\n \"document_id\": 2,\n \"content\": \"\"\n }),\n \"response\": {\n \"code\": 400,\n }\n }]\n\n request_decode_error = [{\n \"request\": gen_request(rf, post_url, {\n \"null\"\n }),\n \"response\": {\n \"code\": 400,\n }\n }]\n requests = request_succ + request_fields_absent + request_user_invalid + \\\n request_type_invalid + request_document_invalid + \\\n request_content + request_operation_on_deleted_file + \\\n request_decode_error\n return requests", "def test_batch_delete_with_out_users_marked_for_deletion(self):\n # Given:\n self.batch_setup()\n # When:\n with patch(\"ras_rm_auth_service.batch_process_endpoints.requests.post\") as mock_request:\n mock_request.return_value = mock_response()\n batch_delete_request = self.client.delete(\"/api/batch/account/users\", headers=self.headers)\n # Then:\n self.assertFalse(mock_request.called)\n self.assertEqual(0, mock_request.call_count)\n self.assertEqual(batch_delete_request.status_code, 204)\n self.assertTrue(self.does_user_exists(self.user_2))\n self.assertTrue(self.does_user_exists(self.user_0))\n self.assertTrue(self.does_user_exists(self.user_1))\n self.assertTrue(self.does_user_exists(self.user_3))", "def keep_documents(self, idx):\n print('{} documents have been removed'.format(self.data.shape[0] - len(idx)))\n self.documents = [self.documents[i] for i in idx]\n self.labels = self.labels[idx]\n self.data = self.data[idx, :]", "def handle_response(self, response):\n with self.lock:\n req_id, status, message = response\n if req_id in self.pending_requests: # request may have timed out\n self.pending_requests[req_id].set((status, message))", "def process_response(self, request, response):\n if hasattr(threadlocal, 'auditlog'):\n pre_save.disconnect(sender=LogEntry, dispatch_uid=threadlocal.auditlog['signal_duid'])\n\n return response", "def check_response(func):\n\n def _wrapper(*args, **kw):\n with _mutex:\n _message[:] = [] # why Python list has no clear() ??\n\n result, status = func(*args, **kw)\n if not status.success:\n err_msg = \"Error running command [%s], reason: %s\" % (func.__name__, status.reason)\n raise error.BigflowRPCException(err_msg)\n\n return result\n return _wrapper", "def get_and_delete_messages (self):\n return []", "def 
get_and_delete_messages (self):\n return []", "def resent_frames(self):\n try:\n for k,f in self.frames.items():\n if time.time() - f['time'] > 0.500:\n self.log.warning(\"resend frame %d:%s\" % (k, f['msg']))\n self.__send_frame(k, f['msg'])\n except RuntimeError:\n pass # dictionary changed size during iteration", "def _leftovers(self, fl):\n try:\n data = self.sock.recv(1024, fl)\n except socket.error as _:\n return False\n if len(data) != 0:\n tail = data\n while True:\n (head, tail) = Ctrl().split_combined(tail)\n print(\"Got message:\", Ctrl().rem_header(head))\n if len(tail) == 0:\n break\n return True\n return False", "def delete(event, context):\n\n if type(event) == str:\n event = json.loads(event)\n\n for record in event['Records']:\n data = json.loads(record['Sns']['Message'])\n\n item = table.get_item(\n Key={\n 'id':data['id']\n }\n )\n\n if 'Item' in item:\n result = table.update_item(\n Key={ 'id': data['id']},\n ExpressionAttributeNames={\n '#is_deleted': 'deleted'\n },\n ExpressionAttributeValues={\n ':delete_flg': True,\n ':updatedAt': str(dt.datetime.utcnow())\n },\n UpdateExpression='SET #is_deleted = :delete_flg, modifiedAt = :updatedAt',\n ReturnValues='ALL_NEW'\n )\n\n\n response = {\n 'statusCode': 200\n }\n\n return response", "def ungraded_response(self, queue_msg, queuekey):\r\n # check the queuekey against the saved queuekey\r\n if('queuestate' in self.input_state and self.input_state['queuestate'] == 'queued'\r\n and self.input_state['queuekey'] == queuekey):\r\n msg = self._parse_data(queue_msg)\r\n # save the queue message so that it can be rendered later\r\n self.input_state['queue_msg'] = msg\r\n self.input_state['queuestate'] = None\r\n self.input_state['queuekey'] = None", "def remove_reagents_from_container(request):\n container_id = int(request.POST['container_id'])\n positions = request.POST['positions']\n positions = json.loads(positions)\n current_container = Container.objects.get(id=container_id)\n\n for position in positions:\n row = int(position[0])\n column = int(position[1])\n print(row, column)\n position_query = ContainerContent.objects.filter(row__exact=row,\n column__exact=column,\n container=current_container,\n )\n print(position_query)\n position_query.delete()\n return JsonResponse({'success': True})", "def test_invalid_batch(self):\n req = '[1,2,3]'\n resp = '''[\n {\"jsonrpc\": \"2.0\", \"error\": {\"code\": -32600, \"message\": \"InvalidRequestError: Invalid JSON-RPC Message; must be an object\"}, \"id\": null},\n {\"jsonrpc\": \"2.0\", \"error\": {\"code\": -32600, \"message\": \"InvalidRequestError: Invalid JSON-RPC Message; must be an object\"}, \"id\": null},\n {\"jsonrpc\": \"2.0\", \"error\": {\"code\": -32600, \"message\": \"InvalidRequestError: Invalid JSON-RPC Message; must be an object\"}, \"id\": null}\n ]'''\n status = 200\n r_status, r_resp = self.exec_handler(req)\n self.assertEqual(r_status, status)\n self.assertEqual(simplejson.loads(r_resp), simplejson.loads(resp))", "def process_response(self, response):\n json = response.json()\n for resp in json[\"responses\"]:\n sub_qry = self._current_query.get(int(resp[\"id\"]))\n self.context.pending_request().map_json(resp[\"body\"], sub_qry.return_type)", "def _delete_tag_response(response):\n if 'errortext' in response:\n if 'Unable to find resource by id' in response['errortext']:\n errors.invalid_resource_id()\n\n return {\n 'template_name_or_list': 'status.xml',\n 'response_type': 'DeleteTagsResponse',\n 'return': 'true'\n }", "def normalize_bulk_return(fun):\n 
@wraps(fun)\n def _fixed_bulk(self, *args, **kwargs):\n def fix_item(item):\n if 'status' in item['index']:\n item['index']['ok'] = (\n 200 <= item['index']['status'] < 300)\n return item\n\n ret = fun(self, *args, **kwargs)\n if 'items' in ret:\n ret['items'] = [fix_item(item) for item in ret['items']]\n return ret\n return _fixed_bulk", "def testSplitResponseOverSeveralWrites(self):\n self.client_connect()\n self.client_send('set splitResponse 0 0 1\\r\\n')\n self.client_send('1\\r\\n')\n self.mock_recv(\"set splitResponse 0 0 1\\r\\n1\\r\\n\")\n self.mock_send('STO')\n self.wait(1)\n self.mock_send('RED')\n self.wait(1)\n self.mock_send('\\r')\n self.wait(1)\n self.mock_send('\\n')\n self.client_recv('STORED\\r\\n')", "def receive_response(self, transaction):\n host, port = transaction.response.source\n key_token = hash(str(host) + str(port) + str(transaction.response.token))\n if key_token in self._block1_sent and transaction.response.block1 is not None:\n item = self._block1_sent[key_token]\n transaction.block_transfer = True\n if item.m == 0:\n transaction.block_transfer = False\n del transaction.request.block1\n return transaction\n n_num, n_m, n_size = transaction.response.block1\n if n_num != item.num: # pragma: no cover\n logger.warning(\"Blockwise num acknowledged error, expected \" + str(item.num) + \" received \" +\n str(n_num))\n return None\n if n_size < item.size:\n logger.debug(\"Scale down size, was \" + str(item.size) + \" become \" + str(n_size))\n item.size = n_size\n request = transaction.request\n del request.mid\n del request.block1\n request.payload = item.payload[item.byte: item.byte+item.size]\n item.num += 1\n item.byte += item.size\n if len(item.payload) <= item.byte:\n m = 0\n else:\n m = 1\n request.block1 = (item.num, m, item.size)\n elif transaction.response.block2 is not None:\n\n num, m, size = transaction.response.block2\n if m == 1:\n transaction.block_transfer = True\n if key_token in self._block2_sent:\n item = self._block2_sent[key_token]\n if num != item.num: # pragma: no cover\n logger.error(\"Receive unwanted block\")\n return self.error(transaction, defines.Codes.REQUEST_ENTITY_INCOMPLETE.number)\n if item.content_type is None:\n item.content_type = transaction.response.content_type\n if item.content_type != transaction.response.content_type: # pragma: no cover\n logger.error(\"Content-type Error\")\n return self.error(transaction, defines.Codes.UNSUPPORTED_CONTENT_FORMAT.number)\n item.byte += size\n item.num = num + 1\n item.size = size\n item.m = m\n item.payload += transaction.response.payload\n else:\n item = BlockItem(size, num + 1, m, size, transaction.response.payload,\n transaction.response.content_type)\n self._block2_sent[key_token] = item\n request = transaction.request\n del request.mid\n del request.block2\n request.block2 = (item.num, 0, item.size)\n else:\n transaction.block_transfer = False\n if key_token in self._block2_sent:\n if self._block2_sent[key_token].content_type != transaction.response.content_type: # pragma: no cover\n logger.error(\"Content-type Error\")\n return self.error(transaction, defines.Codes.UNSUPPORTED_CONTENT_FORMAT.number)\n transaction.response.payload = self._block2_sent[key_token].payload + transaction.response.payload\n del self._block2_sent[key_token]\n else:\n transaction.block_transfer = False\n return transaction" ]
[ "0.6261296", "0.5926714", "0.5847716", "0.58347917", "0.5816505", "0.568727", "0.5628807", "0.55821556", "0.55515593", "0.5497828", "0.5445062", "0.5384468", "0.5301025", "0.52775407", "0.5272712", "0.52238524", "0.5213868", "0.51214164", "0.51052916", "0.50793844", "0.5073877", "0.5073877", "0.5070564", "0.50572306", "0.5054251", "0.50480604", "0.5043665", "0.5029975", "0.50294036", "0.5027412", "0.50102293", "0.49904075", "0.49845755", "0.49836114", "0.49801055", "0.49748313", "0.4954745", "0.49536523", "0.49505407", "0.4933108", "0.49229974", "0.49164835", "0.4912218", "0.49065334", "0.4894175", "0.48938924", "0.48874885", "0.4887092", "0.4886409", "0.48664877", "0.48622325", "0.4860893", "0.48568842", "0.48536578", "0.48525938", "0.48498753", "0.4841661", "0.48340955", "0.48314402", "0.48265508", "0.48244467", "0.48215333", "0.48114017", "0.4805043", "0.47940022", "0.47876328", "0.47704023", "0.47687507", "0.47647154", "0.47647154", "0.4761806", "0.47569168", "0.47546035", "0.47526476", "0.47439587", "0.47387952", "0.47387445", "0.47384998", "0.47361168", "0.47349733", "0.47349185", "0.47346345", "0.4720637", "0.471916", "0.4707092", "0.4707049", "0.4703934", "0.47035146", "0.47035146", "0.47033584", "0.4699566", "0.46966588", "0.46955025", "0.4692296", "0.46916065", "0.4688335", "0.46849757", "0.46845603", "0.46807873", "0.46807632" ]
0.7417818
0
Inspect the response to see if the failure was our fault (the Sender)
def _check_failures(self, response, batch=None):
    if not response.get('Failed'):
        return 0  # nothing to do here

    LOGGER.error('The following records failed to put to queue %s', self.queue.url)

    for failure in response['Failed']:
        # Pull out the record that matches this ID
        record = self._extract_message_by_id(batch, failure['Id']) if batch else None
        LOGGER.error(self._format_failure_message(failure, record=record))

    failed = len(response.get('Failed', []))
    self._log_failed(failed)

    # Raise an exception if this is the fault of the sender (us)
    if any(result['SenderFault'] for result in response['Failed']):
        raise SQSClientError('Failed to send records to SQS:\n{}'.format(response))

    return failed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testFailure(self):\n request = b'hello'\n reply = self.sendAndReceive(request)\n self.assertEqual(2, reply[0])", "def check_response_errors(self, resp):\n return True", "def check_response(response):\n status = response.get('status')\n ret = status and status == 'OK'\n if not ret:\n logging.error('Received unexpected failure response from polyswarmd: %s', response)\n return ret", "def server_failure(self, resp):\n return resp[0] in FAILURE_CODES", "def _check_response(response: requests.Response) -> None:\n logger.debug('Received response:\\n%s', response.content)\n try:\n response.raise_for_status()\n if not response.json()['status']:\n _report_failure('your e-mail address appears to be invalid')\n except requests.exceptions.HTTPError:\n _report_failure()\n except (ValueError, KeyError):\n _report_failure('there was a problem with the server response')", "def error_invalid_response(self):\r\n return self.type() == 0x00", "def test_handle_response_wrong_message_type(self):\n lookup = Lookup(FindNode, self.target, self.node, self.event_loop)\n uuid = [uuid for uuid in lookup.pending_requests.keys()][0]\n contact = lookup.shortlist[0]\n msg = OK(uuid, self.node.network_id, self.node.network_id,\n self.reply_port, self.version, self.seal)\n response = asyncio.Future()\n response.set_result(msg)\n lookup._blacklist = mock.MagicMock()\n lookup._handle_error = mock.MagicMock()\n lookup._handle_response(uuid, contact, response)\n lookup._blacklist.assert_called_once_with(contact)\n self.assertEqual(lookup._handle_error.call_count, 1)\n args = lookup._handle_error.call_args[0]\n self.assertEqual(args[0], uuid)\n self.assertEqual(args[1], contact)\n self.assertIsInstance(args[2], TypeError)\n self.assertEqual(args[2].args[0],\n \"Unexpected response type from {}\".format(contact))", "def has_error(self, response):\n return response.find(' Matched') == -1 and response.find(' Failed') == -1", "def validate_response(response: json):\n if \"error\" in response:\n print(\"ERROR: Request returned error\")\n print_request_response(response)\n exit(1)", "def _response_failure(self, failure, msgID):\r\n if not self._status:\r\n # Can not help it if the response takes some time and in the mean\r\n # time the interface is disabled; therefore, don't raise an error\r\n # instead just skip sending the response\r\n return\r\n\r\n # TODO: Return something useful to the cloud here!\r\n print('Service call failed.')", "def _is_error_call(self, response):\n status = response.get('ResponseMetadata', {}).get('HTTPStatusCode')\n return status != 200", "def _check_response_for_request_errors(self):\r\n if self.response.HighestSeverity == \"ERROR\":\r\n for notification in self.response.Notifications:\r\n if notification.Severity == \"ERROR\":\r\n raise FedexError(notification.Code,\r\n notification.Message)", "def validate_response(response):\n assert response.ok\n rpcdict = response.json()\n assert rpcdict['jsonrpc'] == '2.0'\n assert rpcdict['id']\n assert 'error' in rpcdict.keys() or 'result' in rpcdict.keys()", "def __check_response_for_fedex_error(self):\r\n if self.response.HighestSeverity == \"FAILURE\":\r\n for notification in self.response.Notifications:\r\n if notification.Severity == \"FAILURE\":\r\n raise FedexFailure(notification.Code,\r\n notification.Message)", "def recvcheck(self):\n\n data = self.recv()\n if self.serializer == 'json' and data and isinstance(data, dict):\n if '@error' in data:\n exception = getattr(builtins, data['@error'])\n if (isinstance(exception, type) and\n 
issubclass(exception, Exception)):\n raise exception(data['@message'])\n else:\n if isinstance(data, Exception):\n raise data\n return data", "def _error_response(self):\r\n response_dict = {'success': False, 'version': 1}\r\n self.send_response(\r\n 400, content=json.dumps(response_dict),\r\n headers={'Content-type': 'application/json'}\r\n )", "def check_response(rv):\n if rv != 'OK':\n print \"No message found\"\n return False\n return True", "def request_was_successful(response):\n status_code = response.status_code\n try:\n response_json = response.json()\n except ValueError:\n print(\"Unable to parse JSON from server for %s response: %s\" % (response.url, response))\n return False\n if not response.ok:\n print(\"Server response Not OK. \\n Sent request: %s \\n Status code: %i\" % (response.url, status_code))\n print(response_json)\n return False\n if (status_code != 200):\n print(\"Bad response from server. \\n Sent request: %s \\n Status code: %i\" % (response.url, status_code))\n print(response)\n return False\n if response_json is None:\n print(\"ERROR: could not search Eventful for url %s\" % (response.url))\n return False\n if response.headers.get('Content-length') == '0':\n print (\"No content from Eventful for request: \" + response.url)\n return False\n return True", "def checkError(invoke_response, message):\n\n if 'FunctionError' in invoke_response:\n err_message = invoke_response['Payload'].read()\n print(message)\n print(err_message)\n return {\n 'statusCode': 500,\n 'body': json.dumps(str(err_message))\n }\n return None", "def is_error(response: str) -> bool:\n return \"ERROR\" in response", "def check_error(self, response):\n if type(response) is dict and response.has_key('status_code'):\n if response['status_code'] != 200:\n raise rocket.RocketAPIException(response['status_code'],\n response['status_text'])", "def test_response_error(err_msg):\n from server import response_error\n error_text = b'HTTP/1.1 %s' % err_msg\n assert response_error(err_msg).split(b'\\r\\n')[0] == error_text", "def verif_response(response):\n if response.status_code >= 200 and response.status_code <= 299:\n logging.debug(\"response server OK::{}\".format(response.text))\n return True\n\n logging.error(\"response server KO::{}\".format(response.text))\n return False", "def _check_response_for_request_errors(self):\r\n if self.response.HighestSeverity == \"ERROR\":\r\n for notification in self.response.Notifications:\r\n if notification.Severity == \"ERROR\":\r\n if \"Invalid tracking number\" in notification.Message:\r\n raise FedexInvalidTrackingNumber(notification.Code,\r\n notification.Message)\r\n else:\r\n raise FedexError(notification.Code,\r\n notification.Message)", "async def _handle_response(response: ClientResponse) -> Dict:\n content = await response.json(encoding='utf-8', loads=loads)\n if response.status != 200:\n for member in JmRpcErrorType:\n if content['message'] != member.value:\n continue\n raise JmRpcError(response.status, content)\n response.raise_for_status()\n return content", "def _check_response(self, res: requests.Response, token: str) -> None:\n return", "def validate_response(self, response):\n pass", "def errorReceived(results):\n self.client.transport.loseConnection()\n self.server.transport.loseConnection()\n\n # Check what the server logs\n errors = self.flushLoggedErrors(imap4.IllegalQueryError)\n self.assertEqual(len(errors), 1)\n\n # Verify exception given to client has the correct message\n self.assertEqual(\n str(b\"SEARCH failed: Invalid search command 
FOO\"),\n str(results),\n )", "def checkResponseOK(response):\n assert response['result'] == 'OK'", "def _err_response(self, msg):\r\n return {'success': False, 'error': msg}", "def server_failure(self, resp, ignore_codes=[]):\n return (resp[0] in FAILURE_CODES and resp[0] not in ignore_codes)", "def errorReceived(results):\n self.client.transport.loseConnection()\n self.server.transport.loseConnection()\n\n # Check what the server logs\n errors = self.flushLoggedErrors(imap4.IllegalQueryError)\n self.assertEqual(len(errors), 1)\n\n # Verify exception given to client has the correct message\n self.assertEqual(\n str(b\"SEARCH failed: FOO is not a valid search criteria\"),\n str(results))", "def test_unknown_result(self):\r\n data = {\r\n \"EdX-ID\": self.receipt_id,\r\n \"Result\": 'Unknown',\r\n \"Reason\": 'Unknown reason',\r\n \"MessageType\": 'Unknown message'\r\n }\r\n json_data = json.dumps(data)\r\n response = self.client.post(\r\n reverse('verify_student_results_callback'),\r\n data=json_data,\r\n content_type='application/json',\r\n HTTP_AUTHORIZATION='test BBBBBBBBBBBBBBBBBBBB:testing',\r\n HTTP_DATE='testdate'\r\n )\r\n self.assertIn('Result Unknown not understood', response.content)", "def _on_tracking_failure(self, response, data):\n try:\n response = json.loads(response)\n except:\n # the response should be in JSON, but in case it can't be parsed just try another attempt\n logging.debug(\"cannot parse tracker response, should be valid JSON\")\n return response\n\n # remove the successfully tracked hits from payload\n tracked = response['tracked']\n data['requests'] = data['requests'][tracked:]\n\n return response['message']", "def verify_error_message(self, response, error_message):\n self.assertEqual(response.status_code, 400)\n response = json.loads(response.content.decode('utf-8'))\n self.assertIn('error', response)\n self.assertEqual(response['error'], error_message)", "def process_response(response):\n # Print it and exit with 1 if operation wasn't successful\n print(response['message'])\n if response['status'] != 'success':\n sys.exit(1)", "def __CheckResponse(self, response):\n\n status = response.status\n if (status == httplib.OK or status == httplib.CREATED\n or status == httplib.NO_CONTENT):\n return\n elif (status == httplib.UNAUTHORIZED):\n raise BadCredentialsException\n elif (status == httplib.SERVICE_UNAVAILABLE):\n raise ServerBusyException\n elif (status == httplib.BAD_REQUEST\n or status == httplib.UNPROCESSABLE_ENTITY):\n raise BadArgumentsException\n elif (status == httplib.NOT_FOUND):\n raise NotFoundException\n else:\n raise BadOperationException", "def test_rsp_invalid(self):\n\n def handle(event):\n return 0x0000, event.action_information\n\n self.ae = ae = AE()\n ae.acse_timeout = 5\n ae.dimse_timeout = 5\n ae.network_timeout = 5\n ae.add_supported_context(ProceduralEventLogging)\n scp = ae.start_server(\n (\"localhost\", 11112), block=False, evt_handlers=[(evt.EVT_N_ACTION, handle)]\n )\n\n ae.add_requested_context(ProceduralEventLogging)\n assoc = ae.associate(\"localhost\", 11112)\n assert assoc.is_established\n\n class DummyResponse:\n is_valid_response = False\n\n class DummyDIMSE:\n msg_queue = queue.Queue()\n gotten = False\n\n def send_msg(*args, **kwargs):\n return\n\n def get_msg(self, *args, **kwargs):\n if not self.gotten:\n self.gotten = True\n return None, DummyResponse()\n return None, None\n\n assoc._reactor_checkpoint.clear()\n while not assoc._is_paused:\n time.sleep(0.01)\n assoc.dimse = DummyDIMSE()\n ds = Dataset()\n 
ds.PatientName = \"Test^test\"\n status, ds = assoc.send_n_action(\n ds, 1, ProceduralEventLogging, \"1.2.840.10008.5.1.1.40.1\"\n )\n assert status == Dataset()\n assert ds is None\n assert assoc.is_aborted\n\n scp.shutdown()", "def is_error(self):\r\n if self.status not in [STATUS_CODES['200'], ]:\r\n return True\r\n else:\r\n return False", "def test_remote_errors_give_502(self):\n\n self.assertEqual(\n self._request(self._make_dummy_notification([DEVICE_REMOTE_ERROR])), 502\n )\n\n # we also check that a successful device doesn't hide the exception\n self.assertEqual(\n self._request(\n self._make_dummy_notification([DEVICE_ACCEPTED, DEVICE_REMOTE_ERROR])\n ),\n 502,\n )\n\n self.assertEqual(\n self._request(\n self._make_dummy_notification([DEVICE_REMOTE_ERROR, DEVICE_ACCEPTED])\n ),\n 502,\n )", "def process_post_result(resp):\n resp_json = resp.json()\n if 'message' in resp_json:\n message = resp_json['message']\n print_info(f'{message}.')\n return\n\n raise Exception(f'{response_message(resp_json)}')", "def response_error(response):\n if response.headers.get('X-RateLimit-Remaining') is not None:\n if int(response.headers['X-RateLimit-Remaining']) == 0:\n sys.stderr.write('Error: Rate Limit Reached, will reset in ' + response.headers.get(\n 'X-RateLimit-Reset') + ' seconds \\n')\n return True\n try:\n response.raise_for_status()\n except requests.exceptions.HTTPError as error:\n sys.stderr.write(\"\\nRequest Error:\\t %s\" % error.message)\n try:\n sys.stderr.write(\"\\nError code:\\t %s\" % response.json()['errorCode'])\n sys.stderr.write(\"\\nError message:\\t %s \" % response.json()['message'])\n except (ValueError, KeyError):\n pass\n\n if response.status_code == 500:\n sys.stderr.write('Your account may have no owner assigned. '\n 'Please visit www.logentries.com for information on '\n 'assigning an account owner. 
\\n')\n return True\n\n if response.status_code == 200:\n if response.headers['Content-Type'] != 'application/json':\n sys.stderr.write('Unexpected Content Type Received in Response: ' + response.headers[\n 'Content-Type'])\n return True\n else:\n return False\n return False", "def _assert_bad_request(self, response, field, zendesk_mock_class, datadog_mock):\r\n self.assertEqual(response.status_code, 400)\r\n resp_json = json.loads(response.content)\r\n self.assertTrue(\"field\" in resp_json)\r\n self.assertEqual(resp_json[\"field\"], field)\r\n self.assertTrue(\"error\" in resp_json)\r\n # There should be absolutely no interaction with Zendesk\r\n self.assertFalse(zendesk_mock_class.return_value.mock_calls)\r\n self.assertFalse(datadog_mock.mock_calls)", "def _check_response(self, response):\n if response is None:\n raise TypeError(\"Expected ElementTree, got '%s' instead\" % type(response))\n\n status = response.get('status')\n\n if status is None:\n raise RunTimeError('response is missing status: %s'\n % etree.tostring(response))\n if status.startswith('4'):\n raise ClientError(\"[%s] %s: %s\" % (status,\n response.tag,\n response.get('status_text')))\n\n elif status.startswith('5'):\n raise ServerError(\"[%s] %s: %s\" %(status,\n response.tag,\n response.get('status_text')))\n\n return status", "def mora_assert(response):\n assert response.status_code in (200, 201, 400, 404), response.status_code\n if response.status_code == 400:\n # Check actual response\n assert (\n response.text.find(\"not give raise to a new registration\") > 0\n ), response.text\n logger.debug(\"Request had no effect\")\n return None", "def test_redemption_unknown_response(\n self, voucher: Voucher, counter: int, extra_tokens: int\n ) -> None:\n details = \"mysterious\"\n num_tokens = counter + extra_tokens\n issuer = UnsuccessfulRedemption(details)\n treq = treq_for_loopback_ristretto(issuer)\n redeemer = RistrettoRedeemer(treq, NOWHERE)\n random_tokens = redeemer.random_tokens_for_voucher(voucher, counter, num_tokens)\n d = redeemer.redeemWithCounter(\n voucher,\n counter,\n random_tokens,\n )\n self.assertThat(\n Deferred.fromCoroutine(d),\n failed(\n AfterPreprocessing(\n lambda f: f.value,\n Equals(\n UnrecognizedFailureReason(\n {\n \"success\": False,\n \"reason\": details,\n }\n )\n ),\n ),\n ),\n )", "def validate_response(self, response: requests.Response) -> None:\n if 400 <= response.status_code < 500:\n msg = (\n f\"{response.status_code} Client Error: \"\n f\"{response.reason} for path: {self.path}. 
\"\n f\"Request payload: {response.request.body}\"\n )\n raise FatalAPIError(msg)\n\n elif 500 <= response.status_code < 600:\n msg = (\n f\"{response.status_code} Server Error: \"\n f\"{response.reason} for path: {self.path}\"\n )\n raise RetriableAPIError(msg)", "def response_check(response):\n print(response)\n print(response.text)\n return response.status_code == 200", "def validate_response(response):\n\n r = response\n try:\n r.raise_for_status()\n except HTTPError as e:\n message = dict(status_code=r.status_code, exception=e)\n\n try:\n response = r.json()\n message['response'] = response\n except JSONDecodeError as e:\n message['response'] = r.content\n\n raise HTTPError(message)", "def testRemoteError(self):\n\n def _testStreamError(res):\n self.assertEqual(True, isinstance(res.value, httpb_client.HTTPBNetworkTerminated))\n\n self.assertEqual(True, res.value.body_tag.hasAttribute('condition'), 'No attribute condition')\n # This is not a stream error because we sent invalid xml\n self.assertEqual(res.value.body_tag['condition'], 'remote-stream-error')\n self.assertEqual(True, len(res.value.elements)>0)\n # The XML should exactly match the error XML sent by triggerStreamError().\n self.assertEqual(True,xpath.XPathQuery(\"/error\").matches(res.value.elements[0]))\n self.assertEqual(True,xpath.XPathQuery(\"/error/policy-violation\").matches(res.value.elements[0]))\n self.assertEqual(True,xpath.XPathQuery(\"/error/arbitrary-extension\").matches(res.value.elements[0]))\n self.assertEqual(True,xpath.XPathQuery(\"/error/text[text() = 'Error text']\").matches(res.value.elements[0]))\n\n\n\n def _failStreamError(res):\n self.fail('A stream error needs to be returned')\n\n def _testSessionCreate(res):\n self.sid = res[0]['sid']\n # this xml is valid, just for testing\n # the point is to wait for a stream error\n d = self.send('<fdsfd/>')\n d.addCallback(_failStreamError)\n d.addErrback(_testStreamError)\n self.server_protocol.triggerStreamError()\n\n return d\n\n BOSH_XML = \"\"\"<body content='text/xml; charset=utf-8'\n hold='1'\n rid='%(rid)i'\n to='localhost'\n route='xmpp:127.0.0.1:%(server_port)i'\n ver='1.6'\n wait='60'\n ack='1'\n xml:lang='en'\n xmlns='http://jabber.org/protocol/httpbind'/>\n \"\"\"% { \"rid\": self.rid, \"server_port\": self.server_port }\n\n d = self.proxy.connect(BOSH_XML).addCallback(_testSessionCreate)\n\n return d", "def testFailedResponse(self):\n self.mgr.sendState = Mock()\n message = (mavutil.mavlink.GOPRO_COMMAND_VIDEO_SETTINGS, mavutil.mavlink.GOPRO_REQUEST_FAILED)\n self.mgr.set_response_callback('vehicle','name', message)\n self.mgr.sendState.assert_called_with()\n self.mgr.processMsgQueue.assert_called_with()", "def test_patch_actor_assistant_permissions_500(self): # assistant cannot patch actors\r\n res = self.client().patch('/actors/1', json=partial_actor, headers=casting_assistant)\r\n data = json.loads(res.data)\r\n\r\n self.assertEqual(res.status_code, 500)\r\n self.assertFalse(data[\"success\"])\r\n self.assertEqual(data[\"message\"], \"internal server error\")", "def test_parse_error_response(self):\n self.assertEqual(\n parse_server_answer(ERROR_SERVER_RESPONSE),\n f'Bad response. 
{ERROR_SERVER_RESPONSE[RESPONSE]}: {ERROR_SERVER_RESPONSE[ERROR]}'\n )", "def handle_error_response(resp):\n error_message = ''\n error_message_with_reason = ''\n try:\n error_message = (\n resp.json()\n .get('fireeyeapis', {})\n .get('description', '')\n .strip()\n )\n error_message = error_message.replace('\\n', '')\n if error_message:\n error_message_with_reason = f'Reason: {error_message}'\n except ValueError: # ignoring json parsing errors\n pass\n if resp.headers.get('Content-Type', '') == CONTENT_TYPE_ZIP:\n error_message = error_message_with_reason = resp.text\n\n status_code_messages = {\n 400: f\"{MESSAGES['BAD_REQUEST_ERROR']} {error_message_with_reason}\",\n 401: MESSAGES['AUTHENTICATION_ERROR'],\n 403: error_message,\n 404: error_message,\n 406: error_message,\n 407: MESSAGES['PROXY_ERROR'],\n 500: MESSAGES['INTERNAL_SERVER_ERROR'],\n 503: MESSAGES['INTERNAL_SERVER_ERROR'],\n }\n\n if resp.status_code in status_code_messages:\n demisto.debug(\n f'Response Code: {resp.status_code}, Reason: {status_code_messages[resp.status_code]}'\n )\n raise DemistoException(status_code_messages[resp.status_code])\n else:\n raise DemistoException(resp.raise_for_status())", "def _FailureResponse(args_dict=None):\n if args_dict is None:\n args_dict = {}\n args_dict[\"code\"] = \"Fail\"\n return CGateway._DumpResponse(args_dict)", "def _check_response_for_request_errors(self):\r\n if self.response.HighestSeverity == \"ERROR\":\r\n for notification in self.response.Notifications:\r\n if notification.Severity == \"ERROR\":\r\n if \"Postal Code Not Found\" in notification.Message:\r\n raise FedexPostalCodeNotFound(notification.Code,\r\n notification.Message)\r\n\r\n elif \"Invalid Postal Code Format\" in self.response.Notifications:\r\n raise FedexInvalidPostalCodeFormat(notification.Code,\r\n notification.Message)\r\n else:\r\n raise FedexError(notification.Code,\r\n notification.Message)", "def raise_for_status(response):\n if response.status_code != 200:\n res_data = response.json()\n if (response.status_code, res_data['error']) in error_map:\n raise error_map[(response.status_code, res_data['error'])](res_data['error_description'])\n raise ShoperApiError(res_data['error_description'])\n\n return response", "def _get_response(self):\n resp = yield from self._responses.get()\n if len(resp) == 1:\n line = resp[0]\n if line.startswith('ACK'):\n i = line.index('@')\n error = line[5:i]\n l = line[i+1:line.index(']')]\n command = line[line.index('{')+1:line.index('}')]\n msg = line[line.index('}')+1:]\n raise MpdCommandException(line, error, l, command, msg)\n return resp", "def is_valid_response(self, response):\r\n if response.status_code in VALID_CODES:\r\n return True\r\n return False", "def _extract_error(self, resp):\n reason = resp.headers.get(\"reason\", None)\n full_response = None\n\n if reason is None:\n try:\n # if response is in json format\n reason = resp.json()[\"error\"][\"msg\"]\n except KeyError:\n # if json response has unexpected structure\n full_response = resp.content\n except ValueError:\n # otherwise we assume it's html\n reason, full_html = self._scrape_response(resp.headers, resp.content)\n full_response = unescape_html(full_html)\n\n msg = \"[Reason: %s]\" % reason\n\n if reason is None:\n msg += \"\\n%s\" % full_response\n\n return msg", "def _get_error_message(response):\n try:\n return response.json()[\"detail\"]\n except (KeyError, _JSONDecodeError):\n return response.text", "def test_handle_response_value_message_wrong_key(self):\n lookup = Lookup(FindValue, self.target, 
self.node, self.event_loop)\n uuids = [uuid for uuid in lookup.pending_requests.keys()]\n uuid = uuids[0]\n contact = lookup.shortlist[0]\n msg = Value(uuid, self.node.network_id, self.node.network_id,\n self.reply_port, self.version, self.seal, 'f00baa',\n 'value', time.time(), time.time() + 99999, self.version,\n PUBLIC_KEY, 'name', 'signature')\n response = asyncio.Future()\n response.set_result(msg)\n lookup._blacklist = mock.MagicMock()\n lookup._handle_error = mock.MagicMock()\n lookup._handle_response(uuid, contact, response)\n lookup._blacklist.assert_called_once_with(contact)\n self.assertEqual(lookup._handle_error.call_count, 1)\n args = lookup._handle_error.call_args[0]\n self.assertEqual(args[0], uuid)\n self.assertEqual(args[1], contact)\n self.assertIsInstance(args[2], ValueError)\n self.assertEqual(args[2].args[0],\n \"Value with wrong key returned by {}\"\n .format(contact))", "def send_rpc_error(req, rpcreq, e):", "def test_404(self):\n response = self.make_call(origin='Milano Lambrate', destination='Milano Cadorna')\n self.assert400(response)", "def validate_reply(request, reply):\n assert isinstance(reply, dict) and 'id' in reply\n assert ('result' in reply) != ('error' in reply)\n assert reply['id'] == request['id'] or \\\n reply['id'] == '00' and 'error' in reply", "def _is_successful(response) -> bool:\n return response.status_code == 200", "def failure(self, error):\n \n self.request.response.status_int = 400\n return None", "def test_http_error_raised(self):\n\n self.app.app.preprocess_request()\n\n err = HTTPError(http_status.HTTP_404_NOT_FOUND)\n\n resp = self.r(err)\n\n self.assertIn(\n err.to_data()['message_short'],\n resp[0].decode(),\n )\n self.assertEqual(\n http_status.HTTP_404_NOT_FOUND,\n resp[1],\n )", "def _check_response_status(response):\n # TODO: Test!\n if response.status == 1:\n raise InvalidLength(f\"{response.info}: Message is too long.\")\n elif response.status == 2:\n raise InvalidXML(f\"{response.info}: Invalid XML content.\")\n elif response.status == 3:\n raise WrongContent(\n f\"{response.info}: Wrong data content. \"\n f\"Ex: too long text in a field.\"\n )\n elif response.status == 4:\n raise NotAuthorized(\n f\"{response.info}: Wrong combination of \"\n f\"transmitter, instance and password\"\n )\n elif response.status == 5:\n # Fail over should kick in.\n raise NotTreatedNotDistributed(\n f\"{response.info}: Error in \"\n f\"processing. It has neither been \"\n f\"treated or distributed\"\n )\n elif response.status == 7:\n raise MandatoryDataMissing(\n f\"{response.info}: Mandatory XML tag \" f\"missing\"\n )\n elif response.status == 9:\n raise ServiceUnavailable(\n (\n f\"{response.info}: Heartbeat is not enabled on the server for \"\n f\"this transmitter or you are not authorized to use it.\"\n )\n )\n elif response.status == 10:\n raise DuplicateAlarm(\n f\"{response.info}: The same alarm was received\" f\" multiple times\"\n )\n elif response.status == 98:\n raise ServerSystemError(f\"{response.info}: General receiver error\")\n elif response.status == 99:\n # Failover should kick in.\n raise OtherError(f\"{response.info}: Unknown receiver error\")\n elif response.status == 100:\n raise XMLHeaderError(f\"{response.info}: Invalid or missing XML \" f\"header\")\n elif response.status == 101:\n raise PingToOften(f\"{response.info}: Heartbeat is sent too often\")", "def check_response(self, xml_str, xml_name):\n\n if \"<ok/>\" not in xml_str:\n self.module.fail_json(msg='Error: %s failed.' 
% xml_name)", "def test_http_error_raised(self):\n resp = self.r(HTTPError(http_status.HTTP_404_NOT_FOUND))\n\n msg = HTTPError.error_msgs[http_status.HTTP_404_NOT_FOUND]\n\n self.assertEqual(\n (\n {\n 'code': http_status.HTTP_404_NOT_FOUND,\n 'referrer': None,\n 'message_short': msg['message_short'],\n 'message_long': msg['message_long'],\n },\n http_status.HTTP_404_NOT_FOUND,\n ),\n (json.loads(resp[0]), http_status.HTTP_404_NOT_FOUND, ),\n )", "def test_failed_status(self):\n def mock_send_request(*args, **kwargs):\n return Response().fail(code='FAKE-ERROR').response\n\n response = support.run_remote_command(\n command='sync',\n mock_send_request=mock_send_request\n )\n self.assertTrue(response.failed)\n self.assert_has_error_code(response, 'FAKE-ERROR')", "def user_should_get_an_ok_response():\n assert web_app.validate_reponse()", "def _validate_response(self, response):\n # Check for unexpected response - all should be JSON dicts that have\n # already been deserialised\n if not isinstance(response, types.DictionaryType):\n self.message(\n \"\\t\\t[!] ERROR - Unexpected value returned from the API: '%s'\" %\n (response))\n return False\n\n # Check for valid errors\n if \"error\" in response and \"msg\" in response:\n self.message(\n \"\\t\\t[!] ERROR - %s (%s)\" %\n (response[\"msg\"], response[\"timestamp\"]))\n return False\n\n # Is this a valid response message\n if \"msg\" in response:\n return True\n\n # Catch all...dictionary returned but does not contain expected keys?\n # Who know's what's going on here?!\n else:\n self.message(\n \"\\t\\t[!] ERROR - Unexpected dictionary response returned from the API: '%s'\" %\n (response))\n return False", "def the_response_should_be_result(result):\n assert web_app.check_response(result)", "def raise_for_response(self, response):\n try:\n code = response.errors[0][0]._code\n\n if code == 'invalidRecipient':\n raise InvalidRecipientException()\n elif code == 'recipientBlocked':\n raise RecipientBlockedException()\n elif code == 'emptyMessageContent':\n raise EmptyMessageContentException()\n elif code == 'other':\n raise OtherMMSOAPException()\n else:\n pass\n\n except AttributeError:\n pass", "def recv_invalid_response(self, recv_data, invalid_type = \"\"):\t\n\tif (invalid_type == \"bit_signature\"):\n\t print(\"Error: Packet received from outside our network (wrong bit signature)\")\t \n\t recv_data = \"\"\t \n\telif (invalid_type == \"response_type\"):\n\t print(\"Error: Wrong response type in packet received.\")\t \n\t recv_data = \"\"\t \t\n\treturn", "def validate(self, response):\n return response[\"status_code\"] == 1", "def response_check(response):\n print(response)\n print(response.text)\n return response.status_code == 201", "def adrest_errors_mail(response, request):\r\n\r\n if not response.status_code in ADREST_MAIL_ERRORS:\r\n return False\r\n\r\n subject = 'ADREST API Error (%s): %s' % (\r\n response.status_code, request.path)\r\n stack_trace = '\\n'.join(traceback.format_exception(*sys.exc_info()))\r\n message = \"\"\"\r\nStacktrace:\r\n===========\r\n%s\r\n\r\nHandler data:\r\n=============\r\n%s\r\n\r\nRequest information:\r\n====================\r\n%s\r\n\r\n\"\"\" % (stack_trace, repr(getattr(request, 'data', None)), repr(request))\r\n return mail_admins(subject, message, fail_silently=True)", "def error( response ) :\n\t\twarnings.warn(\"deprecated in 0.3.0, use not responseGood()\", DeprecationWarning)\n\t\treturn not Databank.responseGood( response )", "def test_request_failed(self, kasserver, kasapi):\n 
kasapi.side_effect = zeep.exceptions.Fault(\"failed\")\n with pytest.raises(zeep.exceptions.Fault):\n kasserver._request(self.REQUEST_TYPE, self.REQUEST_PARAMS)", "def test_rsp_failure(self):\n\n def handle(event):\n return 0x0112, None\n\n self.ae = ae = AE()\n ae.acse_timeout = 5\n ae.dimse_timeout = 5\n ae.network_timeout = 5\n ae.add_supported_context(ProceduralEventLogging)\n scp = ae.start_server(\n (\"localhost\", 11112), block=False, evt_handlers=[(evt.EVT_N_ACTION, handle)]\n )\n\n ae.add_requested_context(ProceduralEventLogging)\n assoc = ae.associate(\"localhost\", 11112)\n assert assoc.is_established\n\n ds = Dataset()\n ds.PatientName = \"Test^test\"\n status, ds = assoc.send_n_action(\n ds, 1, ProceduralEventLogging, \"1.2.840.10008.5.1.1.40.1\"\n )\n assert status.Status == 0x0112\n assert ds is None\n assoc.release()\n assert assoc.is_released\n\n scp.shutdown()", "def _check_200(self, response):\n if response.code != 200:\n raise YubiKeyVerificationError(\n \"Received {0} response.\".format(response.code))\n return response", "def _process_unsuccessful_response(\n self,\n response: Response,\n case: Literal['validate_api_key', 'balances', 'trades', 'asset_movements'],\n ) -> Union[\n list,\n tuple[bool, str],\n ExchangeQueryBalances,\n ]:\n try:\n response_list = jsonloads_list(response.text)\n except JSONDecodeError as e:\n msg = f'{self.name} {case} returned an invalid JSON response: {response.text}.'\n log.error(msg)\n\n if case in ('validate_api_key', 'balances'):\n return False, msg\n if case in ('trades', 'asset_movements'):\n self.msg_aggregator.add_error(\n f'Got remote error while querying {self.name} {case}: {msg}',\n )\n return []\n\n raise AssertionError(f'Unexpected {self.name} response_case: {case}') from e\n\n error_data = self._get_error_response_data(response_list)\n if error_data.error_code == API_ERR_AUTH_NONCE_CODE:\n message = API_ERR_AUTH_NONCE_MESSAGE\n # Errors related with the API key return a human readable message\n elif case == 'validate_api_key' and error_data.error_code == API_KEY_ERROR_CODE:\n message = API_KEY_ERROR_MESSAGE\n else:\n # Below any other error not related with the system clock or the API key\n reason = error_data.reason or response.text\n message = (\n f'{self.name} query responded with error status code: {response.status_code} '\n f'and text: {reason}.'\n )\n log.error(message)\n\n if case in ('validate_api_key', 'balances'):\n return False, message\n if case in ('trades', 'asset_movements'):\n self.msg_aggregator.add_error(\n f'Got remote error while querying {self.name} {case}: {message}',\n )\n return []\n\n raise AssertionError(f'Unexpected {self.name} response_case: {case}')", "def recv_expect(self, expected_msg):\n response = self.c.recv(self.packet_len).decode('utf-8')\n if response != expected_msg:\n raise ValueError('Error: Invalid response from server, expecting %s got %s' % (expected_msg, response))", "def test_invalid_request(self):\n req = '{\"jsonrpc\": \"2.0\", \"method\": 1, \"params\": \"bar\"}'\n resp = '{\"jsonrpc\": \"2.0\", \"error\": {\"code\": -32600, \"message\": \"InvalidRequestError: Method must be a string\"}, \"id\": null}'\n status = 400\n r_status, r_resp = self.exec_handler(req)\n self.assertEqual(r_status, status)\n self.assertEqual(simplejson.loads(r_resp), simplejson.loads(resp))", "def notOk(response):\n if response.status_code == 200:\n return False\n elif response.status_code == 404:\n raise SummonerNotFound(\n \"Summoner was not found and may not exist (error 404)\")\n elif 
response.status_code == 429:\n raise OverRateLimit(\"The rate limit was exceeded (error 424)\")\n elif response.status_code == 403:\n raise ApiKeyError(\n \"Riot API key may be wrong or expired\" \n \" and/or endpoints need an update (error 403)\"\n )\n else:\n response.raise_for_status()", "def _check_response(self, response):\n if response.status_code == requests.codes.ok:\n # Since the ZenHub REST API does not send back 204 when there is\n # no content, we have to check the Content-Length for 0 :(\n if int(response.headers['Content-Length']):\n return response.json()\n elif response.status_code == requests.codes.not_found:\n return None\n else:\n return response.raise_for_status()", "def response(self, context, message):\r\n return True", "def check_status_code(response):\n if response.status_code == 400:\n print('ERROR: The request is malformed.')\n elif response.status_code == 404:\n print('ERROR: Page or resource not found, check endpoint and request.')\n elif response.status_code == 500:\n print('ERROR: server had an error.')\n else:\n print('ERROR: unknow error.')\n print('request = |', response.url, '|')\n print(response.headers)\n print(f'ERROR: code is {response.status_code}')\n print('ERROR: program stopped because of request errors.')\n exit(1)", "async def parse_handle_response(self, json_response):\n try:\n vasp = self.vasp\n other_key = vasp.info_context.get_peer_compliance_verification_key(\n self.other_address_str\n )\n message = await other_key.verify_message(json_response)\n response = json.loads(message)\n response = CommandResponseObject.from_json_data_dict(\n response, JSONFlag.NET\n )\n\n return self.handle_response(response)\n\n except OffChainInvalidSignature as e:\n logger.warning(\n f'(other:{self.other_address_str}) '\n f'Signature verification failed. 
OffChainInvalidSignature: {e}'\n )\n raise e\n except JSONParsingError as e:\n logger.warning(\n f'(other:{self.other_address_str}) JSONParsingError: {e}'\n )\n raise e\n except OffChainException or OffChainProtocolError as e:\n logger.warning(\n f'(other:{self.other_address_str}) '\n f'OffChainException/OffChainProtocolError: {e}',\n )\n raise e", "def parse_response(self, response, **kw):\n data = super().parse_response(response, **kw)\n error = data.get('error')\n if error is None:\n return data['result']\n else:\n # assume error object follows json-rpc 2.0 spec formatting\n self.handle_error(code=error['code'], msg=error['message'])", "def assert_error(self, result):\n error_msg = ['no height', 'invalid height range', 'invalid method', 'timeout', 'error', 'no hex',\n 'couldnt get addressutxos', 'invalid address or amount too small', 'not enough funds',\n 'invalid address or amount too small', 'invalid utxo', 'wif expired', 'not implemented yet',\n 'invalid utxo']\n result_d = self.type_convert(result)\n error = result_d.get('error')\n if error:\n if error in error_msg:\n pass\n else:\n raise AssertionError(\"Unknown error message\")\n else:\n raise AssertionError(\"Unexpected response\")", "def test_invalid_batch(self):\n req = '[1,2,3]'\n resp = '''[\n {\"jsonrpc\": \"2.0\", \"error\": {\"code\": -32600, \"message\": \"InvalidRequestError: Invalid JSON-RPC Message; must be an object\"}, \"id\": null},\n {\"jsonrpc\": \"2.0\", \"error\": {\"code\": -32600, \"message\": \"InvalidRequestError: Invalid JSON-RPC Message; must be an object\"}, \"id\": null},\n {\"jsonrpc\": \"2.0\", \"error\": {\"code\": -32600, \"message\": \"InvalidRequestError: Invalid JSON-RPC Message; must be an object\"}, \"id\": null}\n ]'''\n status = 200\n r_status, r_resp = self.exec_handler(req)\n self.assertEqual(r_status, status)\n self.assertEqual(simplejson.loads(r_resp), simplejson.loads(resp))", "def assertFailedRequest(self, response_data, expected_error):\r\n self.assertFalse(response_data['success'])\r\n self.assertEquals(expected_error, response_data['error'])\r\n self.assertFalse(self.user.email_user.called)", "def test_parse_correct_response(self):\n self.assertEqual(\n parse_server_answer(CORRECT_SERVER_RESPONSE),\n f'Correct message with response {CORRECT_SERVER_RESPONSE[RESPONSE]}.'\n )", "def test_handle_response_wrong_value_for_findnode_message(self):\n lookup = Lookup(FindNode, self.target, self.node, self.event_loop)\n uuid = [uuid for uuid in lookup.pending_requests.keys()][0]\n contact = lookup.shortlist[0]\n msg = Value(uuid, self.node.network_id, self.node.network_id,\n self.reply_port, self.version, self.seal, self.target,\n 'value', time.time(), time.time() + 99999, self.version,\n PUBLIC_KEY, 'name', 'signature')\n response = asyncio.Future()\n response.set_result(msg)\n lookup._blacklist = mock.MagicMock()\n lookup._handle_error = mock.MagicMock()\n lookup._handle_response(uuid, contact, response)\n lookup._blacklist.assert_called_once_with(contact)\n self.assertEqual(lookup._handle_error.call_count, 1)\n args = lookup._handle_error.call_args[0]\n self.assertEqual(args[0], uuid)\n self.assertEqual(args[1], contact)\n self.assertIsInstance(args[2], TypeError)\n self.assertEqual(args[2].args[0],\n \"Unexpected response type from {}\".format(contact))", "def is_success(msg):\n return msg['status'] == 'success'", "async def unhandled_response(self, pkt, source):\n if False:\n yield None" ]
[ "0.6853921", "0.68304646", "0.67763245", "0.6721088", "0.6715591", "0.67042756", "0.66370153", "0.6635388", "0.6612699", "0.6506884", "0.64258915", "0.63320297", "0.633047", "0.62748057", "0.6268025", "0.6258739", "0.6243886", "0.6212098", "0.6211683", "0.61967474", "0.6168335", "0.61320996", "0.61305135", "0.6090684", "0.6081253", "0.60785025", "0.6069736", "0.60594887", "0.6049864", "0.6029324", "0.6021287", "0.6013065", "0.6011714", "0.6009636", "0.60066426", "0.5998823", "0.59686404", "0.5964981", "0.5956326", "0.5953794", "0.59528804", "0.59444773", "0.5941236", "0.59292585", "0.5926647", "0.5924281", "0.59152013", "0.5910486", "0.59038556", "0.59034806", "0.58969104", "0.58938867", "0.5888951", "0.5882286", "0.5867609", "0.5866831", "0.5861891", "0.5859268", "0.5857433", "0.58567953", "0.5849795", "0.58478916", "0.58431494", "0.5841812", "0.5839966", "0.5839392", "0.583359", "0.5833038", "0.5824173", "0.5821938", "0.5821098", "0.58195233", "0.5818134", "0.5817939", "0.58111006", "0.58109957", "0.58056724", "0.58034086", "0.5792645", "0.5789523", "0.5789147", "0.57889634", "0.5787674", "0.5787188", "0.5765955", "0.5758532", "0.5756099", "0.57559514", "0.57532054", "0.57435244", "0.5739099", "0.5738627", "0.57380533", "0.57373446", "0.57316303", "0.5729226", "0.5727296", "0.5721061", "0.5715752", "0.5710227" ]
0.6120679
23
Send new formatted messages to CSIRT SQS
def _send_messages(self, batched_messages):
    @backoff.on_predicate(backoff.fibo,
                          lambda resp: len(resp.get('Failed', [])) > 0,
                          max_tries=self.MAX_BACKOFF_ATTEMPTS,
                          max_value=self.MAX_BACKOFF_FIBO_VALUE,
                          on_backoff=backoff_handler(debug_only=False),
                          on_success=success_handler(),
                          on_giveup=giveup_handler())
    @backoff.on_exception(backoff.expo,
                          self.EXCEPTIONS_TO_BACKOFF,
                          max_tries=self.MAX_BACKOFF_ATTEMPTS,
                          on_backoff=backoff_handler(debug_only=False),
                          on_success=success_handler(),
                          on_giveup=giveup_handler())
    def _send_messages_helper(entries):
        """Inner helper function for sending messages with backoff_handler

        Args:
            entries (list<dict>): List of SQS SendMessageBatchRequestEntry items
        """
        LOGGER.info('Sending %d message(s) to %s', len(entries), self.queue.url)

        response = self.queue.send_messages(Entries=entries)

        if response.get('Successful'):
            LOGGER.info(
                'Successfully sent %d message(s) to %s with MessageIds %s',
                len(response['Successful']),
                self.queue.url,
                ', '.join(
                    '\'{}\''.format(resp['MessageId'])
                    for resp in response['Successful']
                )
            )

        if response.get('Failed'):
            self._check_failures(response)  # Raise an exception if this is our fault
            self._strip_successful_records(entries, response)

        return response

    message_entries = [
        {
            'Id': str(idx),
            'MessageBody': message
        } for idx, message in enumerate(batched_messages)
    ]

    # The try/except here is to catch any raised errors at the end of the backoff
    try:
        return _send_messages_helper(message_entries)
    except self.EXCEPTIONS_TO_BACKOFF:
        LOGGER.exception('SQS request failed')
        # Use the current length of the message_entries in case some records were
        # successful but others were not
        self._log_failed(len(message_entries))
        return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def send_sqs_message(imageinfo):\r\n try:\r\n message = json.dumps(imageinfo)\r\n sqsclient = boto3.client('sqs')\r\n sqsclient.send_message(\r\n QueueUrl=os.environ['QueueURL'],\r\n MessageBody=message\r\n )\r\n\r\n except ClientError as err:\r\n logger.error(err.response)", "def send(self, payloads):\n records = self._payload_messages(payloads)\n\n # SQS only supports up to 10 messages so do the send in batches\n for message_batch in self._message_batches(records):\n response = self._send_messages(message_batch)\n self._finalize(response, message_batch)", "def send_msg(self, my_queue, my_msg):", "def dispatch_event(event):\n queue = connect_to_sqs() \n logging.info('Writing event to SQS:' + str(json.dumps(event.params)))\n\n visitor = event.params['visitors'][0]['visitor_id']\n attributes = event.params['visitors'][0]['attributes']\n snapshot = event.params['visitors'][0]['snapshots'][0]\n\n response = queue.send_message(MessageBody=json.dumps({visitor: (attributes, snapshot)}))", "def send(self, msg):\n self.ws.send(json.dumps(msg))", "def post_sms(self, date='2014-02-12', time='22:20:33', rnumber='0612345678', rname='Receiver', snumber='0612345678', sname='Sender', message='Bonjour !'):\n xmldata1 = \"<?xml version=\\\"1.0\\\" encoding=\\\"UTF-8\\\"?>\" + \\\n \"<s:Envelope xmlns:s=\\\"http://schemas.xmlsoap.org/soap/envelope/\\\" s:encodingStyle=\\\"http://schemas.xmlsoap.org/soap/encoding/\\\">\" + \\\n \"<s:Body>\" + \\\n \"<u:AddMessage xmlns:u=\\\"urn:samsung.com:service:MessageBoxService:1\\\">\" + \\\n \"<MessageType>text/xml</MessageType>\" + \\\n \"<MessageID>can be anything</MessageID>\" + \\\n \"<Message>\" + \\\n \"&lt;Category&gt;SMS&lt;/Category&gt;\" + \\\n \"&lt;DisplayType&gt;Maximum&lt;/DisplayType&gt;\" + \\\n \"&lt;ReceiveTime&gt;\" + \\\n \"&lt;Date&gt;\"\n xmldata2 = \"&lt;/Date&gt;\" + \\\n \"&lt;Time&gt;\"\n xmldata3 = \"&lt;/Time&gt;\" + \\\n \"&lt;/ReceiveTime&gt;\" + \\\n \"&lt;Receiver&gt;\" + \\\n \"&lt;Number&gt;\"\n xmldata4 = \"&lt;/Number&gt;\" + \\\n \"&lt;Name&gt;\"\n xmldata5 = \"&lt;/Name&gt;\" + \\\n \"&lt;/Receiver&gt;\" + \\\n \"&lt;Sender&gt;\" + \\\n \"&lt;Number&gt;\"\n xmldata6 = \"&lt;/Number&gt;\" + \\\n \"&lt;Name&gt;\"\n xmldata7 = \"&lt;/Name&gt;\" + \\\n \"&lt;/Sender&gt;\" + \\\n \"&lt;Body&gt;\"\n xmldata8 = \"&lt;/Body&gt;\" + \\\n \"</Message>\" + \\\n \"</u:AddMessage>\" + \\\n \"</s:Body>\" + \\\n \"</s:Envelope>\"\n\n #Remove accentuation of message\n message = unicodedata.normalize('NFKD', message).encode('ASCII', 'ignore')\n #Bool to check error\n error_found = False\n #Create Header for Message\n header = \"POST /PMR/control/MessageBoxService HTTP/1.0\\r\\n\" + \\\n \"Content-Type: text/xml; charset=\\\"utf-8\\\"\\r\\n\" + \\\n \"Host: \" + self.host + \"\\r\\n\" + \\\n \"Content-Length: \" + str(len(xmldata1) + len(date) + \\\n len(xmldata2) + len(time) + \\\n len(xmldata3) + len(rnumber) + \\\n len(xmldata4) + len(rname) + \\\n len(xmldata5) + len(snumber) + \\\n len(xmldata6) + len(sname) + \\\n len(xmldata7) + len(message) + \\\n len(xmldata8)) + \"\\r\\n\" + \\\n \"SOAPACTION: urn:samsung.com:service:MessageBoxService:1#AddMessage\\r\\n\" + \\\n \"Connection: close\\r\\n\\r\\n\"\n #Create socket\n full_soap_request = header + \\\n xmldata1 + date + \\\n xmldata2 + time + \\\n xmldata3 + rnumber + \\\n xmldata4 + rname + \\\n xmldata5 + snumber +\\\n xmldata6 + sname +\\\n xmldata7 + message + xmldata8\n msg_port = 52235;\n \n try:\n # Open Socket\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n 
sock.connect((self.host, msg_port))\n sock.send(full_soap_request.encode('utf-8'))\n read = sock.recv(1024)\n sock.close()\n except socket.error, e:\n error_found = True\n raise TVError(e[1], 'post_sms')\n finally:\n sock.close()\n sock = None\n return error_found", "def send_to_sqs(sqs_queue_url, message):\n\n try:\n # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sqs.html#SQS.Client.send_message\n response = SQS.send_message(\n QueueUrl=sqs_queue_url,\n MessageBody=str(message)\n )\n except ClientError as e:\n LOGGER.error(e)\n return None\n return response", "def SendMessage(service, user_id, message):\n\n message_resp = (service.users().messages().send(userId=user_id, body=message).execute())\n print(\"Sucessfull!!! \", message_resp)", "def send_sms(self, sms):\n pass", "def output_raw_message(text):\n database.messages_output_queue.put(text)", "def send_message(self, message):\n pass", "def send_message(self, msg):\n self.logger.debug(msg)\n self.writer.send(json.dumps(msg))", "def send_messages(messages: dict) -> str:\n\n approval = input(\"\\nDo you wish to send these messages? Type 'yes' or 'no': \")\n if approval.lower() != 'yes':\n return \"Messages not approved. Please run the program again.\"\n\n for number in messages:\n body = messages[number]\n from_number = TWILIO_NUM # Already has +1 on it.\n to_number = \"+1\" + number\n\n message = client.messages.create(body=body, from_=from_number, to=to_number)\n print(message.sid)\n\n return \"All messages sent!\"", "def send(event):\n\n\tid = get_hostname()\n\n\tmessage = str(id) + \"|\" + str(event)\n\n\tif mq is None: # if no mq exists\n\t\tprint \"mq is None\"\n\n\telse: # if mq exists\n\t\ttry:\n\n\t\t\tmq.send(message)\n\t\t\tprint 'completed sending message'\n\n\t\texcept Exception as e:\n\n\t\t\tprint 'failed to send message: {}'.format(e)", "def publish_messages(line): \n command = \"gcloud beta pubsub topics publish \"+ topic_name+\" --message \"+'\"'+str(line)+'\"'\n os.system(command)", "def send_msg(self, msg):\n self.msg_queue.put(dict(to=settings.IOTTLY_XMPP_SERVER_USER,msg='/json ' + json.dumps(msg)))", "async def send(self, message):", "def output_message(text):\n out_text = '[' + str(datetime.now().strftime(\"%H:%M:%S\")) + '] ' + text\n database.messages_output_queue.put(out_text)", "def send_messages_to_ks(records: List[str], stream_name: str):\n log.info('Sending message to Kinesis Stream')\n client = boto3.client('kinesis')\n return client.put_records(\n Records=[\n {\n 'Data': record + '\\n',\n 'PartitionKey': '1'\n } for record in records],\n StreamName=stream_name\n )", "def _write_message(self, message):\n raw_data = message.serialize()\n debug(\"writing outgoing message of type \" + message.__class__.__name__)\n self.request.sendall(raw_data)", "def subscribe_sqs_queue(self, topic, queue):\r\n t = queue.id.split('/')\r\n q_arn = 'arn:aws:sqs:%s:%s:%s' % (queue.connection.region.name,\r\n t[1], t[2])\r\n resp = self.subscribe(topic, 'sqs', q_arn)\r\n policy = queue.get_attributes('Policy')\r\n if 'Version' not in policy:\r\n policy['Version'] = '2008-10-17'\r\n if 'Statement' not in policy:\r\n policy['Statement'] = []\r\n statement = {'Action' : 'SQS:SendMessage',\r\n 'Effect' : 'Allow',\r\n 'Principal' : {'AWS' : '*'},\r\n 'Resource' : q_arn,\r\n 'Sid' : str(uuid.uuid4()),\r\n 'Condition' : {'StringLike' : {'aws:SourceArn' : topic}}}\r\n policy['Statement'].append(statement)\r\n queue.set_attribute('Policy', json.dumps(policy))\r\n return resp", "def send_message():\n incoming 
= request.get_json()\n message = Message(\n user_id = session['user_id'],\n room_id = incoming[\"room_id\"],\n sendTime = datetime.now(),\n content = incoming[\"content\"]\n )\n db.session.add(message)\n db.session.commit()\n return jsonify(\n content = incoming[\"content\"]\n )", "def coinbasepro_on_message(caller, msg):\n msg = json.loads(msg)\n # if msg['type'] == 'match':\n if msg['type'][2] == 't':\n chnl = msg[\"product_id\"]\n df = pd.DataFrame.from_records(\n data=[{\n \"tid\": int(msg[\"trade_id\"]),\n \"price\": float(msg[\"price\"]),\n \"volume\": float(msg['size']) if msg['side'] == 'buy' else -float(msg['size']),\n \"datetime\": pd.to_datetime(msg[\"time\"])\n }],\n index=\"datetime\"\n )\n df.index = df.index.tz_convert(\"GMT0\")\n caller.write(chnl, df)\n\n return chnl, df", "def _create_msg(self, tr_id, payload, confirm, expire_time, encoding):\n tmp = [\"<SSAP_message><transaction_type>INSERT</transaction_type>\",\n \"<message_type>REQUEST</message_type>\"]\n tmp.extend([\"<transaction_id>\", str(tr_id), \"</transaction_id>\"])\n tmp.extend([\"<node_id>\", str(self.node_id), \"</node_id>\"])\n tmp.extend([\"<space_id>\", str(self.targetSS), \"</space_id>\"])\n tmp.extend(['<parameter name=\"insert_graph\" encoding=\"%s\">' % encoding.upper(),\n str(payload), \"</parameter>\"])\n tmp.extend(['<parameter name = \"confirm\">',\n str(confirm).upper(),\n \"</parameter>\",\n \"</SSAP_message>\"])\n return \"\".join(tmp)", "def send_message(prepared_data):\n message_url = BOT_URL + '/sendMessage'\n requests.post(message_url, json=prepared_data) # don't forget to make import requests lib", "def send_message(self, message):\n self.client.queue.put(message)", "def write_message_to_queue(queue_url, experiment_name, message_text,\n model=None):\n sqs = boto3.client('sqs')\n\n instance_id, request_id = get_instance_and_spot_request_id()\n\n if model is not None:\n epoch = model.current_epoch\n else:\n epoch = None\n\n attributes = {\n \"experiment_name\": {\n \"DataType\": \"String\",\n \"StringValue\": experiment_name\n },\n \"epoch\": {\n \"DataType\": \"String\",\n \"StringValue\": str(epoch)\n },\n \"timestamp\": {\n \"DataType\": \"String\",\n \"StringValue\": str(datetime.datetime.now().isoformat())\n },\n \"instance_id\": {\n \"DataType\": \"String\",\n \"StringValue\": str(instance_id)\n },\n \"request_id\": {\n \"DataType\": \"String\",\n \"StringValue\": str(request_id)\n }\n }\n\n # The message group id is the general name of the experiment, minus the\n # suffix indicating the simulation number\n message_group_id = \"_\".join(experiment_name.split(\"_\")[:-1])\n\n response = sqs.send_message(\n QueueUrl=queue_url,\n MessageAttributes=attributes,\n MessageBody=message_text,\n MessageGroupId=message_group_id,\n MessageDeduplicationId=str(time.time()) + str(random.random())\n\n )\n\n print(\"I have written message {0} with response {1}\".format(\n message_text, response[\"ResponseMetadata\"][\"HTTPStatusCode\"]))\n\n if response[\"ResponseMetadata\"][\"HTTPStatusCode\"] != 200:\n print(\"Something went wrong! 
Full response:\")\n print(response)", "def send_str(self, msg_str, update, context):\n MSG = msg_str\n context.bot.send_message(chat_id=update.message.chat_id, text=MSG)", "def send_messages(self, bot, update, messages):\n\n for msg in messages:\n self.send_message(bot, update, msg)", "def send(self, message):\n pass", "def zmq_qry_pub(context):\n app.logger.info(\"zmq_qry_pub started\")\n socket = context.socket(zmq.PUB)\n socket.connect('tcp://127.0.0.1:7000')\n\n timestamps = ['0810', '0811', '0812']\n idx = EquityIndex('CAC')\n\n # for ts in cycle(timestamps):\n for ts in timestamps:\n price_data = idx.components_last_px(ts)\n\n for topic, msg_data in price_data.iteritems():\n if msg_data:\n # push the code/ticker into the dict\n msg_data['ticker'] = topic\n # reformat with a colon\n msg_data['ts'] = ts[:2] + ':' + ts[2:]\n # and jsonify....\n msg = json.dumps(msg_data)\n socket.send(msg)\n\n gevent.sleep(WAIT)\n\n app.logger.info(\"zmq_qry_pub closed\")", "def push_queue(self, url):\n self.sqs_client.send_message(\n QueueUrl=self.sqs_queue,\n MessageBody=url,\n )", "def send_sms(msg, phone_number, logger=None):\n\n if logger:\n logger.debug(f\"msg: '{msg}'\")\n logger.debug(f\"phone_number: '{phone_number}'\")\n sns = boto3.client('sns')\n try:\n sns.publish(PhoneNumber=phone_number, Message=msg)\n except BaseException as e:\n if logger:\n logger.error(e)\n raise e\n if logger:\n logger.info(f'SMS with available dates was sent to {phone_number}.')", "def send_SMS_wotd():\n\tcon = lite.connect('subscribers.db')\n\tcon.text_factory = str\n\tcur = con.cursor()\n\tcur.execute(\"SELECT * FROM Subscribers\")\n\trows = cur.fetchall()\n\n\tfor row in rows:\n\t#for person in contacts:\n\t\tmessage = client.messages.create(\n\t \tbody= \"Guess the word of the day! 
Definition: \" + wotd_def , # Message body, if any\n\t \tto= str(row[0]),#contacts[person],\n\t\t\tfrom_=base,\n\t\t)\n\tcon.close()", "def send(self, schema: str, msg: dict):\n raise NotImplementedError", "def send_message(self):\n self.preprocess_text()\n message_text = self.create_message_text()\n \n telnyx.Message.create(\n from_=configs.source_number,\n to=self.destination_number,\n text=message_text,\n )", "def scribe_write(self, messages):\r\n try:\r\n self.transport.open()\r\n result = self.client.Log(messages)\r\n if result != scribe.ResultCode.OK:\r\n raise self.ScribeHandlerException('Scribe message submission failed')\r\n except TTransport.TTransportException as err:\r\n raise self.ScribeHandlerException('Could not connect to scribe host=%s:%s error=%s'\r\n % (self._host, self._port, err))\r\n finally:\r\n self.transport.close()", "def _send_messages_helper(entries):\n LOGGER.info('Sending %d message(s) to %s', len(entries), self.queue.url)\n\n response = self.queue.send_messages(Entries=entries)\n\n if response.get('Successful'):\n LOGGER.info(\n 'Successfully sent %d message(s) to %s with MessageIds %s',\n len(response['Successful']),\n self.queue.url,\n ', '.join(\n '\\'{}\\''.format(resp['MessageId'])\n for resp in response['Successful']\n )\n )\n\n if response.get('Failed'):\n self._check_failures(response) # Raise an exception if this is our fault\n self._strip_successful_records(entries, response)\n\n return response", "def handle_messages(self):\n\n #Get the time at which the code started running\n current_time = datetime.datetime.now()\n\n #get all messages between now and the time where a message was last received\n messages = self.client.messages.list(\n date_sent_before = datetime.datetime.now()+ datetime.timedelta(hours = TIMEDIFFERENCE),\n date_sent_after = self.last_message_timing + datetime.timedelta(hours = TIMEDIFFERENCE)\n )\n\n #Iterate through all the new messages\n for record in messages:\n #If it is not from the Twilio Client\n if record.from_ != 'whatsapp:+14155238886':\n #Then update the timing of the last message to the current time\n self.last_message_timing = current_time\n #If the message sent is the '?' 
that seeks to get the number\n #of people in the queue\n if record.body == '?':\n #Get the data about people from firebase\n people_data = self.firebase.get_data('people_count')\n #Get the number of people queueing\n no_of_people = people_data['people_count']\n #Create a message from the API to tell the person\n #asking the number of people in the queue\n message = self.client.messages.create(\n body='The number of the people in the queue is {}'.format(no_of_people),\n from_='whatsapp:{sender_number}'.format(**self.config),\n to=record.from_\n )", "def send_sms(to, datas, temp_id):\n cpp = CCP()\n cpp.sendTemplateSMS(to, datas, temp_id)", "def _send(self, ws, func, params):\n ws.send(self._create_msg(func, params))", "def test_send_queued_mail(self):\n # Make sure that send_queued_sms with empty queue does not raise error\n call_command('send_queued_sms')\n\n # Make sure bulk sms runs successfully\n smses = []\n for i in range(0, 300):\n # create 3 failed sms\n if i % 100 == 0:\n sms = SMS(to='+6280000000000', status=STATUS.queued, backend_alias='error')\n else:\n sms = SMS(to='+6280000000000', status=STATUS.queued, backend_alias='dummy')\n smses.append(sms)\n\n SMS.objects.bulk_create(smses)\n\n call_command('send_queued_sms')\n\n self.assertEqual(SMS.objects.filter(status=STATUS.sent).count(), 297)\n self.assertEqual(SMS.objects.filter(status=STATUS.failed).count(), 3)", "def send(self, message, delay=0, message_attributes=None):\n if message_attributes is None:\n message_attributes = {}\n return self.client.send_message(\n QueueUrl=self.app.settings['SQS_OUTBOUND_QUEUE_URL'],\n MessageBody=json.dumps(message),\n DelaySeconds=delay,\n MessageAttributes=message_attributes,\n )", "def send_msgs():\n\n scheduled = ScheduledMessage.query.filter( (ScheduledMessage.send_date<=datetime.datetime.now()) & (ScheduledMessage.sent=='f') ).all()\n print \"scheduled msgs = \", scheduled\n\n for msg in scheduled:\n user = User.query.filter_by(user_id=msg.user_id).one()\n contact = Contact.query.filter_by(contact_id=msg.contact_id).one()\n messages = Message.query.filter((Message.created_by==user.user_id) | (Message.created_by==1)).all()\n random_int = random.randint(0, len(messages) - 1)\n msg_text = messages[random_int].msg_text\n gmail.SendMessage(user.email, contact.email, 'Hey', msg_text, msg_text)\n msg.sent = True\n # schedule next message\n next_msg = ScheduledMessage(user_id=user.user_id, \n contact_id=contact.contact_id,\n send_date=msg.send_date + datetime.timedelta(days=contact.contact_period),\n sent=False)\n db.session.add(next_msg)\n db.session.commit()\n print \"sent message\"\n\n return \"All scheduled messages sent.\"", "def _send(self, message: str) -> None:\n logger.info(\"Send: {}\".format(message['type']))\n logger.debug(\"Send: {}\".format(message))\n\n message_b = (json.dumps(message) + '\\r\\n').encode()\n self.transport.write(message_b)", "def send(self, msg):\n pass", "def send(self, msg):\n pass", "def send(self, msg):\n pass", "def handle(event, context):\n dynamodb = boto3.client('dynamodb')\n connection_id = event['requestContext']['connectionId']\n connection_ids = []\n paginator = dynamodb.get_paginator('scan')\n\n # Retrieve all connection_ids from the database\n for page in paginator.paginate(TableName=os.environ.get('CONNECTION_TABLE_NAME')):\n connection_ids.extend(page['Items'])\n\n endpoint_url = f\"https://{event['requestContext']['domainName']}/{event['requestContext']['stage']}\"\n apigatewaymanagementapi = boto3.client('apigatewaymanagementapi', 
endpoint_url=endpoint_url)\n\n msg_counter = dynamodb.get_item(TableName=os.environ.get('MSG_COUNTER_TABLE_NAME'), Key={'myid': {'S': 'counter'}})\n if not msg_counter:\n msg_counter = 0\n else:\n msg_counter = msg_counter['Item']['msgCount']['N']\n\n data = f\"Welcome to Simple Chat\\n\" \\\n f\"There are {len(connection_ids)} users connected.\\n\" \\\n f\"Total of {msg_counter} messages recorded as of today.\\n\\n\"\n\n messages = get_messages()\n data = data + '\\n'.join(messages)\n send_to_self(apigatewaymanagementapi, connection_id, data)\n\n response = dynamodb.get_item(\n TableName=os.environ.get('CONNECTION_TABLE_NAME'),\n Key={'connectionId': {'S': connection_id}}\n )\n data = f\"{response['Item']['username']['S']} has joined the chat room\"\n send_to_all(apigatewaymanagementapi, connection_ids, data)\n\n return {}", "def sendMessage_0(self, messages):\n for message in messages:\n self.sendMessage(message)", "def tweet(msg):\r\n m = \"\\n{}\\n\".format(msg)\r\n arcpy.AddMessage(m)\r\n print(m)\r\n print(arcpy.GetMessages())", "def publish(self, message: str) -> None:", "def callback(ch, method, properties, body):\n try:\n content_str = body.decode(\"utf-8\")\n\n json_msg = json.loads(content_str)\n\n # Push the message into the database\n json_df = get_df(json_msg)\n uc_df = pd.DataFrame(json_df, index=[0])\n\n sqldbm = SQLDatabaseManager()\n\n port = '3306'\n ret = sqldbm.connect(host=DB_IP,\n database=DATABASE,\n username=DB_UNAME,\n password=DB_PWD,\n port=port)\n\n if ret != 1:\n print(\" Closing program \")\n return\n\n sqldbm.insert(dframe=uc_df, table_name=\"UCHICAGO\",\n if_table_exists=\"append\")\n\n print(\"Message received -> stored\")\n return 1\n except Exception as e:\n print(\"Exception caught in callback \")\n print(e)\n print(\"Message could not be saved to DB. 
Skipping ....\")\n return -1", "def call_schedule(self, bot, update):\n bot.send_message(update.message.chat_id, '_1 пара_ 08:30 - 10:05\\n'\n '_2 пара_ 10:25 - 12:00\\n'\n '_3 пара_ 12:20 - 13:55\\n'\n '_4 пара_ 14:15 - 15:50\\n'\n '_5 пара_ 16:10 - 17:45',\n parse_mode='Markdown')", "def push(message: str, date: datetime.datetime):\n msg_id = str(uuid.uuid4())\n pipeline = connection.pipeline()\n pipeline.set(msg_id, message)\n pipeline.zadd(QUEUE_KEY, {\n msg_id: date.timestamp()\n })\n pipeline.execute()\n logger.info(f'Save a new future email: [message: {message}, date: {date}]')", "def format(self, message):", "def publish_messages(topic_arn, messages):\n sns_client = boto3.client('sns')\n for m in messages:\n message_as_json = json.dumps(m)\n response = sns_client.publish(\n TopicArn=topic_arn,\n MessageStructure='json',\n Message=json.dumps({\n 'default': message_as_json\n }),\n Subject=f'Source: {__file__}'\n )\n response_status = response['ResponseMetadata']['HTTPStatusCode']\n print(f'{message_as_json} -> {topic_arn} [{response_status}]')\n assert response_status == 200, response", "def sendMessage(message: str):\n pass", "async def _send_message(producer, event_data):\n batch = await producer.create_batch()\n batch.add(EventData(_serialize_event_data_as_json(event_data)))\n await producer.send_batch(batch)", "async def send_message(self, message):\n if message is None:\n return\n if isinstance(message, dict):\n message = json.dumps(message)\n if isinstance(message, Number):\n message = str(message)\n\n await self.ws.send(message)", "def send_text(msg, up):\n try:\n client = TwilioRestClient(account=TWILIO_ACCOUNT_SID,\n token=TWILIO_AUTH_TOKEN)\n c = client.sms.messages.create(to=up.phone,\n from_=WATTTIME_PHONE,\n body=msg.msg)\n TwilioSMSEvent(user=up.user,\n msg_type=msg.msg_type,\n to_number=up.phone,\n from_number=WATTTIME_PHONE,\n body=msg.msg).save()\n\n debug(\"texted '{}' to {}\".format(msg, str(up.name)))\n return True\n except:\n print (\"Faild message\", up.phone, WATTTIME_PHONE, msg.msg)\n debug(\"failed to text '{}' to {}\".format(msg, str(up.name)))\n return False", "def send_sms(self, body):\n message = self.twilio_client.sms.messages.create(to=self.to_num, from_=self.from_num, body=body)", "def writeMessage(self,message):\n pass", "def test_publish(self):\n target_arn = 'testing'\n supercuboid_key = 'acd123'\n message_id = '123456'\n receipt_handle = 'a1b2c3d4'\n message = serializer.encodeIngestMessage(supercuboid_key, message_id, receipt_handle)\n self.sns.publish(self.topic_arn, message)\n message = self.sns.subscribe(self.topic_arn)", "def send_message(self, serial_message):\n #print(\"Sending message: %s\" % serial_message)\n self.sendString(serial_message)", "def handler(event, context):\n message = [record['body'] for record in event.get('Records', [])]\n email_record = json.loads(message[0])[\"Records\"][0]\n\n new_email = [(email_record['s3']['bucket']['name'],\n urllib.parse.unquote(email_record['s3']['object']['key']))]\n\n if new_email:\n LOG.info(\"Changed/new object notification received from S3 bucket to the sqs queue\")\n for bucket, s3_key in new_email:\n LOG.info(\"Processing S3 bucket://%s/%s\", bucket, s3_key)\n email_body = S3.Object(bucket, s3_key).get()['Body'].read().decode('utf-8')\n\n # Process PBS job info and push the metadata doc to AWS ES\n _process_pbs_job_info(email_body)\n else:\n LOG.info(\"No new/updated email record found in the S3 bucket\")", "def _announce_updates(self, updates):\n serialized = json.dumps(updates)\n 
log.debug('Sending serialized message: ' + serialized)\n msg = amqp.Message(serialized, content_type='application/json')\n self.channel.basic_publish(msg, self.amqp['routing_key'])", "def sendSMS(sender,recipients,smsBody,provider_api_username='herve.m',provider_api_password='jamiroquai'):\n def printOutput(sender,recipients,smsBody):\n \"\"\"dev, debugging utility method\"\"\"\n message = ' sender : ' + sender\n message += '\\n to : ' + recipients[0]\n message += '\\n body : ' + smsBody\n print ''\n print ''\n print '____________________________________________________________________'\n print message\n print '____________________________________________________________________'\n\n def parseOutput(output):\n \"\"\"Returns parsed values from output with format:\n SUCCESS MessageId: 357958; Cost: 0.80; 0: Accepted for delivery;\n\n Returns:\n boolean (success),\n int (MessageId),\n int (status),\n float (cost),\n string (status message)\n \"\"\"\n vls=output.split(';')\n if len(vls)>=3:\n sm=vls[0].split(' ')\n cs=vls[1].split(':')\n st=vls[2].split(':')\n return str(sm[0]).find('SUCCESS')>=0,int(sm[2]),int(st[0].lstrip()),float(cs[1].lstrip()),st[1].lstrip()\n else:\n return False,-1,-1,0,output\n\n url='http://www.amdtelecom.net/api/sendsms.php'\n parameters={\n 'from' : sender,\n 'to' : recipients[0],\n 'username' : provider_api_username,\n 'password' : provider_api_password,\n 'text' : stringToAscii(smsBody)\n }\n fetchRes=None\n msg='util.sendSMS:logging.info'\n try:\n logging.info('util.sendSMS.fetchHttpRequestData')\n msg='FETCHING SMS SEND FROM API'\n fetchRes=fetchHttpRequestData(parameters,\n url,\n request_output='text',\n request_method='GET')\n if fetchRes is not None:\n msg='PARSING SMS SEND FETCH API OUTPUT: '\n bst,msgid,stid,cs,msg=parseOutput(fetchRes)\n if not bst:logging.error('ERROR RETURNED FROM SMS SEND API:'+fetchRes+' - PARAMS'+str(parameters))\n return fetchRes,bst,msgid,stid,float(cs),msg\n else:\n logging.error(msg+' - PARAMS'+str(parameters))\n return (None,False,-1,-1,float(0),\n msg+' - PARAMS'+str(parameters))\n except Exception, ex:\n if fetchRes is None:fetchRes='None'\n logging.error('ERROR '+msg+' - EXCEPTION:'+str(ex)+'- FETCH RES:'+fetchRes)\n return (None,False,-1,-1,float(0),\n msg+' - PARAMS'+str(parameters)+' - FETCH RES:'+fetchRes)", "async def queue(self, ctx):\n srv = self.get_server_dict(ctx.message.server.id)\n que = srv['queue']\n msg = self.format_song_display('▶', srv['song'][1], srv['song'][2], srv['song'][3])\n i = 1\n for item in que:\n line = self.format_song_display(i, item[1], item[2], item[3])\n i += 1\n msg += line\n await ctx.bot.send_message(ctx.message.channel, msg)", "def send(self, message):\n\t\tmessage_string = self.send_address + \" \" + message + \" /\"\n\t\tself.add_to_queue(message_string)", "def open_quote(self):\n self.message += '{'", "def send(self, event, message):\n pass", "def push_from_spark():\n\n try:\n\n print('Receiving data from webhook')\n\n # step 1 -- we got message id, but no content\n #\n message_id = request.json['data']['id']\n\n # step 2 -- get the message itself\n #\n url = 'https://api.ciscospark.com/v1/messages/{}'.format(message_id)\n bearer = context.get('spark.CISCO_SPARK_TOKEN')\n headers = {'Authorization': 'Bearer '+bearer}\n response = requests.get(url=url, headers=headers)\n\n if response.status_code != 200:\n print(\"Received error code {}\".format(response.status_code))\n print(response.json())\n raise Exception\n\n # step 3 -- push it in the handling queue\n #\n 
ears.put(response.json())\n\n return \"OK\\n\"\n\n except Exception as feedback:\n print(\"ABORTED: fatal error has been encountered\")\n raise", "def send_invite_sms(profile, template_name, extra_context):\n c = {'profile': profile}\n c.update(extra_context or {})\n body = loader.render_to_string(template_name, c).strip()\n if len(body) <= 160:\n messages = [body.replace(\"\\n\", \" \")]\n else:\n messages = body.split(\"\\n\")\n for body in messages:\n profile.send_sms(body)", "def lambda_handler(event, context):\n for record in event[\"Records\"]:\n arn = record[\"Sns\"][\"TopicArn\"].split(\":\")\n message = json.loads(record[\"Sns\"][\"Message\"])\n message_handler(arn[3], message)\n return ''", "def tweet(msg):\n m = \"\\n{}\\n\".format(msg)\n arcpy.AddMessage(m)\n print(m)\n print(arcpy.GetMessages())", "def _send(self, msg, adb_info):\n packed = msg.pack()\n _LOGGER.debug(\"bulk_write(%d): %r\", len(packed), packed)\n self._transport.bulk_write(packed, adb_info.transport_timeout_s)\n\n if msg.data:\n _LOGGER.debug(\"bulk_write(%d): %r\", len(msg.data), msg.data)\n self._transport.bulk_write(msg.data, adb_info.transport_timeout_s)", "def sms_reply():\n # Fetch the message\n msg = request.form.get('Body')\n\n # Create reply\n resp = MessagingResponse()\n resp.message(\"You said: {} \\n *Kayra Dev* \".format(msg))\n \n account_sid = 'AC195cf76c0d725909794c30f9b0c32961' \n auth_token = '70531f5d14ec79c14254cf7fdfb40bad' \n client = Client(account_sid, auth_token) \n \n message = client.messages.create( \n from_='whatsapp:+14155238886', \n body=msg, \n to='whatsapp:+237696527034' \n ) \n \n print(message.sid)\n return str(resp)", "def sendMessage(self, payload, isBinary):", "async def send_message(request, token, data):\n # If data exists key 'message'\n try: \n async with request.app['db'].acquire() as conn:\n # First: Get a Chat ID \n chat_id = await PostgreSQL.get_chat_id(conn, data['chat'])\n # Second: Get an User ID \n user_id = await PostgreSQL.get_user_id(conn, token=token)\n # Third: Add a new message\n query = db.messages.insert().values(\n text = data['message'],\n isEdited = False,\n owner_id = user_id,\n chat_id = chat_id\n )\n # Fourth: make a response\n result = await conn.fetch(query)\n response = {\n 'status':'success',\n 'msg_id': result[0]['id']\n }\n except Exception as e:\n response = {\n 'status':'error',\n 'message':'Database error'\n }\n return response", "def _mq_callback(self, message):\n try:\n raw_data = RawData(message.body)\n try:\n session = self.ss_dao.get_one(raw_data.key[0], raw_data.session_id)\n\n # update the click_xxx info\n session = self.update_session_body(raw_data, session)\n duration = raw_data.key[1] - time_helper.session_to_epoch(session.key[1])\n session.total_duration = duration\n\n index = session.number_of_entries\n self.add_entry(session, index, raw_data)\n self.performance_ticker.update.increment_success()\n except LookupError:\n # insert the record\n session = SingleSession()\n\n # input data constraints - both session_id and user_id must be present in MQ message\n session.key = (raw_data.key[0], time_helper.raw_to_session(raw_data.key[1]))\n session.session_id = raw_data.session_id\n session.ip = raw_data.ip\n session.total_duration = 0\n\n session = self.update_session_body(raw_data, session)\n self.add_entry(session, 0, raw_data)\n self.performance_ticker.insert.increment_success()\n\n if time.time() - self._last_safe_save_time < self.SAFE_SAVE_INTERVAL:\n is_safe = False\n else:\n is_safe = True\n self._last_safe_save_time = 
time.time()\n\n self.ss_dao.update(session, is_safe)\n self.consumer.acknowledge(message.delivery_tag)\n except AutoReconnect as e:\n self.logger.error('MongoDB connection error: %r\\nRe-queueing message & exiting the worker' % e)\n self.consumer.reject(message.delivery_tag)\n raise e\n except (KeyError, IndexError) as e:\n self.logger.error('Error is considered Unrecoverable: %r\\nCancelled message: %r' % (e, message.body))\n self.consumer.cancel(message.delivery_tag)\n except Exception as e:\n self.logger.error('Error is considered Recoverable: %r\\nRe-queueing message: %r' % (e, message.body))\n self.consumer.reject(message.delivery_tag)", "def check_and_send_message_to_queue(queue_url, str_message):\n msg_str, msg_sent_timestamp, receipt_handle = lib.get_from_sqs_queue(queue_url, 20, 5)\n\n if not msg_str:\n logger.warning('Unable to retrieve message during this cycle.')\n return \n msg_data = json.loads(msg_str)\n \n msg_ts = float(msg_sent_timestamp) * 0.001\n logger.info('Message from queue: {}'.format(msg_data))\n current_time = time.time()\n\n logger.info('msg ts: {} current ts: {}'.format(msg_ts, current_time))\n\n if (current_time - msg_ts) > 259200:\n logger.info('Message in queue needs to be updated')\n lib.send_message_to_queue(queue_url, str_message)\n lib.delete_message_from_queue(queue_url, receipt_handle) \n else:\n logger.info('Message in queue is still current.')", "def send_message(message, destination):\n\n #Your code here\n pass", "def quick_email(self, send_to, subject, body, style=None):\n message = Message(body, style=style)\n\n self.send_message(message, send_to, subject)", "def bitfinex2_on_message(caller, msg):\n msg = json.loads(msg)\n if caller.subbed_count == 7:\n if msg[1] == \"te\":\n chnl = msg[0]\n body = msg[2]\n df = pd.DataFrame.from_records(\n data=[{\n \"tid\": int(body[0]),\n \"price\": float(body[3]),\n \"volume\": float(body[2]),\n \"datetime\": pd.to_datetime(body[1], unit='ms')\n }],\n index=\"datetime\"\n )\n # print (df)\n df.index = df.index.tz_localize(\"GMT0\")\n caller.write(chnl, df)\n\n return chnl, df\n\n if type(msg) is dict and \"event\" in msg and msg[\"event\"] == \"subscribed\":\n caller.config[\"channel_symbol\"][msg[\"chanId\"]] = \"bitfinex2\" + \":\" + bdic[msg[\"symbol\"]]\n caller.subbed_count += 1\n return\n\n\n chnl = msg[0]\n body = msg[2]\n df = pd.DataFrame.from_records(\n data=[{\n \"tid\": int(body[0]),\n \"price\": float(body[3]),\n \"volume\": float(body[2]),\n \"datetime\": pd.to_datetime(body[1], unit='ms')\n }],\n index=\"datetime\"\n )\n df.index = df.index.tz_convert(\"GMT0\")\n caller.write(chnl, df)\n\n return chnl, df", "def _send(self, command, payload):\n self.work_queue_client.send(command, payload)", "def send_fanout_message(context, region, sns_topic, cli=False):\n\n message = json.dumps({'region': region})\n LOG.debug('send_fanout_message: %s', message)\n\n if cli:\n perform_snapshot(context, region)\n else:\n utils.sns_publish(TopicArn=sns_topic, Message=message)", "def handle_inbound_sms(to, from_):\n body = MessageRequest()\n body.application_id = MESSAGING_APPLICATION_ID\n body.to = [from_]\n body.mfrom = to\n body.text = \"The current date-time is: \" + str(time.time() * 1000) + \" milliseconds since the epoch\"\n try:\n messaging_client.create_message(MESSAGING_ACCOUNT_ID, body)\n except Exception as e:\n print(e)\n return None", "def send_message(self, text):\n self.redis.publish('message_to_user', json.dumps((self.operator_token, text)))", "def sendData(self):\n out = ''\n for line in 
self.sendq:\n line = 'put ' + line + self.tagstr\n out += line + '\\n'\n LOG.debug('SENDING: %s', line)\n\n if not out:\n LOG.debug('no data in sendq?')\n return\n\n try:\n if self.dryrun:\n print out\n else:\n self.cissd.sendall(out)\n self.sendq = []\n # If an exception occurs, try sending data again next time\n except socket.error, msg:\n LOG.error('failed to send data: %s', msg)\n try:\n self.cissd.close()\n except socket.error:\n pass\n self.cissd = None", "def schc_message(self, message):\r\n self.enter_state()\r\n logging.debug(\"\\tMessage:\\n{}\".format(message.as_text()))\r\n return", "def send_text():\n current_app.logger.info('Sending a text message')\n json_data = request.json\n message = f\"{json_data['messagehead']} {json_data['message']} {json_data['link']}\"\n client = Client(config.TEXT_ACCOUNT, config.TEXT_TOKEN)\n message = client.messages.create(to=\"+44\" + config.NUMBER_TO,\n from_=config.NUMBER_FROM,\n body=message)", "def send_through_aprs(self, message) -> None:\n self.get_module_or_raise_error(\"aprs\").send(f\"{message}\") # FIXME FORAMTTING", "def send(self, *msgs):\n assert all(isinstance(m, bytes) for m in msgs)\n self.sendbuffer.extend([self.tag + m + b\"ROGER\" for m in msgs])", "def multiple_messages(self, messages):\n for message in messages:\n cmd = '{}serverMessage \"{}\"'.format(self.console, Commands.aquote(message))\n self.write_command(cmd)", "def makemsg2write(innermsg, inputtext=''):\n nowtuple = time.time()\n nowdatetime = datetime.datetime.fromtimestamp(nowtuple)\n finnalmsg = {'fmId': math.floor(nowtuple),\n 'fmTime': nowdatetime.strftime(\"%Y-%m-%d %H:%M:%S\"),\n 'fmSend': True, 'fmSender': innermsg['fmSender'],\n 'fmType': 'Text',\n 'fmText': f\"{inputtext}\"\n }\n writefmmsg2txtandmaybeevernotetoo(finnalmsg)", "def sendChatMessage(self, msg):\n self.transport.write(msg)", "def send_sms(message):\n client.messages.create(\n body=message,\n from_=os.environ['TWILIO_NUMBER_FROM'],\n to=['TWILIO_NUMBER_TO']\n )", "def send_sber(self):\n if len(self.objects) == 1:\n object_ = self.objects[0]\n subject = 'Закрытие ТТ и вывоз терминала ' \\\n f'{object_.object_SAP_code} {object_.object_name}'\n else:\n subject = 'Закрытие ТТ и вывоз терминалов'\n with open(SBER_TEMPLATE, encoding='utf-8') as f:\n template_text = f.read()\n template = Template(template_text)\n body = template.render(objects=self.objects, date=self.event_date.strftime(\"%d.%m.%Y\"))\n # body = \\\n # '<p>Добрый день!</p>' \\\n # '<p>В связи с закрытием ТТ '\\\n # f'{self.object_SAP_code} {self.object_name}, ' \\\n # 'прошу организовать вывоз терминала в первой половине дня '\\\n # f'{self.event_date.strftime(\"%d.%m.%Y\")}, адрес ТТ - {self.object_address}' \\\n # '<br>Заранее спасибо!</p>'\n\n self.sendmail(\n self.outlook,\n ['[email protected]', '[email protected]'],\n '[email protected]',\n subject,\n body,\n '',\n 1\n )", "async def send_messages_payload(self, messages, new_page_number):\n\t\tprint(\"DocumentChatConsumer: send_messages_payload. \")\n\t\tawait self.send_json(\n\t\t\t{\n\t\t\t\t\"messages_payload\": \"messages_payload\",\n\t\t\t\t\"messages\": messages,\n\t\t\t\t\"new_page_number\": new_page_number,\n\t\t\t},\n\t\t)", "def send_message(message: str) -> None:\n if message == \"\":\n return\n client = Client(config.TWILIO_ACCOUNT_SID, config.TWILIO_AUTH_TOKEN)\n sms = client.messages.create(\n from_=config.TWILIO_SENDER_NUMBER,\n to=config.TWILIO_RECEIVER_NUMBER,\n body=message\n )\n print(sms.status)" ]
[ "0.66801953", "0.6378957", "0.6152751", "0.60166675", "0.5753532", "0.5741753", "0.5703277", "0.56923753", "0.5691338", "0.5684069", "0.5646882", "0.5642848", "0.56269246", "0.56122345", "0.55771816", "0.5573004", "0.55638814", "0.55422395", "0.5541555", "0.55407935", "0.5535105", "0.55204105", "0.55132097", "0.54990125", "0.5498358", "0.5490899", "0.54895085", "0.5484197", "0.54759604", "0.54638207", "0.54586124", "0.54552877", "0.54516447", "0.54472965", "0.54373795", "0.5433477", "0.5431636", "0.5424575", "0.53990734", "0.53674537", "0.5364053", "0.5356913", "0.53546643", "0.5353933", "0.5353553", "0.53332865", "0.53332865", "0.53332865", "0.53328323", "0.53296274", "0.5328781", "0.53235704", "0.53209156", "0.5318097", "0.5309691", "0.5308926", "0.53072697", "0.53072697", "0.5302777", "0.52984697", "0.52959", "0.52957183", "0.52897453", "0.5289542", "0.52867895", "0.528444", "0.528318", "0.5277471", "0.5268197", "0.5267415", "0.52641773", "0.526097", "0.52505386", "0.5249412", "0.5242724", "0.5242422", "0.5241033", "0.5229815", "0.5220205", "0.5217947", "0.52157646", "0.5208172", "0.5207696", "0.5207455", "0.5206523", "0.5206029", "0.52049464", "0.5198735", "0.51916254", "0.5189226", "0.51848036", "0.5182998", "0.51785755", "0.5177701", "0.5177381", "0.51772946", "0.51688915", "0.5168765", "0.51684", "0.51653844", "0.5159382" ]
0.0
-1
Inner helper function for sending messages with backoff_handler
def _send_messages_helper(entries): LOGGER.info('Sending %d message(s) to %s', len(entries), self.queue.url) response = self.queue.send_messages(Entries=entries) if response.get('Successful'): LOGGER.info( 'Successfully sent %d message(s) to %s with MessageIds %s', len(response['Successful']), self.queue.url, ', '.join( '\'{}\''.format(resp['MessageId']) for resp in response['Successful'] ) ) if response.get('Failed'): self._check_failures(response) # Raise an exception if this is our fault self._strip_successful_records(entries, response) return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _send_messages(self, batched_messages):\n @backoff.on_predicate(backoff.fibo,\n lambda resp: len(resp.get('Failed', [])) > 0,\n max_tries=self.MAX_BACKOFF_ATTEMPTS,\n max_value=self.MAX_BACKOFF_FIBO_VALUE,\n on_backoff=backoff_handler(debug_only=False),\n on_success=success_handler(),\n on_giveup=giveup_handler())\n @backoff.on_exception(backoff.expo, self.EXCEPTIONS_TO_BACKOFF,\n max_tries=self.MAX_BACKOFF_ATTEMPTS,\n on_backoff=backoff_handler(debug_only=False),\n on_success=success_handler(),\n on_giveup=giveup_handler())\n def _send_messages_helper(entries):\n \"\"\"Inner helper function for sending messages with backoff_handler\n\n Args:\n entries (list<dict>): List of SQS SendMessageBatchRequestEntry items\n \"\"\"\n LOGGER.info('Sending %d message(s) to %s', len(entries), self.queue.url)\n\n response = self.queue.send_messages(Entries=entries)\n\n if response.get('Successful'):\n LOGGER.info(\n 'Successfully sent %d message(s) to %s with MessageIds %s',\n len(response['Successful']),\n self.queue.url,\n ', '.join(\n '\\'{}\\''.format(resp['MessageId'])\n for resp in response['Successful']\n )\n )\n\n if response.get('Failed'):\n self._check_failures(response) # Raise an exception if this is our fault\n self._strip_successful_records(entries, response)\n\n return response\n\n message_entries = [\n {\n 'Id': str(idx),\n 'MessageBody': message\n } for idx, message in enumerate(batched_messages)\n ]\n\n # The try/except here is to catch any raised errors at the end of the backoff\n try:\n return _send_messages_helper(message_entries)\n except self.EXCEPTIONS_TO_BACKOFF:\n LOGGER.exception('SQS request failed')\n # Use the current length of the message_entries in case some records were\n # successful but others were not\n self._log_failed(len(message_entries))\n return", "def _backoff_handler(details):\n LOGGER.debug('[Backoff]: Trying again in %f seconds after %d tries calling %s',\n details['wait'],\n details['tries'],\n details['target'].__name__)", "async def _sleep_backoff(\n self, settings: Dict[str, Any], transport: AsyncHttpTransport[HTTPRequestType, AsyncHTTPResponseType]\n ) -> None:\n backoff = self.get_backoff_time(settings)\n if backoff <= 0:\n return\n await transport.sleep(backoff)", "def test_pollingBackoff(self):\n\n # Speed up the backoff process\n self.patch(ControllerQueue, \"queuePollingBackoff\", ((1.0, 60.0),))\n\n # Wait for backoff\n while self.node1._actualPollInterval == self.node1.queuePollInterval:\n d = Deferred()\n reactor.callLater(1.0, lambda : d.callback(None))\n yield d\n\n self.assertEqual(self.node1._actualPollInterval, 60.0)\n\n # TODO: this exact test should run against LocalQueuer as well.\n def operation(txn):\n # TODO: how does \"enqueue\" get associated with the transaction?\n # This is not the fact with a raw t.w.enterprise transaction.\n # Should probably do something with components.\n return txn.enqueue(DummyWorkItem, a=3, b=4, jobID=100, workID=1,\n notBefore=datetime.datetime.utcnow())\n yield inTransaction(self.store.newTransaction, operation)\n\n # Backoff terminated\n while self.node1._actualPollInterval != self.node1.queuePollInterval:\n d = Deferred()\n reactor.callLater(0.1, lambda : d.callback(None))\n yield d\n self.assertEqual(self.node1._actualPollInterval, self.node1.queuePollInterval)\n\n # Wait for it to be executed. 
Hopefully this does not time out :-\\.\n yield JobItem.waitEmpty(self.store.newTransaction, reactor, 60)\n\n # Wait for backoff\n while self.node1._actualPollInterval == self.node1.queuePollInterval:\n d = Deferred()\n reactor.callLater(1.0, lambda : d.callback(None))\n yield d\n\n self.assertEqual(self.node1._actualPollInterval, 60.0)", "def _success_handler(details):\n LOGGER.debug('[Backoff]: Completed after %d tries calling %s',\n details['tries'],\n details['target'].__name__)", "def _workout_messages(self, msgs_bunch):\n if msgs_bunch != []:\n while True:\n r = requests.post(self.url, headers = self.headers, data = json.dumps(msgs_bunch))\n # request success condition below - to end the handler\n if r.status_code == 200:\n break\n print('http_handler: failed to retranslate messages, try again in ' + str(self.timeout) + ' sec')\n time.sleep(self.timeout)\n # next bunch of messages will not be read until this function ends\n # current bunch of messags will be deleted in next request if delete_flag = True is set", "def backoff(\n max_tries=constants.BACKOFF_DEFAULT_MAXTRIES,\n delay=constants.BACKOFF_DEFAULT_DELAY,\n factor=constants.BACKOFF_DEFAULT_FACTOR,\n exception_handler=always_retry,\n before_delay_handler=noop,\n after_delay_handler=noop):\n if max_tries <= 0:\n raise ValueError((\n 'Max tries must be greater than 0; got {!r}'\n ).format(max_tries))\n\n if delay <= 0:\n raise ValueError((\n 'Delay must be greater than 0; got {!r}'\n ).format(delay))\n\n if factor <= 1:\n raise ValueError((\n 'Backoff factor must be greater than 1; got {!r}'\n ).format(factor))\n\n def outter(f):\n def inner(*args, **kwargs):\n m_max_tries, m_delay = max_tries, delay # make mutable\n while m_max_tries > 0:\n try:\n retval = f(*args, **kwargs)\n except Exception as ex:\n m_max_tries -= 1 # consume an attempt\n if m_max_tries < 0:\n # run out of tries\n raise\n if exception_handler(ex):\n logger.info(\n (\n 'backoff retry for: %r (max_tries=%r, '\n 'delay=%r, factor=%r)'\n ),\n f,\n max_tries,\n delay,\n factor\n )\n before_delay_handler(ex)\n time.sleep(m_delay) # wait...\n after_delay_handler(ex)\n m_delay *= factor # make future wait longer\n else:\n # exception handler gave up\n raise\n else:\n # done without errors\n return retval\n return inner\n return outter", "def send(self, message: Message, retry=0):\n pass # pragma: no cover", "def send_waiting_messages(wlist, messages_to_send):\n for message in messages_to_send:\n if wlist != []:\n wlist[0].send(message)\n messages_to_send.remove(message)", "def send_msg(self, my_queue, my_msg):", "def test_exp_backoff():\n stream = ReconnectingTweetStream('user', 'pass', initial_wait=1, max_wait=5,\n error_cb=error_callback)\n # A connection failure should happen automatically because of patch\n assert_raises(ConnectionError, stream.next)\n # By now, callback should have been invoked 3 times (1s, 2s, 4s)\n assert callback_invoked == 3", "def backoffState(self, tick):\n #assert (self.mState == States.BackOff)\n self.mBackoffIteration += 1\n if self.mBackoffIteration > self.MAX_BACKOFF_COUNT:\n ResultsSingleton.getInstance().recordError()\n self.mBackoffIteration = self.MAX_BACKOFF_COUNT\n #r = Random()\n R = np.random.uniform(0,1,1)[0] * ((2**self.mBackoffIteration) - 1)\n delay = R * self.bitTicks(self.BACKOFF_BITS)\n # convert delay to an int??\n self.mNextTickForRetryAfterBackoff = ((tick + int(delay)))\n #print(\"DELAY\")\n #print(self.mNextTickForRetryAfterBackoff)\n self.mState = self.States.BackOffWaiting", "def get_messages(self):\n 
@backoff.on_predicate(backoff.fibo,\n # Backoff up to 5 times\n max_tries=5,\n # Don't backoff for longer than 5 seconds\n # This constrains the total max backoff to 25 seconds\n max_value=5,\n jitter=backoff.full_jitter,\n on_backoff=_backoff_handler,\n on_success=_success_handler)\n def _receive_messages():\n polled_messages = self.sqs_client.receive_message(\n QueueUrl=self.athena_sqs_url,\n MaxNumberOfMessages=10\n )\n\n if not 'Messages' in polled_messages:\n return False\n self.received_messages.extend(polled_messages['Messages'])\n\n _receive_messages()\n LOGGER.info('Received %s messages', len(self.received_messages))", "def __call__(self, graph):\n result = graph.sqs_message_dispatcher.handle_batch()\n if not result.message_count:\n raise SleepNow()", "def __call__(self):\n hub.sleep(random.randint(1, self.interval))\n while True:\n self.send_req()\n self.reply_pending = True\n hub.sleep(self.interval)\n if self.reply_pending:\n self.no_response()", "def default_backoff(retries, max_retries):\n\n time.sleep(random.random() * (max_retries - retries) / max_retries * 2)", "def _exponential_backoff(backoff=0.1, max_delay=5):\n attempt = 0\n while True:\n delay = backoff * (2 ** attempt)\n if delay > max_delay:\n \"\"\"prevent redundant calculations\"\"\"\n break\n attempt += 1\n yield delay\n while True:\n yield max_delay", "def extend_backoff(durations, max_sleep=20):\n durations.append(random.random() * min(max_sleep, (2**len(durations) - 1)))", "async def exchanges_message_handler(bnc_websocket, ftx_websocket, param) -> None:\n\n ok = True\n while ok:\n try:\n # receiving updates\n bnc = await bnc_websocket.recv()\n ftx = await ftx_websocket.recv()\n # translate to execute strategy\n await price_analyze(json.loads(bnc), json.loads(ftx), param['p_d'], param['m'])\n # sleep if its needed\n await asyncio.sleep(param['r_r'])\n\n except ConnectionClosed:\n print('Connection Closed. 
Need to reboot.')\n ok = False", "def send_it(self, func, limit, *args, **kwargs):\n counter = 0\n if counter > limit:\n return False\n counter += 1\n try:\n result = func(*args, **kwargs)\n time.sleep(1.1)\n return result\n except gspread.exceptions.APIError as e:\n if (e.response.json())['error']['code'] == 429:\n time.sleep(501)\n self.send_it(func, limit, *args, **kwargs)\n else:\n print(e)\n return False\n except Exception as e:\n print(e)\n return False", "def notify(self, new_maxrate, new_maxexemptrate, usedbytes, usedi2bytes):\n # Prepare message parameters from the template\n message = \"\"\n params = {'slice': self.name, 'hostname': socket.gethostname(),\n 'since': time.asctime(time.gmtime(self.time)) + \" GMT\",\n 'until': time.asctime(time.gmtime(self.time + period)) + \" GMT\",\n 'date': time.asctime(time.gmtime()) + \" GMT\",\n 'period': format_period(period)}\n\n if new_maxrate != (self.MaxRate * 1000):\n # Format template parameters for low bandwidth message\n params['class'] = \"low bandwidth\"\n params['bytes'] = format_bytes(usedbytes - self.bytes)\n params['limit'] = format_bytes(self.MaxKByte * 1024)\n params['new_maxrate'] = bwlimit.format_tc_rate(new_maxrate)\n\n # Cap low bandwidth burst rate\n message += template % params\n logger.log(\"bwmon: ** %(slice)s %(class)s capped at %(new_maxrate)s/s \" % params)\n\n if new_maxexemptrate != (self.Maxi2Rate * 1000):\n # Format template parameters for high bandwidth message\n params['class'] = \"high bandwidth\"\n params['bytes'] = format_bytes(usedi2bytes - self.i2bytes)\n params['limit'] = format_bytes(self.Maxi2KByte * 1024)\n params['new_maxrate'] = bwlimit.format_tc_rate(new_maxexemptrate)\n\n message += template % params\n logger.log(\"bwmon: ** %(slice)s %(class)s capped at %(new_maxrate)s/s \" % params)\n\n # Notify slice\n if self.emailed == False:\n subject = \"pl_mom capped bandwidth of slice %(slice)s on %(hostname)s\" % params\n if DEBUG:\n logger.log(\"bwmon: \"+ subject)\n logger.log(\"bwmon: \"+ message + (footer % params))\n else:\n self.emailed = True\n logger.log(\"bwmon: Emailing %s\" % self.name)\n slicemail(self.name, subject, message + (footer % params))", "def sendMessage_0(self, messages):\n for message in messages:\n self.sendMessage(message)", "def work(self, worker_id):\n\n try:\n while self.running:\n # blocking request - timeout 3 seconds\n messageSent = False\n try:\n # throws queue.Empty exception if it fails to get an item in 3 seconds\n priorityItem = self.message_queue.get(True, 3)\n topic = priorityItem.item.topic\n self.metric_handler.increment_observed()\n print(f\"sending message on topic {topic} approximate queue size: {self.message_queue.qsize()}\")\n\n if self.sampling == True:\n if self.worker_sample_counts[worker_id][topic] == self.topic_sample_rates[topic]:\n self.send_message(priorityItem.item, worker_id)\n self.worker_sample_counts[worker_id][topic] = 0\n else:\n self.worker_sample_counts[worker_id][topic] += 1\n else:\n self.send_message(priorityItem.item, worker_id)\n \n # might not have actually been sent if we are sampling, but dont attempt to send it in finally\n messageSent = True\n\n except (ConnectionResetError, BrokenPipeError, ConnectionResetError) as e:\n # should maybe record number of times connection breaks? 
Will get wordy\n self.get_logger().error(f\"Error sending socket message: {str(e)}\")\n self.init_socket_with_rety(worker_id)\n except queue.Empty:\n priorityItem = None\n pass\n finally:\n # give one more attempt at sending the message if we failed\n if not messageSent and priorityItem is not None:\n try:\n self.send_message(priorityItem.item, worker_id)\n except:\n pass\n except Exception as ex:\n self.get_logger().error(f\"Worker thread {worker_id} exitting unexpectedly with error: {str(ex)}\")\n self.get_logger().error(traceback.format_exc())\n finally:\n self.get_logger().info(f\"Worker thread {worker_id} finishing.\")", "def callback(ch, method, properties, body):\n print(f\"[X] Received %r\" % body)\n\n # wait for certain time until task is completed\n time.sleep(body.count(b'.'))\n print(\"[X] Done\")\n\n \"\"\"Acknowledge after completing task this prevents message\n message loss when the worker dies. And when worker\n dies message will be passes to another online worker.\n Caution: We are not talking about worker node of RabbitMQ.\n \"\"\"\n ch.basic_ack(delivery_tag=method.delivery_tag)", "def exponential_backoff(max_tries, max_sleep=20):\n return [random.random() * min(max_sleep, (2**i - 1)) for i in range(0, max_tries)]", "def after_send(self):", "def testSendNextMessage(self):\n self.mgr.isGoproBusy = True\n self.mgr.lastRequestSent = monotonic.monotonic()\n self.mgr.queueMsg(3)\n self.mgr.queueMsg(2)\n self.mgr.queueMsg(1)\n self.mgr.processMsgQueue()\n self.v.send_mavlink.assert_called_with(3)\n self.assertEqual( self.mgr.msgQueue.qsize(), 2)", "def GetBackoff(self, retry_backoff, tries):\n if retry_backoff > 1:\n return retry_backoff * (2 ** (tries - 1))\n else:\n return retry_backoff", "async def send(self, message):", "def send_messages(messages):\n while messages:\n msg = messages.pop()\n sent_messages.append(msg)", "def test_redelivery_of_rejected_messages(self):\n yield self.connect('127.0.0.1', self.pbPort)\n\n localConfig = copy.copy(self.defaultConfig)\n localConfig.id = '#67-%s' % randint(10, 99)\n localConfig.submit_sm_throughput = 3\n yield self.add(localConfig)\n yield self.start(localConfig.id)\n\n # Send 60 messages to the queue\n startAt = datetime.now()\n submitCounter = 0\n submit_sm_pdu = copy.copy(self.SubmitSmPDU)\n while submitCounter < 60:\n submit_sm_pdu.params['short_message'] = '%s' % submitCounter\n yield self.submit_sm(localConfig.id, submit_sm_pdu, self.SubmitSmBill.user.uid)\n submitCounter += 1\n\n receivedSubmits = self.SMSCPort.factory.lastClient.submitRecords\n counter = 0\n _receivedSubmitsCount = 0\n # Wait for 40 seconds before checking if all submits were delivered\n # It will check for throughput in each iteration\n while counter < 30:\n receivedSubmits = self.SMSCPort.factory.lastClient.submitRecords\n\n _receivedSubmitsCount = len(receivedSubmits)\n\n # Wait some time\n yield waitFor(1)\n\n counter += 1\n endAt = datetime.now()\n\n yield self.stop(localConfig.id)\n\n # Wait for unbound state\n yield waitFor(2)\n\n # Assertions\n # Take the lastClient (and unique one) and assert received message\n self.assertEqual(len(self.SMSCPort.factory.lastClient.submitRecords), 60)", "def handle_sleep(_):\n loop.sleep()", "def test_stress_send_and_receive_messages_short(self):\n nthreads = 10\n nmessages = 1000\n message_rate = 5000 # About the max on a decent laptop\n drop_rate = 0.1\n transport_failure_rate = 0.1\n max_transport_delay = 0.25 # seconds\n transport = MockTransport(\"localhost\")\n stresser = StressTester(self, nthreads, 
transport)\n stresser.open()\n transport.set_transport_characteristics(transport_failure_rate,\n max_transport_delay)\n stresser.submit_messages(nmessages, message_rate, drop_rate)\n self.assertFalse(stresser.invoker.exceptions) # no exceptions\n stresser.close()\n transport.close()", "def _retry_send_messages():\n\n max_retry_value = getattr(settings, \"DJMAIL_MAX_RETRY_NUMBER\", 3)\n queryset = models.Message.objects.filter(status=models.STATUS_FAILED)\\\n .filter(retry_count__lte=max_retry_value)\\\n .order_by(\"-priority\", \"created_at\")\n\n connection = _get_real_backend()\n paginator = Paginator(list(queryset), getattr(settings, \"DJMAIL_MAX_BULK_RETRY_SEND\", 10))\n\n for page_index in paginator.page_range:\n connection.open()\n for message_model in paginator.page(page_index).object_list:\n email = message_model.get_email_message()\n sended = connection.send_messages([email])\n\n if sended == 1:\n message_model.status = models.STATUS_SENT\n message_model.sent_at = timezone.now()\n else:\n message_model.retry_count += 1\n\n message_model.save()\n\n connection.close()", "async def send_wrapped_message(channel, message):\n for part in wrap(message, 2000):\n await channel.send(part)", "def _send_pending_messages():\n\n queryset = models.Message.objects.filter(status=models.STATUS_PENDING)\\\n .order_by(\"-priority\", \"created_at\")\n\n connection = _get_real_backend()\n paginator = Paginator(list(queryset), getattr(settings, \"DJMAIL_MAX_BULK_RETRY_SEND\", 10))\n\n for page_index in paginator.page_range:\n connection.open()\n for message_model in paginator.page(page_index).object_list:\n email = message_model.get_email_message()\n sended = connection.send_messages([email])\n\n if sended == 1:\n message_model.status = models.STATUS_SENT\n message_model.sent_at = timezone.now()\n else:\n message_model.retry_count += 1\n\n message_model.save()\n connection.close()", "async def sleep(\n self,\n settings: Dict[str, Any],\n transport: AsyncHttpTransport[HTTPRequestType, AsyncHTTPResponseType],\n response: Optional[PipelineResponse[HTTPRequestType, AsyncHTTPResponseType]] = None,\n ) -> None:\n if response:\n slept = await self._sleep_for_retry(response, transport)\n if slept:\n return\n await self._sleep_backoff(settings, transport)", "def write_handler(socket, buf):\n while True:\n try:\n message = buf.pop()\n logging.debug(\"sending data : %s\", message)\n socket.send(message)\n except IndexError:\n time.sleep(WAIT_INTERVAL)", "def test_connectionLostBackoffDelayDoubles(self):\r\n self.pm.startService()\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n self.reactor.advance(self.pm.threshold - 1) #9s\r\n self.assertIn(\"foo\", self.pm.protocols)\r\n self.assertEqual(self.pm.delay[\"foo\"], self.pm.minRestartDelay)\r\n # process dies within the threshold and should not restart immediately\r\n self.pm.protocols[\"foo\"].processEnded(Failure(ProcessDone(0)))\r\n self.assertEqual(self.pm.delay[\"foo\"], self.pm.minRestartDelay * 2)", "def handle_congestion(self):\n print(\"\\nNetwork is congested! 
Sending rate is decreased.\\n\")\n self.delay *= 1.2\n time.sleep(1)", "def _send(self) -> None:\n if not self.connected or now() < self.next_send:\n return\n self.next_send += self.poll_interval\n buff = []\n while self.outq:\n msg_id, tag, data = self.outq.popleft()\n buff.append(pickle.dumps((msg_id, tag, data)))\n if buff:\n stream = b\"\".join(buff)\n self.endpoint.sendall(stream)", "def _client(self):\n while True:\n body = self.queue.get(True)\n print \"Sending %s bytes (%s/%s)\" % (len(body), self.queue.qsize(), self.queue.maxsize)\n\n try:\n req = urllib2.Request(self.endpoint, body)\n urllib2.urlopen(req).read()\n except:\n print \"Cannot send request. Retrying in 5 seconds\"\n print_exception(*sys.exc_info())\n print \"continuing...\"\n self.enqueue(body)\n sleep(5)", "def _delay(self, n=None):", "def send_messages(messages, sent_messages):\n while messages:\n current_message = messages.pop()\n print(f\"Sending message: {current_message}\")\n sent_messages.append(current_message)", "async def send_bulk_update_message(event_instance, message, notification_text):\n attendees = Attend.objects.filter(\n event=event_instance, status=\"attending\")\n for attendee in attendees:\n slack_id = attendee.user.slack_id\n if slack_id:\n message = generate_simple_message(message)\n slack_response = notify_user(\n message, slack_id, text=notification_text)\n\n if slack_response[\"ok\"] is False and slack_response[\"headers\"][\"Retry-After\"]:\n delay = int(slack_response[\"headers\"][\"Retry-After\"])\n logging.info(\"Rate limited. Retrying in \" + str(delay) + \" seconds\")\n sleep(delay)\n notify_user(\n message, slack_id, notification_text)\n elif not slack_response['ok']:\n logging.warning(slack_response)", "def _call(self, msg, cb, *args):\r\n if not self._status:\r\n raise InterfaceDisabledError('A disabled interface should not be '\r\n 'called.')\r\n\r\n if not callable(cb):\r\n raise TypeError('Callback has to be callable.')\r\n\r\n uid = uuid4().hex\r\n deferred = Deferred()\r\n deferred.addCallback(cb, *args)\r\n self._responses[uid] = deferred\r\n\r\n self._conn.sendMessage(self._iTag, self._clsName, msg, uid)", "def worker(my_idx, inq, outq):\n print(\"worker %d: starting\" % my_idx)\n backoff = .001\n while True:\n cmd = inq.get()\n if cmd is None:\n break\n ridx, creds, cmds = cmd\n backoff = max(backoff / 2, 0.001)\n while True:\n try:\n responses = Gmail.batch_executor(creds, cmds)\n except Gmail.UserRateException:\n print(f'worker {my_idx}: backoff {backoff} sec')\n sleep(backoff)\n backoff = min(backoff * 2, 1.0)\n except Exception as ex:\n outq.put([ridx, ex])\n break\n else:\n outq.put([ridx, responses])\n break\n inq.task_done()\n print(\"worker %d stoping\" % my_idx)", "async def send(self):", "def backoff(start_sleep_time=0.1, border_sleep_time=30, factor=2, jitter=True):\n if start_sleep_time < 0.001:\n logger.warning('start_sleep_time fewer than 0.001 and will be set to 0.001')\n start_sleep_time = 0.001\n\n def decorator(target):\n @wraps(target)\n def retry(*args, **kwargs):\n attempt = 0\n while True:\n sleep_time = _sleep_time(start_sleep_time, border_sleep_time, factor, attempt, jitter)\n try:\n attempt += 1\n sleep(sleep_time)\n ret = target(*args, **kwargs)\n except Exception as e:\n logger.error(f'Exception is catched {e}')\n logger.warning(f'Wait fo {sleep_time} seconds and try again')\n else:\n return ret\n return retry\n return decorator", "def delay(self):\n # well, so this is really bad practice\n # but since the nature of this app \n # I have to make 
assumptions around time..\n is_delayed_trader = self.delayed\n now = time.time()\n if not is_delayed_trader or self.message_arrival_estimate is None:\n self.message_arrival_estimate = now + self.default_delay\n delay = self.default_delay\n else:\n current_arrival_estimate = now + self.__delay\n if self.message_arrival_estimate > current_arrival_estimate:\n diff = self.message_arrival_estimate - current_arrival_estimate\n delay = diff + self.__delay\n self.message_arrival_estimate = now + delay \n else: \n self.message_arrival_estimate = current_arrival_estimate\n delay = self.__delay\n delay = round(delay, 4)\n log.debug('trader %s: message delay %s.' % (self.tag, delay))\n return delay", "def test_dispatch_status(self):\n msg_helper = MessageHelper()\n worker_helper = WorkerHelper()\n broker = self.setup_broker(worker_helper)\n\n self.assertEqual(broker.get_messages('vumi', 'fooconn.status'), [])\n\n msg = msg_helper.make_status(\n status='down',\n component='foo',\n type='bar',\n message='baz')\n\n yield worker_helper.dispatch_status(msg, 'fooconn')\n\n self.assertEqual(\n broker.get_messages('vumi', 'fooconn.status'), [msg])", "def run(self):\n\n itters = 0\n burstlimit = 0\n msg_sent = False # Whenever we don't send a message, we don't need to get into a long sleep.\n self.getQueues()\n while self.running:\n itters += 1\n\n while not self.queue_privmsg.empty():\n message = self.queue_privmsg.get()\n\n self.say(message[0], message[1], message[2])\n msg_sent = True\n burstlimit += 1\n\n if burstlimit >= self.burstlimit or int(time.time()) < self.lastburst:\n self.lastburst = int(time.time())\n break\n\n while not self.queue_notice.empty():\n message = self.queue_notice.get()\n\n self.notice(message[0], message[1], message[2])\n msg_sent = True\n burstlimit += 1\n\n if burstlimit >= self.burstlimit or int(time.time()) < self.lastburst + 5:\n self.lastburst = int(time.time())\n break\n\n if msg_sent:\n time.sleep(2)\n msg_sent = False\n else:\n time.sleep(.25)\n\n if itters % 3 == 0 and burstlimit != 0:\n # Resets the burst limit every 6 seconds.\n itters = 0\n burstlimit = 0", "def send_message(self, message=\"\", status=200, expiry=None):\n init()\n wrapper = {'status_code': status, 'message': message, \"id\": self.get_session_id()}\n if expiry is not None:\n wrapper['expiry'] = expiry\n\n payload = json.dumps(wrapper, default=date_handler)\n session_id = self.get_session_id()\n script = \"\"\"\n local queue = redis.call('hget', KEYS[3] .. 'active_sessions', KEYS[1])\n if queue == '' then\n redis.log(redis.LOG_DEBUG, \"BACKLOG : No Queue, pushing to backlog \" .. KEYS[1])\n redis.call(\"lpush\", KEYS[3] .. KEYS[1] .. \"_backlog\", KEYS[2])\n return \"BACKLOG\"\n elseif queue == nil then\n redis.log(redis.LOG_DEBUG, \"TIMEOUT : Timed out \" .. KEYS[1])\n return \"TIMEOUT\"\n end\n local result = redis.call(\"publish\", queue, KEYS[2])\n if result == 0 then\n redis.log(redis.LOG_DEBUG, \"BACKLOG_FAILURE : PUB Failed, saving to backlog \" .. KEYS[1])\n redis.call(\"lpush\", KEYS[3] .. KEYS[1] .. \"_backlog\", KEYS[2])\n return \"BACKLOG_FAILURE\"\n end\n redis.log(redis.LOG_DEBUG, \"SUCCESS : \" .. 
KEYS[1])\n return \"SUCCESS\"\n \"\"\"\n return r.eval(script, 3, session_id, payload, comet_config.REDIS_NAMESPACE)", "def test_producer_send_messages_batched_partial_success(self):\n clock = MemoryReactorClock()\n client = Mock(reactor=clock)\n topic2 = \"tpsmbps_two\"\n client.topic_partitions = {self.topic: [0, 1, 2, 3], topic2: [4, 5, 6]}\n client.metadata_error_for_topic.return_value = False\n\n init_resp = [\n ProduceResponse(self.topic, 0, 0, 10),\n ProduceResponse(self.topic, 1, 6, 20),\n ProduceResponse(topic2, 5, 0, 30),\n ]\n next_resp = [\n ProduceResponse(self.topic, 2, 0, 10),\n ProduceResponse(self.topic, 1, 0, 20),\n ProduceResponse(topic2, 4, 0, 30),\n ]\n failed_payloads = [\n (ProduceRequest(self.topic, ANY, ANY), NotLeaderForPartitionError()),\n (ProduceRequest(topic2, ANY, ANY), BrokerNotAvailableError()),\n ]\n\n client.send_produce_request.side_effect = [\n fail(Failure(FailedPayloadsError(init_resp, failed_payloads))),\n succeed(next_resp),\n ]\n\n msgs = self.msgs(range(10))\n results = []\n\n producer = Producer(client, batch_send=True, batch_every_t=0)\n # Send 5 total requests: 4 here, one after we make sure we didn't\n # send early\n results.append(producer.send_messages(self.topic, msgs=msgs[0:3]))\n results.append(producer.send_messages(topic2, msgs=msgs[3:5]))\n results.append(producer.send_messages(self.topic, msgs=msgs[5:8]))\n results.append(producer.send_messages(topic2, msgs=msgs[8:9]))\n # No call yet, not enough messages\n self.assertFalse(client.send_produce_request.called)\n # Enough messages to start the request\n client.reset_topic_metadata.reset_mock()\n results.append(producer.send_messages(self.topic, msgs=msgs[9:10]))\n # Before the retry, there should be some results\n self.assertEqual(init_resp[0], self.successResultOf(results[0]))\n self.assertEqual(init_resp[2], self.successResultOf(results[3]))\n # And the errors should have forced a metadata reset on one of the topics.\n client.reset_topic_metadata.assert_called_with(self.topic)\n # Advance the clock to trigger retries.\n clock.advance(producer._retry_interval)\n # Check the otehr results came in\n self.assertEqual(next_resp[0], self.successResultOf(results[4]))\n self.assertEqual(next_resp[1], self.successResultOf(results[2]))\n self.assertEqual(next_resp[2], self.successResultOf(results[1]))\n\n producer.stop()", "async def dummy(msg, writer):\n writer.write(json.dumps(msg).encode(ENCODE))\n await writer.drain()", "def test_delivery_of_queued_messages(self):\n yield self.connect('127.0.0.1', self.pbPort)\n\n localConfig = copy.copy(self.defaultConfig)\n localConfig.id = str(randint(10, 99))\n localConfig.requeue_delay = 2\n localConfig.submit_sm_throughput = 20\n yield self.add(localConfig)\n\n # Send 150 messages to the queue\n submitCounter = 0\n submit_sm_pdu = copy.copy(self.SubmitSmPDU)\n while submitCounter < 150:\n submit_sm_pdu.params['short_message'] = '%s' % submitCounter\n yield self.submit_sm(localConfig.id, submit_sm_pdu, self.SubmitSmBill.user.uid)\n submitCounter += 1\n\n # Wait for 20 seconds\n yield waitFor(20)\n\n # Start the connector again\n yield self.start(localConfig.id)\n\n # Wait for 30 seconds, all the rest of the queue must be sent\n yield waitFor(50)\n\n yield self.stop(localConfig.id)\n\n # Wait for unbound state\n yield waitFor(20)\n\n # Assertions\n # Take the lastClient (and unique one) and assert received message\n self.assertEqual(len(self.SMSCPort.factory.lastClient.submitRecords), 150)", "async def bc(self,ctx,*,msg):\n for hook in 
self.bot.mwebhooks:\n try:\n async def send_webhook():\n async with aiohttp.ClientSession() as session:\n webhook = Webhook.from_url(\n f\"{hook}\", adapter=AsyncWebhookAdapter(session))\n\n e = discord.Embed(color = self.bot.color,description=msg)\n e.set_author(name=ctx.author,icon_url = ctx.author.avatar_url)\n await webhook.send(embed=e)\n\n await send_webhook()\n except:\n continue", "def _do(self):\n # Get all the messages in queue\n msgs = self.RPC.query.all()\n for msg in msgs:\n # Find the first msg marked as enqueued.\n\n if msg.working and \\\n (self.current_time_seconds() - self.millisec_to_sec(msg.updated)) \\\n > self.conf.messaging_server.response_timeout:\n msg.status = message.Message.ENQUEUED\n msg.update(condition=self.working_status_condition)\n\n if not msg.enqueued:\n continue\n if 'plan_name' in list(msg.ctxt.keys()): # Python 3 Conversion -- dict object to list object\n LOG.info('Plan name: {}'.format(msg.ctxt['plan_name']))\n elif 'plan_name' in list(msg.args.keys()): # Python 3 Conversion -- dict object to list object\n LOG.info('Plan name: {}'.format(msg.args['plan_name']))\n\n # Change the status to WORKING (operation with a lock)\n msg.status = message.Message.WORKING\n msg.owner = socket.gethostname()\n # All update should have a condition (status == enqueued)\n _is_updated = msg.update(condition=self.enqueued_status_condition)\n\n if not _is_updated or 'FAILURE' in _is_updated:\n continue\n\n # RPC methods must not start/end with an underscore.\n if msg.method.startswith('_') or msg.method.endswith('_'):\n error_msg = _LE(\"Method {} must not start or end\"\n \"with underscores\").format(msg.method)\n self._log_error_and_update_msg(msg, error_msg)\n return\n\n # The first endpoint that supports the method wins.\n method = None\n for endpoint in self.endpoints:\n if msg.method not in dir(endpoint):\n continue\n endpoint_method = getattr(endpoint, msg.method)\n if callable(endpoint_method):\n method = endpoint_method\n if self.conf.messaging_server.debug:\n LOG.debug(\"Message {} method {} is \"\n \"handled by endpoint {}\".\n format(msg.id, msg.method,\n method.__str__.__name__))\n break\n if not method:\n error_msg = _LE(\"Message {} method {} unsupported \"\n \"in endpoints.\").format(msg.id, msg.method)\n self._log_error_and_update_msg(msg, error_msg)\n return\n\n # All methods must take a ctxt and args param.\n if inspect.getfullargspec(method).args != ['self', 'ctx', 'arg']:\n error_msg = _LE(\"Method {} must take three args: \"\n \"self, ctx, arg\").format(msg.method)\n self._log_error_and_update_msg(msg, error_msg)\n return\n\n LOG.info(_LI(\"Message {} method {} received\").format(\n msg.id, msg.method))\n if self.conf.messaging_server.debug:\n LOG.debug(\n _LI(\"Message {} method {} context: {}, args: {}\").format(\n msg.id, msg.method, msg.ctxt, msg.args))\n\n failure = None\n try:\n\n # Add the template to conductor.plan table\n # Methods return an opaque dictionary\n result = method(msg.ctxt, msg.args)\n\n # FIXME(jdandrea): Remove response/error and make it opaque.\n # That means this would just be assigned result outright.\n msg.response = result.get('response', result)\n except Exception:\n # Current sys.exc_info() content can be overridden\n # by another exception raised by a log handler during\n # LOG.exception(). So keep a copy and delete it later.\n failure = sys.exc_info()\n\n # Do not log details about the failure here. 
It will\n # be returned later upstream.\n LOG.exception(_LE('Exception during message handling'))\n\n try:\n if failure is None:\n msg.status = message.Message.COMPLETED\n else:\n msg.failure = \\\n rpc_common.serialize_remote_exception(failure)\n msg.status = message.Message.ERROR\n LOG.info(_LI(\"Message {} method {}, status: {}\").format(\n msg.id, msg.method, msg.status))\n if self.conf.messaging_server.debug:\n LOG.debug(\"Message {} method {}, response: {}\".format(\n msg.id, msg.method, msg.response))\n\n _is_success = 'FAILURE'\n while 'FAILURE' in _is_success and (self.current_time_seconds() - self.millisec_to_sec(msg.updated)) \\\n <= self.conf.messaging_server.response_timeout:\n _is_success = msg.update()\n LOG.info(_LI(\"updating the message status from working to {}, \"\n \"atomic update response from MUSIC {}\").format(msg.status, _is_success))\n\n except Exception:\n LOG.exception(_LE(\"Can not send reply for message {} \"\n \"method {}\").\n format(msg.id, msg.method))\n finally:\n # Remove circular object reference between the current\n # stack frame and the traceback in exc_info.\n del failure", "def test_dispatch_outbound(self):\n msg_helper = MessageHelper()\n worker_helper = WorkerHelper()\n broker = self.setup_broker(worker_helper)\n self.assertEqual(broker.get_messages('vumi', 'fooconn.outbound'), [])\n msg = msg_helper.make_outbound('message')\n yield worker_helper.dispatch_outbound(msg, 'fooconn')\n self.assertEqual(\n broker.get_messages('vumi', 'fooconn.outbound'), [msg])", "def _send_msg(self, message: bin, location: BaseWorker) -> bin:\n if self.message_pending_time > 0:\n if self.verbose:\n print(f\"pending time of {self.message_pending_time} seconds to send message...\")\n sleep(self.message_pending_time)\n\n return location._recv_msg(message)", "def retry_wait_backoff(fn_check, fail_msg, max_wait=20):\n sleep_time = 0.1\n total_waited = 0.0\n while total_waited < max_wait:\n if fn_check():\n break\n log.info('{0}, retrying in {1:.2f}s'.format(fail_msg, sleep_time))\n total_waited += sleep_time\n time.sleep(sleep_time)\n sleep_time = min(sleep_time * 2, 5, max_wait - total_waited)\n else:\n raise TimeoutError('{0} after {1:.2f}s'.format(fail_msg, max_wait))", "def retry_wait_backoff(fn_check, fail_msg, max_wait=20):\n sleep_time = 0.1\n total_waited = 0.0\n while total_waited < max_wait:\n if fn_check():\n break\n log.info('{0}, retrying in {1:.2f}s'.format(fail_msg, sleep_time))\n total_waited += sleep_time\n time.sleep(sleep_time)\n sleep_time = min(sleep_time * 2, 5, max_wait - total_waited)\n else:\n raise TimeoutError('{0} after {1:.2f}s'.format(fail_msg, max_wait))", "def test_bulk_round_trip_with_backoff(self):\n self._test_bulk_round_trip(nodes=3, partitioner=\"murmur3\", num_operations=250000,\n copy_from_options={'MAXINFLIGHTMESSAGES': 64, 'MAXPENDINGCHUNKS': 1})", "async def test_delay_message(doof, repo_info, mocker):\n now = datetime.now(tz=doof.timezone)\n seconds_diff = 30\n future = now + timedelta(seconds=seconds_diff)\n next_workday_mock = mocker.patch('bot.next_workday_at_10', autospec=True, return_value=future)\n\n sleep_sync_mock = Mock()\n\n async def sleep_fake(*args, **kwargs):\n \"\"\"await cannot be used with mock objects\"\"\"\n sleep_sync_mock(*args, **kwargs)\n\n mocker.patch('asyncio.sleep', sleep_fake)\n\n mocker.patch('bot.get_unchecked_authors', return_value=['author1'])\n\n await doof.delay_message(repo_info)\n assert doof.said(\n 'The following authors have not yet checked off their boxes for doof_repo: author1',\n )\n 
assert next_workday_mock.call_count == 1\n assert abs(next_workday_mock.call_args[0][0] - now).total_seconds() < 1\n assert next_workday_mock.call_args[0][0].tzinfo.zone == doof.timezone.zone\n assert sleep_sync_mock.call_count == 1\n assert abs(seconds_diff - sleep_sync_mock.call_args[0][0]) < 1 # pylint: disable=unsubscriptable-object", "async def _listen(self,sub_params): \n async with websockets.connect(self.url) as websocket:\n await websocket.send(json.dumps(sub_params))\n # self.keepalive.start()\n start_time = time.time()\n while not self.shutdown_event.is_set():\n try:\n now = time.time()\n if((now - start_time) > 0.5):\n self.calculate_order_depth()\n start_time = now \n data = await websocket.recv()\n msg = json.loads(data)\n except ValueError as e:\n self.on_error(e)\n except Exception as e:\n self.on_error(e)\n else:\n self.on_message(msg)", "async def main():\n\n async def talk(bus, keys):\n \"\"\" generate some test messages \"\"\"\n\n for v in range(5):\n for k in keys:\n await asyncio.sleep(0.35)\n await bus.send(Message(\"local\", k, v))\n\n async def listen(bus, pattern):\n await asyncio.sleep(1.5)\n try:\n async for x in bus.listen(pattern):\n print(f\"listen({pattern}):\", x)\n except asyncio.CancelledError:\n pass\n\n async def monitor():\n \"\"\" echo bus status every 2 sec \"\"\"\n\n for n in range(6):\n await asyncio.sleep(2)\n print(\"monitor status:\", n, await ps.status())\n\n ps = BasicMessageBus()\n await ps.connect()\n\n tunnel_config = {\n \"ssh_address_or_host\": (\"robnee.com\", 22),\n \"remote_bind_address\": (\"127.0.0.1\", 6379),\n \"local_bind_address\": (\"127.0.0.1\",),\n \"ssh_username\": \"rnee\",\n \"ssh_pkey\": os.path.expanduser(r\"~/.ssh/id_rsa\"),\n }\n bridge = MessageBridge(\"cat.\", tunnel_config, ps)\n\n aws = (\n talk(ps, (\"cat.dog\", \"cat.pig\", \"cow.emu\")),\n listen(ps, \".\"),\n listen(ps, \"cat.\"),\n listen(ps, \"cat.pig\"),\n bridge.start(),\n monitor(),\n )\n await wait_graceafully(aws, timeout=15)\n\n await ps.close()\n \n print(\"main: done\")", "def on_background_job(self, event):\n job_cmd = event['Job-Command']\n job_uuid = event['Job-UUID']\n # TEST MIKE\n if job_cmd == 'originate' and job_uuid:\n try:\n status, reason = event.get_body().split(' ', 1)\n except ValueError:\n return\n request_uuid = self.bk_jobs.pop(job_uuid, None)\n if not request_uuid:\n return\n\n # case GroupCall\n if event['variable_plivo_group_call'] == 'true':\n status = status.strip()\n reason = reason.strip()\n if status[:3] != '+OK':\n self.log.info(\"GroupCall Attempt Done for RequestUUID %s (%s)\" \\\n % (request_uuid, reason))\n return\n self.log.warn(\"GroupCall Attempt Failed for RequestUUID %s (%s)\" \\\n % (request_uuid, reason))\n return\n\n # case Call and BulkCall\n try:\n call_req = self.call_requests[request_uuid]\n except KeyError:\n return\n # Handle failure case of originate\n # This case does not raise a on_channel_hangup event.\n # All other failures will be captured by on_channel_hangup\n status = status.strip()\n reason = reason.strip()\n if status[:3] != '+OK':\n # In case ring/early state done, just warn\n # releasing call request will be done in hangup event\n if call_req.state_flag in ('Ringing', 'EarlyMedia'):\n self.log.warn(\"Call Attempt Done (%s) for RequestUUID %s but Failed (%s)\" \\\n % (call_req.state_flag, request_uuid, reason))\n # notify end\n self.log.debug(\"Notify Call success for RequestUUID %s\" % request_uuid)\n call_req.notify_call_end()\n return\n # If no more gateways, release call request\n elif not 
call_req.gateways:\n self.log.warn(\"Call Failed for RequestUUID %s but No More Gateways (%s)\" \\\n % (request_uuid, reason))\n # notify end\n self.log.debug(\"Notify Call success for RequestUUID %s\" % request_uuid)\n call_req.notify_call_end()\n # set an empty call_uuid\n call_uuid = ''\n hangup_url = call_req.hangup_url\n self.set_hangup_complete(request_uuid, call_uuid,\n reason, event, hangup_url)\n return\n # If there are gateways and call request state_flag is not set\n # try again a call\n elif call_req.gateways:\n self.log.warn(\"Call Failed without Ringing/EarlyMedia for RequestUUID %s - Retrying Now (%s)\" \\\n % (request_uuid, reason))\n # notify try a new call\n self.log.debug(\"Notify Call retry for RequestUUID %s\" % request_uuid)\n call_req.notify_call_try()\n elif job_cmd == 'conference' and job_uuid:\n result = event.get_body().strip() or ''\n async_res = self.conf_sync_jobs.pop(job_uuid, None)\n if async_res is None:\n return\n elif async_res is True:\n self.log.info(\"Conference Api (async) Response for JobUUID %s -- %s\" % (job_uuid, result))\n return\n async_res.set(result)\n self.log.info(\"Conference Api (sync) Response for JobUUID %s -- %s\" % (job_uuid, result))", "def test_stress_send_and_receive_messages_medium(self):\n nthreads = 10\n nmessages = 10000\n message_rate = 5000 # About the max on a decent laptop\n drop_rate = 0.1\n transport_failure_rate = 0.1\n max_transport_delay = 1.5 # seconds\n transport = MockTransport(\"localhost\")\n stresser = StressTester(self, nthreads, transport)\n stresser.open()\n transport.set_transport_characteristics(transport_failure_rate,\n max_transport_delay)\n stresser.submit_messages(nmessages, message_rate, drop_rate)\n self.assertFalse(stresser.invoker.exceptions) # no exceptions\n stresser.close()\n transport.close()", "def handle(req):\n sleep_time = int(req)\n time.sleep(sleep_time)\n return f'sleep job done for {req} s'", "def test_redelivery_of_rejected_messages_after_restart(self):\n yield self.connect('127.0.0.1', self.pbPort)\n\n localConfig = copy.copy(self.defaultConfig)\n localConfig.id = '#67-%s' % randint(10, 9999)\n localConfig.requeue_delay = 1\n localConfig.submit_sm_throughput = 1\n yield self.add(localConfig)\n yield self.start(localConfig.id)\n\n # Send 4 messages to the queue\n submitCounter = 0\n submit_sm_pdu = copy.copy(self.SubmitSmPDU)\n while submitCounter < 4:\n submit_sm_pdu.params['short_message'] = '%s' % submitCounter\n msgid = yield self.submit_sm(localConfig.id, submit_sm_pdu, self.SubmitSmBill.user.uid)\n submitCounter += 1\n\n # Wait for 5 seconds before stopping\n yield waitFor(5)\n\n yield self.stop(localConfig.id)\n\n # Wait for unbound state\n yield waitFor(5)\n\n # Save the count before starting the connector\n _submitRecordsCount = len(self.SMSCPort.factory.lastClient.submitRecords)\n\n # Wait for 5 seconds before starting again\n yield waitFor(5)\n\n # Start the connector again\n yield self.start(localConfig.id)\n\n # Wait for 10 seconds before stopping , all the rest of the queue must be sent\n yield waitFor(10)\n\n yield self.stop(localConfig.id)\n\n # Wait for unbound state\n yield waitFor(10)\n\n # Update the counter\n _submitRecordsCount += len(self.SMSCPort.factory.lastClient.submitRecords)\n\n # Assertions\n self.assertEqual(_submitRecordsCount, 4)", "def test_request_extra_zero_values():\n def mock_request(*_, **__):\n mocked_resp = Response()\n mocked_resp.status_code = HTTPNotFound.code\n return mocked_resp\n\n with mock.patch(\"requests.Session.request\", 
side_effect=mock_request) as mocked_request:\n resp = request_extra(\"get\", \"http://whatever\", retries=0, allowed_codes=[HTTPOk.code])\n assert resp.status_code == HTTPGatewayTimeout.code, \"failing request with no retry should produce timeout\"\n assert mocked_request.call_count == 1\n\n sleep_counter = {\"called_count\": 0, \"called_with\": []}\n\n def mock_sleep(delay):\n sleep_counter[\"called_count\"] += 1\n sleep_counter[\"called_with\"].append(delay)\n\n with mock.patch(\"weaver.utils.get_settings\", return_value={\"cache.request.enabled\": \"false\"}):\n with mock.patch(\"requests.Session.request\", side_effect=mock_request) as mocked_request:\n with mock.patch(\"weaver.utils.time.sleep\", side_effect=mock_sleep):\n # if backoff is not correctly handled as explicit zero, the default backoff value would be used\n # to calculate the delay between requests which should increase with backoff formula and retry count\n resp = request_extra(\"get\", \"http://whatever\", backoff=0, retries=3, allowed_codes=[HTTPOk.code])\n assert resp.status_code == HTTPGatewayTimeout.code\n assert mocked_request.call_count == 4 # first called directly, then 3 times for each retry\n\n # since backoff factor multiplies all incrementally increasing delays between requests,\n # proper detection of input backoff=0 makes all sleep calls equal to zero\n assert all(backoff == 0 for backoff in sleep_counter[\"called_with\"])\n assert sleep_counter[\"called_count\"] == 3 # first direct call doesn't have any sleep from retry", "def handle_message(self, message):", "def send_message(data):\n if data is not None:\n logging.debug(data)\n queue.on_next(data)", "async def send_queue_messages(queue_client, base64_message, queue_msg):\n try:\n await queue_client.send_message(base64_message)\n except Exception: # pylint: disable=bare-except\n logging.exception(f\"{HEADER} Failed to send message {queue_msg} to queue\")\n # Raise exception to let azure function retry whole batch again\n raise", "def listener_callback(self, topic, msg):\n netMessage = SocketMessage(mType=MessageType.MESSAGE, mTopic=topic, mPayload=msg)\n item = PrioritizedItem(priority=self.topic_priorities[topic], item=netMessage)\n\n try:\n self.message_queue.put_nowait(item)\n except queue.Full as ex:\n ## TODO handle queue full issue - shouldn't hit this too often, we either need more workers or too much data is being sent\n # self.get_logger().error(f'Queue is full! 
{str(ex)}')\n self.metric_handler.increment_dropped()\n except Exception as ex:\n # some other error\n self.get_logger().error(f'Error queuing message {str(ex)}')", "def test_standup_send_long_message(url, _pre_setup):\n\n token, channel_id = _pre_setup[0]['token'], _pre_setup[2]\n\n standup_active_data = {\n 'token': token,\n 'channel_id': channel_id\n }\n\n active_response = requests.get(url + \"standup/active\", params=standup_active_data)\n assert json.loads(active_response.text)['is_active'] == True\n\n message_1000 = 'a' * 1002\n\n standup_send_data = {\n 'token': token,\n 'channel_id': channel_id,\n 'message': message_1000\n }\n\n response = requests.post(url + \"standup/send\", json=standup_send_data)\n assert response.status_code == 400", "def _send(self):\n dmpd_response_status = json.dumps(self.status)\n drs = sizeof_fmt(len(dmpd_response_status))\n\n status_sent = False\n output_query_count = 0\n\n queues = []\n executor_keys = self.job.executor_id.split('-')\n for k in range(int(len(executor_keys)/2)):\n qname = 'lithops-{}'.format('-'.join(executor_keys[0:k*3+2]))\n queues.append(qname)\n\n while not status_sent and output_query_count < 5:\n output_query_count = output_query_count + 1\n try:\n with self._create_channel() as ch:\n for queue in queues:\n ch.basic_publish(exchange='', routing_key=queue, body=dmpd_response_status)\n logger.info(\"Execution status sent to RabbitMQ - Size: {}\".format(drs))\n status_sent = True\n except Exception:\n time.sleep(0.2)\n\n if self.status['type'] == '__end__':\n super()._send()", "async def send(self):\n message = b'foo\\nbar\\nbaz\\nqux\\n'\n for b in message:\n await asyncio.sleep(0.5)\n self.transport.serial.write(bytes([b]))\n print(f'Writer sent: {bytes([b])}')\n self.transport.close()", "def subscribe(receiver, catchup):", "def throttle(f):\n def wrapper(self, *args, **kwargs):\n if self.made_requests < self.max_requests:\n time.sleep(self.delay)\n f(self, *args, **kwargs)\n self.made_requests += 1\n else:\n raise Exception, 'maximum request limit reached'\n return wrapper", "def test_dispatch_raw(self):\n msg_helper = MessageHelper()\n worker_helper = WorkerHelper()\n broker = self.setup_broker(worker_helper)\n self.assertEqual(broker.get_messages('vumi', 'fooconn.foo'), [])\n msg = msg_helper.make_inbound('message')\n yield worker_helper.dispatch_raw('fooconn.foo', msg)\n self.assertEqual(broker.get_messages('vumi', 'fooconn.foo'), [msg])", "def direct_message(self, user, msg, num):\n PAUSE = 1\n logging.info('Send message {} to {}'.format(msg,user))\n self.driver.get(self.direct_url)\n self.driver.find_elements_by_xpath('/html/body/div[2]/div/div/div[2]/div[1]/div/div[2]/input')[0].send_keys(user)\n time.sleep(PAUSE)\n self.driver.find_elements_by_xpath('/html/body/div[5]/div/div/div/div[3]/button[2]')[0].click() #Edge case to get rid of notification\n time.sleep(PAUSE)\n self.driver.find_elements_by_xpath('/html/body/div[2]/div/div/div[2]/div[2]/div/div/div[3]/button')[0].click()\n self.driver.find_elements_by_xpath('/html/body/div[2]/div/div/div[1]/div/div[2]/div/button')[0].click()\n time.sleep(PAUSE)\n # The message will be placed and sent\n self.driver.find_elements_by_xpath('//*[@id=\"react-root\"]/section/div/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div/div[2]/textarea')[0].send_keys(msg)\n time.sleep(PAUSE)\n self.driver.find_elements_by_xpath('//*[@id=\"react-root\"]/section/div/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div/div[3]/button')[0].click()\n # Special feature involving reacting with heart\n for x in 
range(num):\n self.driver.find_elements_by_xpath('//*[@id=\"react-root\"]/section/div/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div/button[2]')[0].click()\n time.sleep(PAUSE)", "def __add_trump_bot_jobs__(self, seconds, **kwargs): \n self.add_job(self.send_latest_tweets, 'interval', seconds=seconds, max_instances=1, **kwargs)\n self.add_job(self.resend_bad_tweets, 'interval', seconds=seconds*2, max_instances=1, **kwargs)\n logging.info('Added send_latest_tweets and resend_bad_tweets jobs')", "def process_request(t):\n time.sleep(t)", "def process_request(t):\n time.sleep(t)", "def process_request(t):\n time.sleep(t)", "def __call__(self, func, *args):\n\n def wrapped_func(*args, **kwargs):\n\n count = 0\n while True:\n response = func(*args, **kwargs)\n if response.status_code in range(200, 300):\n return response\n elif response.status_code >= 500:\n if count == self.retry_count:\n return response\n else:\n time.sleep(pow(2, count))\n count += 1\n continue\n else:\n return response\n\n return wrapped_func", "def url_socket_retry(func, *args, **kw):\n min_delay = 1\n max_delay = 32\n max_attempts = 4\n\n for idx, delay in enumerate(\n backoff_delays(min_delay, max_delay, jitter=True)):\n try:\n return func(*args, **kw)\n except HTTPError as err:\n if not (err.status == 503 and 'Slow Down' in err.reason):\n raise\n if idx == max_attempts - 1:\n raise\n except URLError as err:\n if not isinstance(err.reason, socket.error):\n raise\n if err.reason.errno not in (104, 110):\n raise\n if idx == max_attempts - 1:\n raise\n\n time.sleep(delay)", "def _submit_for_retry(entry_id, email_id, to_list, global_email_context, current_exception, subtask_status, skip_retry_max=False):\r\n task_id = subtask_status.task_id\r\n log.info(\"Task %s: Successfully sent to %s users; failed to send to %s users (and skipped %s users)\",\r\n task_id, subtask_status.succeeded, subtask_status.failed, subtask_status.skipped)\r\n\r\n # Calculate time until we retry this task (in seconds):\r\n # The value for max_retries is increased by the number of times an \"infinite-retry\" exception\r\n # has been retried. We want the regular retries to trigger max-retry checking, but not these\r\n # special retries. 
So we count them separately.\r\n max_retries = _get_current_task().max_retries + subtask_status.retried_nomax\r\n base_delay = _get_current_task().default_retry_delay\r\n if skip_retry_max:\r\n # once we reach five retries, don't increase the countdown further.\r\n retry_index = min(subtask_status.retried_nomax, 5)\r\n exception_type = 'sending-rate'\r\n # if we have a cap, after all, apply it now:\r\n if hasattr(settings, 'BULK_EMAIL_INFINITE_RETRY_CAP'):\r\n retry_cap = settings.BULK_EMAIL_INFINITE_RETRY_CAP + subtask_status.retried_withmax\r\n max_retries = min(max_retries, retry_cap)\r\n else:\r\n retry_index = subtask_status.retried_withmax\r\n exception_type = 'transient'\r\n\r\n # Skew the new countdown value by a random factor, so that not all\r\n # retries are deferred by the same amount.\r\n countdown = ((2 ** retry_index) * base_delay) * random.uniform(.75, 1.25)\r\n\r\n log.warning('Task %s: email with id %d not delivered due to %s error %s, retrying send to %d recipients in %s seconds (with max_retry=%s)',\r\n task_id, email_id, exception_type, current_exception, len(to_list), countdown, max_retries)\r\n\r\n # we make sure that we update the InstructorTask with the current subtask status\r\n # *before* actually calling retry(), to be sure that there is no race\r\n # condition between this update and the update made by the retried task.\r\n update_subtask_status(entry_id, task_id, subtask_status)\r\n\r\n # Now attempt the retry. If it succeeds, it returns a RetryTaskError that\r\n # needs to be returned back to Celery. If it fails, we return the existing\r\n # exception.\r\n try:\r\n send_course_email.retry(\r\n args=[\r\n entry_id,\r\n email_id,\r\n to_list,\r\n global_email_context,\r\n subtask_status.to_dict(),\r\n ],\r\n exc=current_exception,\r\n countdown=countdown,\r\n max_retries=max_retries,\r\n throw=True,\r\n )\r\n except RetryTaskError as retry_error:\r\n # If the retry call is successful, update with the current progress:\r\n log.exception(u'Task %s: email with id %d caused send_course_email task to retry.',\r\n task_id, email_id)\r\n return subtask_status, retry_error\r\n except Exception as retry_exc:\r\n # If there are no more retries, because the maximum has been reached,\r\n # we expect the original exception to be raised. We catch it here\r\n # (and put it in retry_exc just in case it's different, but it shouldn't be),\r\n # and update status as if it were any other failure. That means that\r\n # the recipients still in the to_list are counted as failures.\r\n log.exception(u'Task %s: email with id %d caused send_course_email task to fail to retry. 
To list: %s',\r\n task_id, email_id, [i['email'] for i in to_list])\r\n num_failed = len(to_list)\r\n subtask_status.increment(subtask_status, failed=num_failed, state=FAILURE)\r\n return subtask_status, retry_exc", "def answer_waiting_call(self) -> None:", "def _messages_post(self, queue, messages, min_msg_count, max_msg_count):\n with atomic.ActionTimer(self, \"zaqar.post_between_%s_and_%s_messages\" %\n (min_msg_count, max_msg_count)):\n queue.post(messages)", "def _process_whisper_queue(self, whisper_queue):\n while True:\n if len(whisper_queue) > 0:\n whisper_tuple = (whisper_queue.pop())\n self.ts.send_whisper(whisper_tuple[0], whisper_tuple[1])\n time.sleep(.5)", "def process_AResponse(self) :\n while (1):\n str = self.recv(self.sock)\n if (len(str) > 0):\n response = amazon_pb2.AResponses()\n response.ParseFromString(str)\n print(response)\n # handle import new stock\n for arrive in response.arrived:\n things = arrive.things\n for thing in things:\n products = Whstock.objects.filter(pid = thing.id)\n if len(products) != 0:\n products[0].count = products[0].count + thing.count\n products[0].save()\n else :\n #need to specify world id\n whstock = Whstock()\n whstock.hid = arrive.whnum\n whstock.pid = thing.id\n whstock.dsc = thing.description\n whstock.count = thing.count\n whstock.save()\n # handle pack ready response\n #when ready send AU command to let UPS truck pickup,\n #use another thread for wait for UPS response\n #when receive response send ALoad command\n #when reveived loaded for Sim send AU command and let flag = 1;\n # tell UPS packages is ready and ask for trucks (provide destinaiton address)\n # tell warehouse to load when UPS trucks ready\n for currReady in response.ready:\n #save current state\n trans = Transaction.objects.get(ship_id = currReady)\n trans.ready = True\n trans.save()\n #connect to UPS\n ups_handler = threading.Thread(target=self.process_Uresponse, args=(trans,))\n ups_handler.start()\n self.AUCommand(trans, 0)\n print(\"first msg for UPS sent(to pickup)\")\n ups_handler.join()\n\n #load info from sim\n for load in response.loaded:\n #save current state\n trans = Transaction.objects.get(ship_id = load)\n trans.loaded = True\n trans.save()\n #connect to UPS\n self.AUCommand(trans, 1)\n print(\"second msg for UPS sent(get load success from sim world)\")", "def generate_message(self, mtu):\r\n raise GeneratorExit(\"No more message to send\")", "def _finalize(self, response, batch):\n if not response:\n return # Could happen in the case of backoff failing enitrely\n\n # Check for failures that occurred in PutRecordBatch after several backoff attempts\n # And log the actual record from the batch\n failed = self._check_failures(response, batch=batch)\n\n # Remove the failed messages in this batch for an accurate metric\n successful_records = len(batch) - failed\n\n MetricLogger.log_metric(FUNCTION_NAME, MetricLogger.SQS_RECORDS_SENT, successful_records)\n LOGGER.info(\n 'Successfully sent %d message(s) to queue %s',\n successful_records,\n self.queue.url\n )", "def _on_too_many_orders(self, msg):\r\n self.debug(\"### Server said: '%s\" % msg[\"message\"])\r\n self.count_submitted -= 1\r\n self.signal_order_too_fast(self, msg)", "def _doBack(self):\n argin = [\"2\", \"0\"] # to send string array with two arg...\n self._executeServerTask(self._cmdBack, argin)", "def queueStatusAll():", "def handleMessage(msg):", "def test_queue_empty(self, affiliate_items):\n tick = 0\n\n def throttle(*args, **kwargs):\n nonlocal tick\n sleep(tick * 2)\n tick += 1\n\n 
update_function = mock.Mock(side_effect=throttle)\n batch_job = BatchJob(affiliate_items, update_function)\n\n completed_jobs = 0\n\n with mock.patch('chiton.rack.affiliates.bulk.QUEUE_TIMEOUT', 1):\n with pytest.raises(BatchError):\n for result in batch_job.run():\n completed_jobs += 1\n\n assert completed_jobs == 1" ]
[ "0.74842525", "0.69976413", "0.61308867", "0.6106848", "0.6101564", "0.60256076", "0.57225543", "0.56751674", "0.55376226", "0.5528896", "0.5524516", "0.55091655", "0.55081546", "0.5478417", "0.54683506", "0.54618955", "0.5446962", "0.541031", "0.53984827", "0.53874975", "0.53675365", "0.53424597", "0.53339624", "0.53096616", "0.5259868", "0.5244003", "0.52321005", "0.5226234", "0.522578", "0.52211577", "0.5215621", "0.52050114", "0.51975864", "0.51825583", "0.517799", "0.51633495", "0.51352024", "0.51349115", "0.51292557", "0.5123629", "0.5121968", "0.5117416", "0.51084733", "0.5104975", "0.50973994", "0.50854814", "0.50828487", "0.50794005", "0.5075371", "0.50744367", "0.5074429", "0.5058775", "0.50493824", "0.5047613", "0.5045532", "0.50406396", "0.50390047", "0.503668", "0.5034676", "0.5017627", "0.5015939", "0.5015939", "0.50158024", "0.50152135", "0.5014626", "0.5009532", "0.5006025", "0.5003431", "0.49897245", "0.49876302", "0.4980596", "0.49783182", "0.4975242", "0.49747866", "0.4966244", "0.4957493", "0.49532926", "0.49508548", "0.49479744", "0.49474612", "0.4942627", "0.49338326", "0.4931184", "0.49281284", "0.49281284", "0.49281284", "0.49280152", "0.49201226", "0.49154714", "0.49150056", "0.49114996", "0.49108654", "0.4907758", "0.49075595", "0.490187", "0.4900611", "0.4897512", "0.48858345", "0.48856902", "0.48849693" ]
0.5390745
19